Zodv4 #1224
5 changes: 5 additions & 0 deletions .changeset/dark-pans-carry.md
@@ -0,0 +1,5 @@
+---
+"@browserbasehq/stagehand": patch
+---
+
+Migrate from zodv3 to zodv4
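
Most of the diff below is the same one-line swap: Zod 4 is imported from the package root instead of the pinned "zod/v3" subpath. A minimal sketch of the pattern (the schema here is illustrative, not from the repo):

```ts
// Before: import { z } from "zod/v3";
// After:
import { z } from "zod";

// Typical extraction-style schema; definitions like this are
// source-compatible across the v3 -> v4 migration.
const gameState = z.object({
  score: z.number(),
  highestTile: z.number(),
});

type GameState = z.infer<typeof gameState>;

const state: GameState = { score: 0, highestTile: 2 };
```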
2 changes: 1 addition & 1 deletion packages/core/examples/2048.ts
@@ -1,5 +1,5 @@
 import { Stagehand } from "../lib/v3";
-import { z } from "zod/v3";
+import { z } from "zod";
 
 async function example() {
   console.log("🎮 Starting 2048 bot...");
2 changes: 1 addition & 1 deletion packages/core/examples/agent-custom-tools.ts
@@ -1,7 +1,7 @@
 /**
  * This example shows how to pass custom tools to stagehand agent (both CUA and non-CUA)
  */
-import { z } from "zod/v3";
+import { z } from "zod";
 import { tool } from "ai";
 import { Stagehand } from "../lib/v3";
 import chalk from "chalk";
2 changes: 1 addition & 1 deletion packages/core/examples/custom_client_aisdk.ts
@@ -7,7 +7,7 @@
  */
 import { Stagehand } from "../lib/v3";
 import { AISdkClient } from "./external_clients/aisdk";
-import { z } from "zod/v3";
+import { z } from "zod";
 import { openai } from "@ai-sdk/openai";
 
 async function example() {
4 changes: 3 additions & 1 deletion packages/core/examples/custom_client_langchain.ts
@@ -3,15 +3,17 @@
  *
  * You will need to reference the Langchain Client in /external_clients/langchain.ts
  */
-import { z } from "zod/v3";
+import { z } from "zod";
 import { Stagehand } from "../lib/v3";
 import { LangchainClient } from "./external_clients/langchain";
 import { ChatOpenAI } from "@langchain/openai";
 
 async function example() {
+  // @ts-expect-error Type instantiation is excessively deep and possibly infinite
   const stagehand = new Stagehand({
     env: "BROWSERBASE",
     verbose: 1,
+    // @ts-expect-error Type instantiation is excessively deep and possibly infinite
     llmClient: new LangchainClient(
       new ChatOpenAI({
         model: "gpt-4o",
2 changes: 1 addition & 1 deletion packages/core/examples/custom_client_openai.ts
@@ -6,7 +6,7 @@
  * You will need to reference the Custom OpenAI Client in /external_clients/customOpenAI.ts
  */
 import { Stagehand } from "../lib/v3";
-import { z } from "zod/v3";
+import { z } from "zod";
 import { CustomOpenAIClient } from "./external_clients/customOpenAI";
 import OpenAI from "openai";
 
2 changes: 1 addition & 1 deletion packages/core/examples/external_clients/customOpenAI.ts
@@ -22,7 +22,7 @@ import type {
   ChatCompletionSystemMessageParam,
   ChatCompletionUserMessageParam,
 } from "openai/resources/chat/completions";
-import { z } from "zod/v3";
+import { z } from "zod";
 import { CreateChatCompletionResponseError } from "../../lib/v3";
 
 function validateZodSchema(schema: z.ZodTypeAny, data: unknown) {
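
The hunk shows only the signature of validateZodSchema; its body is collapsed. For context, a hypothetical sketch of such a helper (not the repo's actual code) works unchanged under Zod 4, since safeParse keeps the same shape across versions:

```ts
import { z } from "zod";

// Hypothetical body: parse and surface a readable error on mismatch.
function validateZodSchema(schema: z.ZodTypeAny, data: unknown) {
  const result = schema.safeParse(data);
  if (!result.success) {
    // result.error is a z.ZodError listing every failed path
    throw new Error(`LLM response failed validation: ${result.error.message}`);
  }
  return result.data;
}
```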
7 changes: 3 additions & 4 deletions packages/core/examples/external_clients/langchain.ts
@@ -4,7 +4,7 @@ import {
   LLMClient,
   AvailableModel,
 } from "../../lib/v3";
-import { zodToJsonSchema } from "zod-to-json-schema";
+import { z } from "zod";
 import {
   AIMessage,
   BaseMessageLike,
@@ -60,9 +60,8 @@ export class LangchainClient extends LLMClient {
     );
 
     if (options.response_model) {
-      const responseSchema = zodToJsonSchema(options.response_model.schema, {
-        $refStrategy: "none",
-      });
+      // $refStrategy config is no longer needed; inlining is now the default behavior
+      const responseSchema = z.toJSONSchema(options.response_model.schema);
      const structuredModel = this.model.withStructuredOutput(responseSchema);
      const response = await structuredModel.invoke(formattedMessages);
 
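
This is the one substantive API change in the migration: Zod 4 ships JSON Schema conversion natively, so the zod-to-json-schema dependency can be dropped. A minimal sketch of the equivalence (the schema is illustrative):

```ts
import { z } from "zod";

const flight = z.object({
  origin: z.string(),
  destination: z.string(),
  price: z.number(),
});

// z.toJSONSchema inlines reused subschemas by default, which is what
// zod-to-json-schema's { $refStrategy: "none" } had to opt into.
// Passing { reused: "ref" } restores $ref-based output if needed.
const jsonSchema = z.toJSONSchema(flight);
console.log(JSON.stringify(jsonSchema, null, 2));
```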
2 changes: 1 addition & 1 deletion packages/core/examples/parameterizeApiKey.ts
@@ -1,5 +1,5 @@
 import { Stagehand } from "../lib/v3";
-import { z } from "zod/v3";
+import { z } from "zod";
 
 /**
  * This example shows how to parameterize the API key for the LLM provider.
2 changes: 1 addition & 1 deletion packages/core/examples/v3/patchright.ts
@@ -1,6 +1,6 @@
 import { Stagehand } from "../../lib/v3";
 import { chromium } from "patchright-core";
-import { z } from "zod/v3";
+import { z } from "zod";
 
 async function example(stagehand: Stagehand) {
   const browser = await chromium.connectOverCDP({
2 changes: 1 addition & 1 deletion packages/core/examples/v3/playwright.ts
@@ -1,6 +1,6 @@
 import { Stagehand } from "../../lib/v3";
 import { chromium } from "playwright-core";
-import { z } from "zod/v3";
+import { z } from "zod";
 
 async function example(stagehand: Stagehand) {
   const browser = await chromium.connectOverCDP({
2 changes: 1 addition & 1 deletion packages/core/examples/v3/targetedExtract.ts
@@ -1,5 +1,5 @@
 import { Stagehand } from "../../lib/v3";
-import { z } from "zod/v3";
+import { z } from "zod";
 
 async function example(stagehand: Stagehand) {
   const page = stagehand.context.pages()[0];
2 changes: 1 addition & 1 deletion packages/core/examples/v3_example.ts
@@ -1,5 +1,5 @@
 import { V3 } from "../lib/v3";
-import { z } from "zod/v3";
+import { z } from "zod";
 
 async function example(v3: V3) {
   const page = v3.context.pages()[0];
29 changes: 7 additions & 22 deletions packages/core/lib/inference.ts
@@ -1,4 +1,4 @@
-import { z } from "zod/v3";
+import { z } from "zod";
 import { LogLine } from "./v3/types/public/logs";
 import { ChatMessage, LLMClient } from "./v3/llm/LLMClient";
 import {
@@ -12,20 +12,8 @@ import {
 } from "./prompt";
 import { appendSummary, writeTimestampedTxtFile } from "./inferenceLogUtils";
 
-/** Simple usage shape if your LLM returns usage tokens. */
-interface LLMUsage {
-  prompt_tokens: number;
-  completion_tokens: number;
-  total_tokens: number;
-}
-
-/**
- * For calls that use a schema: the LLMClient may return { data: T; usage?: LLMUsage }
- */
-export interface LLMParsedResponse<T> {
-  data: T;
-  usage?: LLMUsage;
-}
+// Re-export for backward compatibility
+export type { LLMParsedResponse } from "./v3/llm/LLMClient";
 
 export async function extract({
   instruction,
@@ -101,8 +89,7 @@ export async function extract({
   });
   const extractEndTime = Date.now();
 
-  const { data: extractedData, usage: extractUsage } =
-    extractionResponse as LLMParsedResponse<ExtractionResponse>;
+  const { data: extractedData, usage: extractUsage } = extractionResponse;
 
   let extractResponseFile = "";
   if (logInferenceToFile) {
@@ -171,7 +158,7 @@ export async function extract({
       progress: metadataResponseProgress,
     },
     usage: metadataResponseUsage,
-  } = metadataResponse as LLMParsedResponse<MetadataResponse>;
+  } = metadataResponse;
 
   let metadataResponseFile = "";
   if (logInferenceToFile) {
@@ -308,8 +295,7 @@ export async function observe({
   const end = Date.now();
   const usageTimeMs = end - start;
 
-  const { data: observeData, usage: observeUsage } =
-    rawResponse as LLMParsedResponse<ObserveResponse>;
+  const { data: observeData, usage: observeUsage } = rawResponse;
   const promptTokens = observeUsage?.prompt_tokens ?? 0;
   const completionTokens = observeUsage?.completion_tokens ?? 0;
 
@@ -436,8 +422,7 @@ export async function act({
   const end = Date.now();
   const usageTimeMs = end - start;
 
-  const { data: actData, usage: actUsage } =
-    rawResponse as LLMParsedResponse<ActResponse>;
+  const { data: actData, usage: actUsage } = rawResponse;
   const promptTokens = actUsage?.prompt_tokens ?? 0;
   const completionTokens = actUsage?.completion_tokens ?? 0;
 
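
The `as LLMParsedResponse<...>` casts could be dropped because the type now lives next to LLMClient and is re-exported from inference.ts for backward compatibility. A minimal sketch of the pattern, with the shape taken from the interface deleted above and the createChatCompletion name purely illustrative:

```ts
// Shape now exported from ./v3/llm/LLMClient (assumed from the diff).
interface LLMUsage {
  prompt_tokens: number;
  completion_tokens: number;
  total_tokens: number;
}

export interface LLMParsedResponse<T> {
  data: T;
  usage?: LLMUsage;
}

// When the client method itself is typed as Promise<LLMParsedResponse<T>>,
// call sites destructure directly and the casts become unnecessary:
declare function createChatCompletion<T>(): Promise<LLMParsedResponse<T>>;

async function demo() {
  const { data, usage } = await createChatCompletion<{ answer: string }>();
  console.log(data.answer, usage?.total_tokens ?? 0);
}
```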