Skip to content

Commit 4750450

Browse files
authored
Add custom inference logger for @hf/inference (#1598)
The goal is to allow passing a custom logging tool (needed mainly by moon). I chose a global override rather than a per-call option like `fetch` to avoid too many code changes (as an aside, there are some places where `fetch` is not properly overridden by the options). I think the best long-term approach would be a separate logging package, so the same module can be shared across the hf.js ecosystem.
1 parent 6accae0 commit 4750450

File tree

8 files changed

+39
-8
lines changed

8 files changed

+39
-8
lines changed

packages/inference/src/index.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5,5 +5,6 @@ export * from "./tasks/index.js";
55
import * as snippets from "./snippets/index.js";
66
export * from "./lib/getProviderHelper.js";
77
export * from "./lib/makeRequestOptions.js";
8+
export { setLogger } from "./lib/logger.js";
89

910
export { snippets };

packages/inference/src/lib/getInferenceProviderMapping.ts

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@ import { EQUIVALENT_SENTENCE_TRANSFORMERS_TASKS } from "../providers/hf-inferenc
55
import type { InferenceProvider, InferenceProviderMappingEntry, InferenceProviderOrPolicy, ModelId } from "../types.js";
66
import { typedInclude } from "../utils/typedInclude.js";
77
import { InferenceClientHubApiError, InferenceClientInputError } from "../errors.js";
8+
import { getLogger } from "./logger.js";
89

910
export const inferenceProviderMappingCache = new Map<ModelId, InferenceProviderMappingEntry[]>();
1011

@@ -122,6 +123,7 @@ export async function getInferenceProviderMapping(
122123
fetch?: (input: RequestInfo, init?: RequestInit) => Promise<Response>;
123124
}
124125
): Promise<InferenceProviderMappingEntry | null> {
126+
const logger = getLogger();
125127
if (HARDCODED_MODEL_INFERENCE_MAPPING[params.provider][params.modelId]) {
126128
return HARDCODED_MODEL_INFERENCE_MAPPING[params.provider][params.modelId];
127129
}
@@ -138,7 +140,7 @@ export async function getInferenceProviderMapping(
138140
);
139141
}
140142
if (providerMapping.status === "staging") {
141-
console.warn(
143+
logger.warn(
142144
`Model ${params.modelId} is in staging mode for provider ${params.provider}. Meant for test purposes only.`
143145
);
144146
}
@@ -152,6 +154,7 @@ export async function resolveProvider(
152154
modelId?: string,
153155
endpointUrl?: string
154156
): Promise<InferenceProvider> {
157+
const logger = getLogger();
155158
if (endpointUrl) {
156159
if (provider) {
157160
throw new InferenceClientInputError("Specifying both endpointUrl and provider is not supported.");
@@ -160,7 +163,7 @@ export async function resolveProvider(
160163
return "hf-inference";
161164
}
162165
if (!provider) {
163-
console.log(
166+
logger.log(
164167
"Defaulting to 'auto' which will select the first provider available for the model, sorted by the user's order in https://hf.co/settings/inference-providers."
165168
);
166169
provider = "auto";
@@ -171,7 +174,7 @@ export async function resolveProvider(
171174
}
172175
const mappings = await fetchInferenceProviderMappingForModel(modelId);
173176
provider = mappings[0]?.provider as InferenceProvider | undefined;
174-
console.log("Auto selected provider:", provider);
177+
logger.log("Auto selected provider:", provider);
175178
}
176179
if (!provider) {
177180
throw new InferenceClientInputError(`No Inference Provider available for model ${modelId}.`);

packages/inference/src/lib/logger.ts

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,11 @@
1+
import type { Logger } from "../types.js";
2+
3+
let globalLogger: Logger = console;
4+
5+
export function setLogger(logger: Logger): void {
6+
globalLogger = logger;
7+
}
8+
9+
export function getLogger(): Logger {
10+
return globalLogger;
11+
}

packages/inference/src/providers/black-forest-labs.ts

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@ import {
1919
InferenceClientProviderApiError,
2020
InferenceClientProviderOutputError,
2121
} from "../errors.js";
22+
import { getLogger } from "../lib/logger.js";
2223
import type { BodyParams, HeaderParams, UrlParams } from "../types.js";
2324
import { delay } from "../utils/delay.js";
2425
import { omit } from "../utils/omit.js";
@@ -67,10 +68,11 @@ export class BlackForestLabsTextToImageTask extends TaskProviderHelper implement
6768
headers?: HeadersInit,
6869
outputType?: "url" | "blob"
6970
): Promise<string | Blob> {
71+
const logger = getLogger();
7072
const urlObj = new URL(response.polling_url);
7173
for (let step = 0; step < 5; step++) {
7274
await delay(1000);
73-
console.debug(`Polling Black Forest Labs API for the result... ${step + 1}/5`);
75+
logger.debug(`Polling Black Forest Labs API for the result... ${step + 1}/5`);
7476
urlObj.searchParams.set("attempt", step.toString(10));
7577
const resp = await fetch(urlObj, { headers: { "Content-Type": "application/json" } });
7678
if (!resp.ok) {

packages/inference/src/snippets/getInferenceSnippets.ts

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,7 @@ import { getProviderHelper } from "../lib/getProviderHelper.js";
1212
import { makeRequestOptionsFromResolvedModel } from "../lib/makeRequestOptions.js";
1313
import type { InferenceProviderMappingEntry, InferenceProviderOrPolicy, InferenceTask, RequestArgs } from "../types.js";
1414
import { templates } from "./templates.exported.js";
15+
import { getLogger } from "../lib/logger.js";
1516

1617
export type InferenceSnippetOptions = {
1718
streaming?: boolean;
@@ -140,6 +141,7 @@ const snippetGenerator = (templateName: string, inputPreparationFn?: InputPrepar
140141
inferenceProviderMapping?: InferenceProviderMappingEntry,
141142
opts?: InferenceSnippetOptions
142143
): InferenceSnippet[] => {
144+
const logger = getLogger();
143145
const providerModelId = inferenceProviderMapping?.providerId ?? model.id;
144146
/// Hacky: hard-code conversational templates here
145147
let task = model.pipeline_tag as InferenceTask;
@@ -156,7 +158,7 @@ const snippetGenerator = (templateName: string, inputPreparationFn?: InputPrepar
156158
try {
157159
providerHelper = getProviderHelper(provider, task);
158160
} catch (e) {
159-
console.error(`Failed to get provider helper for ${provider} (${task})`, e);
161+
logger.error(`Failed to get provider helper for ${provider} (${task})`, e);
160162
return [];
161163
}
162164

@@ -191,7 +193,7 @@ const snippetGenerator = (templateName: string, inputPreparationFn?: InputPrepar
191193
try {
192194
providerInputs = JSON.parse(bodyAsObj);
193195
} catch (e) {
194-
console.error("Failed to parse body as JSON", e);
196+
logger.error("Failed to parse body as JSON", e);
195197
}
196198
}
197199

packages/inference/src/tasks/custom/request.ts

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@ import { resolveProvider } from "../../lib/getInferenceProviderMapping.js";
22
import { getProviderHelper } from "../../lib/getProviderHelper.js";
33
import type { InferenceTask, Options, RequestArgs } from "../../types.js";
44
import { innerRequest } from "../../utils/request.js";
5+
import { getLogger } from "../../lib/logger.js";
56

67
/**
78
* Primitive to make custom calls to the inference provider
@@ -14,7 +15,8 @@ export async function request<T>(
1415
task?: InferenceTask;
1516
}
1617
): Promise<T> {
17-
console.warn(
18+
const logger = getLogger();
19+
logger.warn(
1820
"The request method is deprecated and will be removed in a future version of huggingface.js. Use specific task functions instead."
1921
);
2022
const provider = await resolveProvider(args.provider, args.model, args.endpointUrl);

packages/inference/src/tasks/custom/streamingRequest.ts

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@ import { resolveProvider } from "../../lib/getInferenceProviderMapping.js";
22
import { getProviderHelper } from "../../lib/getProviderHelper.js";
33
import type { InferenceTask, Options, RequestArgs } from "../../types.js";
44
import { innerStreamingRequest } from "../../utils/request.js";
5+
import { getLogger } from "../../lib/logger.js";
56

67
/**
78
* Primitive to make custom inference calls that expect server-sent events, and returns the response through a generator
@@ -14,7 +15,8 @@ export async function* streamingRequest<T>(
1415
task?: InferenceTask;
1516
}
1617
): AsyncGenerator<T> {
17-
console.warn(
18+
const logger = getLogger();
19+
logger.warn(
1820
"The streamingRequest method is deprecated and will be removed in a future version of huggingface.js. Use specific task functions instead."
1921
);
2022
const provider = await resolveProvider(args.provider, args.model, args.endpointUrl);

packages/inference/src/types.ts

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,14 @@ import type { ChatCompletionInput, PipelineType, WidgetType } from "@huggingface
55
*/
66
export type ModelId = string;
77

8+
/**
 * Minimal console-compatible logging interface used by `@huggingface/inference`.
 * The global `console` satisfies this shape and is the default; a custom
 * implementation can be installed via `setLogger` (see `lib/logger.ts`).
 * Each method takes a primary message followed by optional extra values.
 */
export interface Logger {
9+
debug: (message: string, ...args: unknown[]) => void;
10+
info: (message: string, ...args: unknown[]) => void;
11+
warn: (message: string, ...args: unknown[]) => void;
12+
error: (message: string, ...args: unknown[]) => void;
13+
log: (message: string, ...args: unknown[]) => void;
14+
}
15+
816
export interface Options {
917
/**
1018
* (Default: true) Boolean. If a request 503s, the request will be retried with the same parameters.

0 commit comments

Comments
 (0)