Skip to main content

Class: HuggingFaceInferenceAPI

Wrapper around Hugging Face's Inference API. API Docs: https://huggingface.co/docs/huggingface.js/inference/README List of tasks with models: huggingface.co/api/tasks

Note that the Conversational API is not yet supported by the Inference API. They recommend using the text generation API instead. See: https://github.com/huggingface/huggingface.js/issues/586#issuecomment-2024059308

Extends

  • BaseLLM

Constructors

new HuggingFaceInferenceAPI()

new HuggingFaceInferenceAPI(init): HuggingFaceInferenceAPI

Parameters

init: HFConfig

Returns

HuggingFaceInferenceAPI

Overrides

BaseLLM.constructor

Source

packages/llamaindex/src/llm/huggingface.ts:87

Properties

contextWindow

contextWindow: number

Source

packages/llamaindex/src/llm/huggingface.ts:84


hf

hf: HfInference

Source

packages/llamaindex/src/llm/huggingface.ts:85


maxTokens?

optional maxTokens: number

Source

packages/llamaindex/src/llm/huggingface.ts:83


model

model: string

Source

packages/llamaindex/src/llm/huggingface.ts:80


temperature

temperature: number

Source

packages/llamaindex/src/llm/huggingface.ts:81


topP

topP: number

Source

packages/llamaindex/src/llm/huggingface.ts:82

Accessors

metadata

get metadata(): LLMMetadata

Returns

LLMMetadata

Source

packages/llamaindex/src/llm/huggingface.ts:108

Methods

chat()

chat(params)

chat(params): Promise<AsyncIterable<ChatResponseChunk>>

Parameters

params: LLMChatParamsStreaming<object, object>

Returns

Promise<AsyncIterable<ChatResponseChunk>>

Overrides

BaseLLM.chat

Source

packages/llamaindex/src/llm/huggingface.ts:119

chat(params)

chat(params): Promise<ChatResponse<object>>

Parameters

params: LLMChatParamsNonStreaming<object, object>

Returns

Promise<ChatResponse<object>>

Overrides

BaseLLM.chat

Source

packages/llamaindex/src/llm/huggingface.ts:122


complete()

complete(params)

complete(params): Promise<AsyncIterable<CompletionResponse>>

Parameters

params: LLMCompletionParamsStreaming

Returns

Promise<AsyncIterable<CompletionResponse>>

Inherited from

BaseLLM.complete

Source

packages/llamaindex/src/llm/base.ts:22

complete(params)

complete(params): Promise<CompletionResponse>

Parameters

params: LLMCompletionParamsNonStreaming

Returns

Promise<CompletionResponse>

Inherited from

BaseLLM.complete

Source

packages/llamaindex/src/llm/base.ts:25


messagesToPrompt()

private messagesToPrompt(messages): string

Parameters

messages: ChatMessage<ToolCallLLMMessageOptions>[]

Returns

string

Source

packages/llamaindex/src/llm/huggingface.ts:131


nonStreamChat()

protected nonStreamChat(params): Promise<ChatResponse<object>>

Parameters

params: LLMChatParamsNonStreaming<object, object>

Returns

Promise<ChatResponse<object>>

Source

packages/llamaindex/src/llm/huggingface.ts:151


streamChat()

protected streamChat(params): AsyncIterable<ChatResponseChunk>

Parameters

params: LLMChatParamsStreaming<object, object>

Returns

AsyncIterable<ChatResponseChunk>

Source

packages/llamaindex/src/llm/huggingface.ts:168