Class: HuggingFaceLLM

Extends

  • BaseLLM

Constructors

new HuggingFaceLLM()

new HuggingFaceLLM(init?): HuggingFaceLLM

Parameters

init?: HFLLMConfig

Returns

HuggingFaceLLM

Overrides

BaseLLM.constructor

Source

packages/llamaindex/src/llm/huggingface.ts:205
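
A minimal construction sketch, assuming HuggingFaceLLM is exported from the llamaindex package and that HFLLMConfig accepts the properties listed below (modelName, tokenizerName, temperature, topP, maxTokens, contextWindow); the model id shown is only a placeholder, not a documented default.

```ts
import { HuggingFaceLLM } from "llamaindex";

// init is optional; the field values below are illustrative, not defaults.
const llm = new HuggingFaceLLM({
  modelName: "HuggingFaceTB/SmolLM2-360M-Instruct", // hypothetical model id
  temperature: 0.7,
  topP: 0.9,
  maxTokens: 256,
});
```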

Properties

contextWindow

contextWindow: number

Source

packages/llamaindex/src/llm/huggingface.ts:200


maxTokens?

optional maxTokens: number

Source

packages/llamaindex/src/llm/huggingface.ts:199


model

private model: null | PreTrainedModel = null

Source

packages/llamaindex/src/llm/huggingface.ts:203


modelName

modelName: string

Source

packages/llamaindex/src/llm/huggingface.ts:195


temperature

temperature: number

Source

packages/llamaindex/src/llm/huggingface.ts:197


tokenizer

private tokenizer: null | PreTrainedTokenizer = null

Source

packages/llamaindex/src/llm/huggingface.ts:202


tokenizerName

tokenizerName: string

Source

packages/llamaindex/src/llm/huggingface.ts:196


topP

topP: number

Source

packages/llamaindex/src/llm/huggingface.ts:198

Accessors

metadata

get metadata(): LLMMetadata

Returns

LLMMetadata

Source

packages/llamaindex/src/llm/huggingface.ts:215
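
A short sketch of reading the accessor, assuming LLMMetadata mirrors the configured properties above (temperature, topP, maxTokens, contextWindow, and the model name).

```ts
// Assumption: LLMMetadata exposes contextWindow and maxTokens among its fields.
const { contextWindow, maxTokens } = llm.metadata;
console.log(`context window: ${contextWindow}, max tokens: ${maxTokens ?? "unset"}`);
```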

Methods

chat()

chat(params)

chat(params): Promise<AsyncIterable<ChatResponseChunk>>

Parameters

params: LLMChatParamsStreaming<object, object>

Returns

Promise<AsyncIterable<ChatResponseChunk>>

Overrides

BaseLLM.chat

Source

packages/llamaindex/src/llm/huggingface.ts:242

chat(params)

chat(params): Promise<ChatResponse<object>>

Parameters

params: LLMChatParamsNonStreaming<object, object>

Returns

Promise<ChatResponse<object>>

Overrides

BaseLLM.chat

Source

packages/llamaindex/src/llm/huggingface.ts:245
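
The streaming overload is selected by passing stream: true and resolves to an async iterable of ChatResponseChunk values; otherwise a single ChatResponse is returned. A usage sketch of both calls, assuming the usual llamaindex message shape ({ role, content }) and the delta / message.content fields on the chunk and response types.

```ts
// Non-streaming: one ChatResponse with the full assistant message.
const response = await llm.chat({
  messages: [{ role: "user", content: "What is retrieval-augmented generation?" }],
});
console.log(response.message.content);

// Streaming: pass stream: true and iterate the ChatResponseChunk values.
const stream = await llm.chat({
  messages: [{ role: "user", content: "What is retrieval-augmented generation?" }],
  stream: true,
});
for await (const chunk of stream) {
  process.stdout.write(chunk.delta);
}
```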


complete()

complete(params)

complete(params): Promise<AsyncIterable<CompletionResponse>>

Parameters

params: LLMCompletionParamsStreaming

Returns

Promise<AsyncIterable<CompletionResponse>>

Inherited from

BaseLLM.complete

Source

packages/llamaindex/src/llm/base.ts:22

complete(params)

complete(params): Promise<CompletionResponse>

Parameters

params: LLMCompletionParamsNonStreaming

Returns

Promise<CompletionResponse>

Inherited from

BaseLLM.complete

Source

packages/llamaindex/src/llm/base.ts:25
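
complete() is inherited from BaseLLM and operates on a raw prompt rather than a message list. A sketch of both overloads, assuming CompletionResponse exposes a text field and that streamed chunks carry the incremental text in that same field.

```ts
// Non-streaming completion.
const completion = await llm.complete({ prompt: "The capital of France is" });
console.log(completion.text);

// Streaming completion: chunks arrive as they are generated.
const completionStream = await llm.complete({
  prompt: "The capital of France is",
  stream: true,
});
for await (const chunk of completionStream) {
  process.stdout.write(chunk.text);
}
```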


getModel()

getModel(): Promise<PreTrainedModel>

Returns

Promise<PreTrainedModel>

Source

packages/llamaindex/src/llm/huggingface.ts:234


getTokenizer()

getTokenizer(): Promise<PreTrainedTokenizer>

Returns

Promise<PreTrainedTokenizer>

Source

packages/llamaindex/src/llm/huggingface.ts:226
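
Since the private model and tokenizer properties are initialized to null, these getters presumably load the artifacts lazily on first use and return the cached instances afterwards. A sketch of calling them directly, e.g. for a rough token count; the encode call assumes the transformers.js PreTrainedTokenizer API.

```ts
// First call presumably triggers the (potentially slow) download/initialization;
// later calls should return the instances cached on the class.
const tokenizer = await llm.getTokenizer();
const model = await llm.getModel();
console.log(`loaded ${model.constructor.name}`);

// Illustrative direct use of the tokenizer: rough token count of a prompt.
const ids = tokenizer.encode("Hello, world!");
console.log(`prompt is ${ids.length} tokens`);
```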


nonStreamChat()

protected nonStreamChat(params): Promise<ChatResponse<object>>

Parameters

params: LLMChatParamsNonStreaming<object, object>

Returns

Promise<ChatResponse<object>>

Source

packages/llamaindex/src/llm/huggingface.ts:254


streamChat()

protected streamChat(params): AsyncIterable<ChatResponseChunk>

Parameters

params: LLMChatParamsStreaming<object, object>

Returns

AsyncIterable<ChatResponseChunk>

Source

packages/llamaindex/src/llm/huggingface.ts:286