// Package llm provides the LLM integration layer for the Orca framework.
//
// It defines the LLM interface for interacting with language models,
// the Ollama client implementation, and the shared types for chat
// messages, tool calls, and streaming responses.
package llm

import "context"

// LLM is the interface for interacting with language models.
//
// Implementations provide Chat and ChatWithTools for complete responses
// and Stream for token-by-token streaming. All three accept a list of
// messages; the Chat variants return the model's response, while Stream
// delivers it incrementally to a handler. Illustrative calling sketches
// follow the interface definition.
type LLM interface {
	// Chat sends a list of messages to the LLM and returns a complete response.
	// If the model decides to call tools, the response contains ToolCalls.
	Chat(ctx context.Context, messages []Message) (*Response, error)

	// ChatWithTools sends messages with available tools and returns a response.
	// The model may return ToolCalls that the caller should execute and feed back.
	ChatWithTools(ctx context.Context, messages []Message, tools []ToolDef) (*Response, error)

	// Stream sends messages and streams the response token-by-token.
	// The handler is called for each chunk. The final response is not
	// collected; use Chat for complete responses.
	Stream(ctx context.Context, messages []Message, handler StreamHandler) error
}
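
// runToolLoop is an illustrative sketch of the calling pattern the LLM
// interface is designed for; it is not part of the package API. It assumes
// Response exposes a ToolCalls slice of ToolCall values (the shared tool-call
// types mentioned in the package comment) and leaves tool execution to a
// caller-supplied function that turns each result into a Message for the next
// turn. A real caller would also bound the number of iterations.
func runToolLoop(
	ctx context.Context,
	llm LLM,
	messages []Message,
	tools []ToolDef,
	execute func(ctx context.Context, call ToolCall) (Message, error),
) (*Response, error) {
	for {
		resp, err := llm.ChatWithTools(ctx, messages, tools)
		if err != nil {
			return nil, err
		}
		// No tool calls: the model has produced its final answer.
		if len(resp.ToolCalls) == 0 {
			return resp, nil
		}
		// Execute each requested tool and append the result so the model can
		// see it on the next turn.
		for _, call := range resp.ToolCalls {
			result, err := execute(ctx, call)
			if err != nil {
				return nil, err
			}
			messages = append(messages, result)
		}
	}
}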
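
// chatOrStream is a second illustrative sketch, also not part of the package
// API. It captures the split documented on the interface: Stream hands chunks
// to the caller's handler and never assembles a full Response, so code that
// needs the complete reply should use Chat instead. It assumes StreamHandler
// is a nil-comparable function or interface type from the shared types.
func chatOrStream(ctx context.Context, llm LLM, messages []Message, handler StreamHandler) (*Response, error) {
	if handler != nil {
		// Streaming path: chunks go to the handler; there is no Response to
		// return once streaming completes.
		return nil, llm.Stream(ctx, messages, handler)
	}
	// Non-streaming path: wait for the complete response.
	return llm.Chat(ctx, messages)
}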