diff --git a/llms/mistral/mistralmodel.go b/llms/mistral/mistralmodel.go
index 3dbf6e8be..8d2ad22a6 100644
--- a/llms/mistral/mistralmodel.go
+++ b/llms/mistral/mistralmodel.go
@@ -116,6 +116,7 @@ func mistralChatParamsFromCallOptions(callOpts *llms.CallOptions) sdk.ChatReques
 	chatOpts := sdk.DefaultChatRequestParams
 	chatOpts.MaxTokens = callOpts.MaxTokens
 	chatOpts.Temperature = callOpts.Temperature
+	chatOpts.RandomSeed = callOpts.Seed
 	chatOpts.Tools = make([]sdk.Tool, 0)
 	for _, function := range callOpts.Functions {
 		chatOpts.Tools = append(chatOpts.Tools, sdk.Tool{
diff --git a/llms/openai/internal/openaiclient/chat.go b/llms/openai/internal/openaiclient/chat.go
index 435d90c9e..872084a0e 100644
--- a/llms/openai/internal/openaiclient/chat.go
+++ b/llms/openai/internal/openaiclient/chat.go
@@ -32,6 +32,7 @@ type ChatRequest struct {
 	Stream           bool    `json:"stream,omitempty"`
 	FrequencyPenalty float64 `json:"frequency_penalty,omitempty"`
 	PresencePenalty  float64 `json:"presence_penalty,omitempty"`
+	Seed             int     `json:"seed,omitempty"`
 
 	// ResponseFormat is the format of the response.
 	ResponseFormat *ResponseFormat `json:"response_format,omitempty"`
diff --git a/llms/openai/internal/openaiclient/completions.go b/llms/openai/internal/openaiclient/completions.go
index 9b7734597..21553e215 100644
--- a/llms/openai/internal/openaiclient/completions.go
+++ b/llms/openai/internal/openaiclient/completions.go
@@ -15,6 +15,7 @@ type CompletionRequest struct {
 	PresencePenalty  float64  `json:"presence_penalty,omitempty"`
 	TopP             float64  `json:"top_p,omitempty"`
 	StopWords        []string `json:"stop,omitempty"`
+	Seed             int      `json:"seed,omitempty"`
 
 	// StreamingFunc is a function to be called for each chunk of a streaming response.
 	// Return an error to stop streaming early.
@@ -85,5 +86,6 @@ func (c *Client) createCompletion(ctx context.Context, payload *CompletionReques
 		FrequencyPenalty: payload.FrequencyPenalty,
 		PresencePenalty:  payload.PresencePenalty,
 		StreamingFunc:    payload.StreamingFunc,
+		Seed:             payload.Seed,
 	})
 }
diff --git a/llms/openai/openaillm.go b/llms/openai/openaillm.go
index 454583475..ecec953be 100644
--- a/llms/openai/openaillm.go
+++ b/llms/openai/openaillm.go
@@ -108,6 +108,7 @@ func (o *LLM) GenerateContent(ctx context.Context, messages []llms.MessageConten
 		PresencePenalty:  opts.PresencePenalty,
 
 		FunctionCallBehavior: openaiclient.FunctionCallBehavior(opts.FunctionCallBehavior),
+		Seed:                 opts.Seed,
 	}
 	if opts.JSONMode {
 		req.ResponseFormat = ResponseFormatJSON
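
For reviewers, a minimal usage sketch of what this plumbing enables, assuming a `llms.WithSeed` call option that populates `llms.CallOptions.Seed` (the hunks above read `opts.Seed` / `callOpts.Seed`, but the options change itself is not shown in this diff):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/tmc/langchaingo/llms"
	"github.com/tmc/langchaingo/llms/openai"
)

func main() {
	llm, err := openai.New() // reads OPENAI_API_KEY from the environment
	if err != nil {
		log.Fatal(err)
	}

	// Fixing the seed asks the backend to sample deterministically, so
	// repeated identical requests should return (mostly) the same text.
	// llms.WithSeed is assumed to set CallOptions.Seed as this diff expects.
	completion, err := llm.Call(context.Background(),
		"Pick a number between 1 and 100.",
		llms.WithSeed(42),
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(completion)
}
```

Note that OpenAI documents `seed` as best-effort determinism only, and Mistral exposes the same knob as a random seed on the chat request, hence the `chatOpts.RandomSeed = callOpts.Seed` mapping in the first hunk.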