From 1f52a7f85a81d712c5316e711a21d99e5da315f8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Paul=20Heidekr=C3=BCger?= Date: Mon, 28 Oct 2024 09:49:05 +0100 Subject: [PATCH] Introduce `LLMOpenAIRequestType` type alias --- .../Configuration/LLMOpenAIModelParameters.swift | 9 ++++----- .../Configuration/LLMOpenAIParameters.swift | 6 +++--- .../SpeziLLMOpenAI/Helpers/OpenAI+Export.swift | 8 ++++---- .../LLMOpenAISession+Configuration.swift | 5 ++--- .../LLMOpenAIModelOnboardingStep.swift | 16 ++++++++-------- 5 files changed, 21 insertions(+), 23 deletions(-) diff --git a/Sources/SpeziLLMOpenAI/Configuration/LLMOpenAIModelParameters.swift b/Sources/SpeziLLMOpenAI/Configuration/LLMOpenAIModelParameters.swift index 86dbfa88..6db795da 100644 --- a/Sources/SpeziLLMOpenAI/Configuration/LLMOpenAIModelParameters.swift +++ b/Sources/SpeziLLMOpenAI/Configuration/LLMOpenAIModelParameters.swift @@ -12,7 +12,7 @@ import OpenAPIRuntime /// Represents the model-specific parameters of OpenAIs LLMs. public struct LLMOpenAIModelParameters: Sendable { /// The format for model responses. - let responseFormat: Components.Schemas.CreateChatCompletionRequest.response_formatPayload? + let responseFormat: LLMOpenAIRequestType.response_formatPayload? /// The sampling temperature (0 to 2). Higher values increase randomness, lower values enhance focus. let temperature: Double? /// Nucleus sampling threshold. Considers tokens with top_p probability mass. Alternative to temperature sampling. @@ -30,7 +30,7 @@ public struct LLMOpenAIModelParameters: Sendable { /// Controls repetition (-2.0 to 2.0). Higher values reduce the likelihood of repeating content. let frequencyPenalty: Double? /// Alters specific token's likelihood in completion. - let logitBias: Components.Schemas.CreateChatCompletionRequest.logit_biasPayload + let logitBias: LLMOpenAIRequestType.logit_biasPayload /// Unique identifier for the end-user, aiding in abuse monitoring. let user: String? 
@@ -50,7 +50,7 @@ public struct LLMOpenAIModelParameters: Sendable { /// - logitBias: Alters specific token's likelihood in completion. /// - user: Unique identifier for the end-user, aiding in abuse monitoring. public init( - responseFormat: Components.Schemas.CreateChatCompletionRequest.response_formatPayload? = nil, + responseFormat: LLMOpenAIRequestType.response_formatPayload? = nil, temperature: Double? = nil, topP: Double? = nil, completionsPerOutput: Int? = nil, @@ -71,8 +71,7 @@ public struct LLMOpenAIModelParameters: Sendable { self.seed = seed self.presencePenalty = presencePenalty self.frequencyPenalty = frequencyPenalty - self.logitBias = Components.Schemas.CreateChatCompletionRequest - .logit_biasPayload(additionalProperties: logitBias) + self.logitBias = LLMOpenAIRequestType.logit_biasPayload(additionalProperties: logitBias) self.user = user } } diff --git a/Sources/SpeziLLMOpenAI/Configuration/LLMOpenAIParameters.swift b/Sources/SpeziLLMOpenAI/Configuration/LLMOpenAIParameters.swift index d3341b3c..1bc2b4a4 100644 --- a/Sources/SpeziLLMOpenAI/Configuration/LLMOpenAIParameters.swift +++ b/Sources/SpeziLLMOpenAI/Configuration/LLMOpenAIParameters.swift @@ -20,7 +20,7 @@ public struct LLMOpenAIParameters: Sendable { /// The to-be-used OpenAI model. - let modelType: LLMOpenAIModelType + let modelType: LLMOpenAIRequestType.modelPayload /// The to-be-used system prompt(s) of the LLM. let systemPrompts: [String] /// Indicates if a model access test should be made during LLM setup. @@ -37,7 +37,7 @@ public struct LLMOpenAIParameters: Sendable { /// - modelAccessTest: Indicates if access to the configured OpenAI model via the specified token should be made upon LLM setup. /// - overwritingToken: Separate OpenAI token that overrides the one defined within the ``LLMOpenAIPlatform``. public init( - modelType: LLMOpenAIModelType, + modelType: LLMOpenAIRequestType.modelPayload, systemPrompt: String? 
= Defaults.defaultOpenAISystemPrompt, modelAccessTest: Bool = false, overwritingToken: String? = nil @@ -59,7 +59,7 @@ public struct LLMOpenAIParameters: Sendable { /// - overwritingToken: Separate OpenAI token that overrides the one defined within the ``LLMOpenAIPlatform``. @_disfavoredOverload public init( - modelType: LLMOpenAIModelType, + modelType: LLMOpenAIRequestType.modelPayload, systemPrompts: [String] = [Defaults.defaultOpenAISystemPrompt], modelAccessTest: Bool = false, overwritingToken: String? = nil diff --git a/Sources/SpeziLLMOpenAI/Helpers/OpenAI+Export.swift b/Sources/SpeziLLMOpenAI/Helpers/OpenAI+Export.swift index 74844bfe..b1124321 100644 --- a/Sources/SpeziLLMOpenAI/Helpers/OpenAI+Export.swift +++ b/Sources/SpeziLLMOpenAI/Helpers/OpenAI+Export.swift @@ -8,8 +8,8 @@ import OpenAPIRuntime -/// Convenience export of the `OpenAI/Model` type. +/// Convenience export of the generated chat completion request type. /// -/// The ``LLMOpenAIModelType`` exports the `OpenAI/Model` describing the type of the to-be-used OpenAI Model. -/// This enables convenience access to the `OpenAI/Model` without naming conflicts resulting from the `OpenAI/Model` name. +/// The ``LLMOpenAIRequestType`` type alias exports the generated `CreateChatCompletionRequest` type. +/// This enables shorthand access to further request-related generated types, such as the model and response format payloads. 
+public typealias LLMOpenAIRequestType = Components.Schemas.CreateChatCompletionRequest diff --git a/Sources/SpeziLLMOpenAI/LLMOpenAISession+Configuration.swift b/Sources/SpeziLLMOpenAI/LLMOpenAISession+Configuration.swift index 16ffd619..a7ec6f37 100644 --- a/Sources/SpeziLLMOpenAI/LLMOpenAISession+Configuration.swift +++ b/Sources/SpeziLLMOpenAI/LLMOpenAISession+Configuration.swift @@ -35,7 +35,7 @@ extension LLMOpenAISession { } return await Operations.createChatCompletion - .Input(body: .json(Components.Schemas.CreateChatCompletionRequest( + .Input(body: .json(LLMOpenAIRequestType( messages: openAIContext, model: schema.parameters.modelType, frequency_penalty: schema.modelParameters.frequencyPenalty, @@ -47,8 +47,7 @@ extension LLMOpenAISession { presence_penalty: schema.modelParameters.presencePenalty, response_format: schema.modelParameters.responseFormat, seed: schema.modelParameters.seed, - stop: Components.Schemas.CreateChatCompletionRequest.stopPayload - .case2(schema.modelParameters.stopSequence), + stop: LLMOpenAIRequestType.stopPayload.case2(schema.modelParameters.stopSequence), stream: true, temperature: schema.modelParameters.temperature, top_p: schema.modelParameters.topP, diff --git a/Sources/SpeziLLMOpenAI/Onboarding/LLMOpenAIModelOnboardingStep.swift b/Sources/SpeziLLMOpenAI/Onboarding/LLMOpenAIModelOnboardingStep.swift index 9ada2d77..63b06295 100644 --- a/Sources/SpeziLLMOpenAI/Onboarding/LLMOpenAIModelOnboardingStep.swift +++ b/Sources/SpeziLLMOpenAI/Onboarding/LLMOpenAIModelOnboardingStep.swift @@ -14,7 +14,7 @@ import SwiftUI /// View to display an onboarding step for the user to enter change the OpenAI model. 
public struct LLMOpenAIModelOnboardingStep: View { public enum Default { - public static let models: [LLMOpenAIModelType] = [ + public static let models: [LLMOpenAIRequestType.modelPayload] = [ .init( value1: "GPT 3.5 Turbo", value2: .gpt_hyphen_3_period_5_hyphen_turbo @@ -31,10 +31,10 @@ public struct LLMOpenAIModelOnboardingStep: View { } - @State private var modelSelection: LLMOpenAIModelType + @State private var modelSelection: LLMOpenAIRequestType.modelPayload private let actionText: String - private let action: (LLMOpenAIModelType) -> Void - private let models: [LLMOpenAIModelType] + private let action: (LLMOpenAIRequestType.modelPayload) -> Void + private let models: [LLMOpenAIRequestType.modelPayload] public var body: some View { @@ -81,8 +81,8 @@ public struct LLMOpenAIModelOnboardingStep: View { /// - action: Action that should be performed after the openAI model selection has been done, selection is passed as closure argument. public init( actionText: LocalizedStringResource? = nil, - models: [LLMOpenAIModelType] = Default.models, - _ action: @escaping (LLMOpenAIModelType) -> Void + models: [LLMOpenAIRequestType.modelPayload] = Default.models, + _ action: @escaping (LLMOpenAIRequestType.modelPayload) -> Void ) { self.init( actionText: actionText?.localizedString() ?? String(localized: "OPENAI_MODEL_SELECTION_SAVE_BUTTON", bundle: .module), @@ -98,8 +98,8 @@ public struct LLMOpenAIModelOnboardingStep: View { @_disfavoredOverload public init( actionText: ActionText, - models: [LLMOpenAIModelType] = Default.models, - _ action: @escaping (LLMOpenAIModelType) -> Void + models: [LLMOpenAIRequestType.modelPayload] = Default.models, + _ action: @escaping (LLMOpenAIRequestType.modelPayload) -> Void ) { self.actionText = String(actionText) self.models = models