diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml new file mode 100644 index 0000000..b6de7b7 --- /dev/null +++ b/.github/workflows/check.yml @@ -0,0 +1,47 @@ +name: check + +on: + pull_request: + push: + branches: main + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.ref }} + cancel-in-progress: true + +jobs: + check: + runs-on: macos-latest + env: + MINT_PATH: ${{ github.workspace }}/mint + steps: + - uses: actions/checkout@v4 + + - uses: actions/setup-python@v4 + with: + python-version: 3.6 + + - name: Cache Mint packages + uses: actions/cache@v3 + with: + path: ${{ env.MINT_PATH }} + key: ${{ runner.os }}-mint-${{ hashFiles('**/Mintfile') }} + restore-keys: ${{ runner.os }}-mint- + + - name: Setup check + run: | + brew update + brew install mint + mint bootstrap + + - name: Style + run: scripts/style.sh test-only + + - name: Whitespace + run: scripts/check_whitespace.sh + + - name: Filename spaces + run: scripts/check_filename_spaces.sh + + - name: Copyrights + run: scripts/check_copyright.sh diff --git a/.github/workflows/cli.yml b/.github/workflows/cli.yml new file mode 100644 index 0000000..857c46c --- /dev/null +++ b/.github/workflows/cli.yml @@ -0,0 +1,30 @@ +name: cli + +on: + pull_request: + schedule: + # Run every day at 11pm (PST) - cron uses UTC times + - cron: '0 7 * * *' + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.ref }} + cancel-in-progress: true + +jobs: + swift-build-run: + strategy: + matrix: + target: [macOS] + os: [macos-13] + include: + - os: macos-13 + xcode: Xcode_15.0.1 + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v4 + - name: Xcode + run: sudo xcode-select -s /Applications/${{ matrix.xcode }}.app/Contents/Developer + - name: Initialize xcodebuild + run: xcodebuild -list + - name: Build CLI + run: scripts/third_party/travis/retry.sh scripts/build.sh GenerativeAICLI macOS build Examples/GenerativeAICLI diff --git a/.github/workflows/samples.yml b/.github/workflows/samples.yml new file mode 100644 index 0000000..0135c09 --- /dev/null +++ b/.github/workflows/samples.yml @@ -0,0 +1,31 @@ +name: samples + +on: + pull_request: + schedule: + # Run every day at 11pm (PST) - cron uses UTC times + - cron: '0 7 * * *' + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.ref }} + cancel-in-progress: true + +jobs: + swift-build-run: + strategy: + matrix: + # Test build with debug and release configs (whether or not DEBUG is set and optimization level) + build: [build, archive] + os: [macos-13] + include: + - os: macos-13 + xcode: Xcode_15.0.1 + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v4 + - name: Xcode + run: sudo xcode-select -s /Applications/${{ matrix.xcode }}.app/Contents/Developer + - name: Initialize xcodebuild + run: xcodebuild -list + - name: Build the sample + run: scripts/third_party/travis/retry.sh scripts/build.sh GenerativeAISample iOS ${{ matrix.build }} Examples/GenerativeAISample/GenerativeAISample.xcodeproj diff --git a/.github/workflows/spm.yml b/.github/workflows/spm.yml new file mode 100644 index 0000000..81b1233 --- /dev/null +++ b/.github/workflows/spm.yml @@ -0,0 +1,30 @@ +name: spm + +on: + pull_request: + schedule: + # Run every day at 11pm (PST) - cron uses UTC times + - cron: '0 7 * * *' + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.ref }} + cancel-in-progress: true + +jobs: + swift-build-run: + strategy: + matrix: + target: [iOS, macOS] + os: [macos-13] + include: + - 
os: macos-13 + xcode: Xcode_15.0.1 + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v4 + - name: Xcode + run: sudo xcode-select -s /Applications/${{ matrix.xcode }}.app/Contents/Developer + - name: Initialize xcodebuild + run: xcodebuild -list + - name: Build and unit test + run: scripts/third_party/travis/retry.sh scripts/build.sh generative-ai-swift ${{ matrix.target }} test diff --git a/.gitignore b/.gitignore index 16039ed..4ec6c36 100644 --- a/.gitignore +++ b/.gitignore @@ -1,57 +1,38 @@ -# Created by https://www.toptal.com/developers/gitignore/api/macos,xcode -# Edit at https://www.toptal.com/developers/gitignore?templates=macos,xcode - -### macOS ### -# General +# OS X .DS_Store -.AppleDouble -.LSOverride - -# Icon must end with two \r -Icon - - -# Thumbnails -._* - -# Files that might appear in the root of a volume -.DocumentRevisions-V100 -.fseventsd -.Spotlight-V100 -.TemporaryItems -.Trashes -.VolumeIcon.icns -.com.apple.timemachine.donotpresent - -# Directories potentially created on remote AFP share -.AppleDB -.AppleDesktop -Network Trash Folder -Temporary Items -.apdisk -### macOS Patch ### -# iCloud generated files -*.icloud - -### Xcode ### -## User settings +# Xcode +build/ +*.pbxuser +!default.pbxuser +*.mode1v3 +!default.mode1v3 +*.mode2v3 +!default.mode2v3 +*.perspectivev3 +!default.perspectivev3 xcuserdata/ - -## Xcode 8 and earlier -*.xcscmblueprint *.xccheckout - -### Xcode Patch ### -*.xcodeproj/* -!*.xcodeproj/project.pbxproj -!*.xcodeproj/xcshareddata/ -!*.xcodeproj/project.xcworkspace/ -!*.xcworkspace/contents.xcworkspacedata -/*.gcno -**/xcshareddata/WorkspaceSettings.xcsettings - -# End of https://www.toptal.com/developers/gitignore/api/macos,xcode - -# This file contains the PaLM API key - don't check it in -PaLM-Info.plist +profile +*.moved-aside +DerivedData +*.hmap +*.ipa +*.xcworkspace + +# Swift Package Manager +Package.resolved +**/.build +**/.swiftpm +.netrc + +# Bad sorts get generated if the package .xcscheme is not regenerated. +# Anything committed to xcshareddata gets propagated to clients. (#8167) +.swiftpm/xcode/xcshareddata/ + +# Mint package manager +Mint + +# CLI Tool +Apps/GoogleAICLI/GoogleAICLI.xcodeproj/xcshareddata/xcschemes/* +GenerativeAI-Info.plist diff --git a/Examples/GenerativeAICLI/Package.swift b/Examples/GenerativeAICLI/Package.swift new file mode 100644 index 0000000..da0e578 --- /dev/null +++ b/Examples/GenerativeAICLI/Package.swift @@ -0,0 +1,37 @@ +// swift-tools-version: 5.9 +// The swift-tools-version declares the minimum version of Swift required to build this package. + +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +import PackageDescription + +let package = Package( + name: "GenerativeAICLI", + platforms: [.macOS(.v13)], + dependencies: [ + .package(url: "https://github.com/apple/swift-argument-parser.git", from: "1.2.0"), + .package(name: "GoogleGenerativeAI", path: "../../"), + ], + targets: [ + .executableTarget( + name: "generate-content", + dependencies: [ + .product(name: "ArgumentParser", package: "swift-argument-parser"), + .product(name: "GoogleGenerativeAI", package: "GoogleGenerativeAI"), + ], + path: "Sources" + ), + ] +) diff --git a/Examples/GenerativeAICLI/Sources/GenerateContent.swift b/Examples/GenerativeAICLI/Sources/GenerateContent.swift new file mode 100644 index 0000000..aace076 --- /dev/null +++ b/Examples/GenerativeAICLI/Sources/GenerateContent.swift @@ -0,0 +1,136 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ArgumentParser +import Foundation +import GoogleGenerativeAI + +@main +struct GenerateContent: AsyncParsableCommand { + @Option(help: "The API key to use when calling the Generative Language API.") + var apiKey: String + + @Option(name: .customLong("model"), help: "The name of the model to use (e.g., \"gemini-pro\").") + var modelName: String? + + @Option(help: "The text prompt for the model in natural language.") + var textPrompt: String? + + @Option( + name: .customLong("image-path"), + help: "The file path of an image to pass to the model; must be in JPEG or PNG format.", + transform: URL.filePath(_:) + ) + var imageURL: URL? + + @Flag( + name: .customLong("streaming"), + help: "Stream response data, printing it incrementally as it's received." + ) var isStreaming = false + + @Flag( + name: .customLong("GoogleGenerativeAIDebugLogEnabled", withSingleDash: true), + help: "Enable additional debug logging." + ) var debugLogEnabled = false + + mutating func validate() throws { + if textPrompt == nil && imageURL == nil { + throw ValidationError( + "Missing expected argument(s) '--text-prompt ' and/or" + + " '--image-path '." + ) + } + } + + mutating func run() async throws { + do { + let safetySettings = [SafetySetting(harmCategory: .dangerousContent, threshold: .blockNone)] + // Let the server pick the default config. + let config = GenerationConfig( + temperature: 0.2, + topP: 0.1, + topK: 16, + candidateCount: 1, + maxOutputTokens: isStreaming ? 
nil : 256, + stopSequences: nil + ) + + let model = GenerativeModel( + name: modelNameOrDefault(), + apiKey: apiKey, + generationConfig: config, + safetySettings: safetySettings + ) + + var parts = [ModelContent.Part]() + + if let textPrompt = textPrompt { + parts.append(.text(textPrompt)) + } + + if let imageURL = imageURL { + let mimeType: String + switch imageURL.pathExtension { + case "jpg", "jpeg": + mimeType = "image/jpeg" + case "png": + mimeType = "image/png" + default: + throw CLIError.unsupportedImageType + } + let imageData = try Data(contentsOf: imageURL) + parts.append(.data(mimetype: mimeType, imageData)) + } + + let input = [ModelContent(parts: parts)] + + if isStreaming { + let contentStream = model.generateContentStream(input) + print("Generated Content :") + for try await content in contentStream { + if let text = content.text { + print(text) + } + } + } else { + let content = try await model.generateContent(input) + if let text = content.text { + print("Generated Content:\n\(text)") + } + } + } catch { + print("Generate Content Error: \(error)") + } + } + + func modelNameOrDefault() -> String { + if let modelName = modelName { + return modelName + } else if imageURL != nil { + return "gemini-pro-vision" + } else { + return "gemini-pro" + } + } +} + +enum CLIError: Error { + case unsupportedImageType +} + +private extension URL { + static func filePath(_ filePath: String) throws -> URL { + return URL(fileURLWithPath: filePath) + } +} diff --git a/Examples/GenerativeAISample/APIKey/APIKey.swift b/Examples/GenerativeAISample/APIKey/APIKey.swift new file mode 100644 index 0000000..a91e05d --- /dev/null +++ b/Examples/GenerativeAISample/APIKey/APIKey.swift @@ -0,0 +1,36 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import Foundation + +enum APIKey { + /// Fetch the API key from `GenerativeAI-Info.plist` + /// This is just *one* way how you can retrieve the API key for your app. + static var `default`: String { + guard let filePath = Bundle.main.path(forResource: "GenerativeAI-Info", ofType: "plist") + else { + fatalError("Couldn't find file 'GenerativeAI-Info.plist'.") + } + let plist = NSDictionary(contentsOfFile: filePath) + guard let value = plist?.object(forKey: "API_KEY") as? String else { + fatalError("Couldn't find key 'API_KEY' in 'GenerativeAI-Info.plist'.") + } + if value.starts(with: "_") { + fatalError( + "Follow the instructions at https://ai.google.dev/tutorials/setup to get an API key." 
+ ) + } + return value + } +} diff --git a/Examples/PaLMChat/PaLMChat/PaLM-Info-Sample.plist b/Examples/GenerativeAISample/APIKey/GenerativeAI-Info-Sample.plist similarity index 100% rename from Examples/PaLMChat/PaLMChat/PaLM-Info-Sample.plist rename to Examples/GenerativeAISample/APIKey/GenerativeAI-Info-Sample.plist diff --git a/Examples/PaLMChat/PaLMChat/Assets.xcassets/AccentColor.colorset/Contents.json b/Examples/GenerativeAISample/ChatSample/Assets.xcassets/AccentColor.colorset/Contents.json similarity index 100% rename from Examples/PaLMChat/PaLMChat/Assets.xcassets/AccentColor.colorset/Contents.json rename to Examples/GenerativeAISample/ChatSample/Assets.xcassets/AccentColor.colorset/Contents.json diff --git a/Examples/PaLMChat/PaLMChat/Assets.xcassets/AppIcon.appiconset/Contents.json b/Examples/GenerativeAISample/ChatSample/Assets.xcassets/AppIcon.appiconset/Contents.json similarity index 100% rename from Examples/PaLMChat/PaLMChat/Assets.xcassets/AppIcon.appiconset/Contents.json rename to Examples/GenerativeAISample/ChatSample/Assets.xcassets/AppIcon.appiconset/Contents.json diff --git a/Examples/PaLMChat/PaLMChat/Assets.xcassets/Contents.json b/Examples/GenerativeAISample/ChatSample/Assets.xcassets/Contents.json similarity index 100% rename from Examples/PaLMChat/PaLMChat/Assets.xcassets/Contents.json rename to Examples/GenerativeAISample/ChatSample/Assets.xcassets/Contents.json diff --git a/Examples/GenerativeAISample/ChatSample/ChatSampleApp.swift b/Examples/GenerativeAISample/ChatSample/ChatSampleApp.swift new file mode 100644 index 0000000..3cd8620 --- /dev/null +++ b/Examples/GenerativeAISample/ChatSample/ChatSampleApp.swift @@ -0,0 +1,30 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import SwiftUI + +@main +struct ChatSampleApp: App { + @StateObject + var viewModel = ConversationViewModel() + + var body: some Scene { + WindowGroup { + NavigationStack { + ConversationScreen() + .environmentObject(viewModel) + } + } + } +} diff --git a/Examples/GenerativeAISample/ChatSample/Models/ChatMessage.swift b/Examples/GenerativeAISample/ChatSample/Models/ChatMessage.swift new file mode 100644 index 0000000..6f7ab32 --- /dev/null +++ b/Examples/GenerativeAISample/ChatSample/Models/ChatMessage.swift @@ -0,0 +1,64 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +import Foundation + +enum Participant { + case system + case user +} + +struct ChatMessage: Identifiable, Equatable { + let id = UUID().uuidString + var message: String + let participant: Participant + var pending = false + + static func pending(participant: Participant) -> ChatMessage { + Self(message: "", participant: participant, pending: true) + } +} + +extension ChatMessage { + static var samples: [ChatMessage] = [ + .init(message: "Hello. What can I do for you today?", participant: .system), + .init(message: "Show me a simple loop in Swift.", participant: .user), + .init(message: """ + Sure, here is a simple loop in Swift: + + # Example 1 + ``` + for i in 1...5 { + print("Hello, world!") + } + ``` + + This loop will print the string "Hello, world!" five times. The for loop iterates over a range of numbers, + in this case the numbers from 1 to 5. The variable i is assigned each number in the range, and the code inside the loop is executed. + + **Here is another example of a simple loop in Swift:** + ```swift + var sum = 0 + for i in 1...100 { + sum += i + } + print("The sum of the numbers from 1 to 100 is \\(sum).") + ``` + + This loop calculates the sum of the numbers from 1 to 100. The variable sum is initialized to 0, and then the for loop iterates over the range of numbers from 1 to 100. The variable i is assigned each number in the range, and the value of i is added to the sum variable. After the loop has finished executing, the value of sum is printed to the console. + """, participant: .system), + ] + + static var sample = samples[0] +} diff --git a/Examples/PaLMChat/PaLMChat/Preview Content/Preview Assets.xcassets/Contents.json b/Examples/GenerativeAISample/ChatSample/Preview Content/Preview Assets.xcassets/Contents.json similarity index 100% rename from Examples/PaLMChat/PaLMChat/Preview Content/Preview Assets.xcassets/Contents.json rename to Examples/GenerativeAISample/ChatSample/Preview Content/Preview Assets.xcassets/Contents.json diff --git a/Examples/GenerativeAISample/ChatSample/Screens/ConversationScreen.swift b/Examples/GenerativeAISample/ChatSample/Screens/ConversationScreen.swift new file mode 100644 index 0000000..0d99152 --- /dev/null +++ b/Examples/GenerativeAISample/ChatSample/Screens/ConversationScreen.swift @@ -0,0 +1,132 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import GoogleGenerativeAI +import SwiftUI + +struct ConversationScreen: View { + @EnvironmentObject + var viewModel: ConversationViewModel + + @State + private var userPrompt = "" + + enum FocusedField: Hashable { + case message + } + + @FocusState + var focusedField: FocusedField? 
+ + var body: some View { + VStack { + ScrollViewReader { scrollViewProxy in + List { + ForEach(viewModel.messages) { message in + MessageView(message: message) + } + if let error = viewModel.error { + ErrorView(error: error) + .tag("errorView") + } + } + .listStyle(.plain) + .onChange(of: viewModel.messages, perform: { newValue in + if viewModel.hasError { + // wait for a short moment to make sure we can actually scroll to the bottom + DispatchQueue.main.asyncAfter(deadline: .now() + 0.05) { + withAnimation { + scrollViewProxy.scrollTo("errorView", anchor: .bottom) + } + focusedField = .message + } + } else { + guard let lastMessage = viewModel.messages.last else { return } + + // wait for a short moment to make sure we can actually scroll to the bottom + DispatchQueue.main.asyncAfter(deadline: .now() + 0.05) { + withAnimation { + scrollViewProxy.scrollTo(lastMessage.id, anchor: .bottom) + } + focusedField = .message + } + } + }) + } + HStack(alignment: .bottom) { + TextField("Message...", text: $userPrompt, axis: .vertical) + .focused($focusedField, equals: .message) + .textFieldStyle(.roundedBorder) + .frame(minHeight: CGFloat(30)) + .onSubmit { sendMessage() } + Button(action: sendOrStop) { + Image(systemName: viewModel.busy ? "stop.circle.fill" : "arrow.up.circle.fill") + .font(.title) + } + } + .padding(.horizontal) + } + .toolbar { + ToolbarItem(placement: .primaryAction) { + Button(action: newChat) { + Image(systemName: "square.and.pencil") + } + } + } + .navigationTitle("Chat sample") + .onAppear { + focusedField = .message + } + } + + private func sendMessage() { + Task { + let prompt = userPrompt + userPrompt = "" + await viewModel.sendMessage(prompt, streaming: true) + } + } + + private func sendOrStop() { + if viewModel.busy { + viewModel.stop() + } else { + sendMessage() + } + } + + private func newChat() { + viewModel.startNewChat() + } +} + +struct ConversationScreen_Previews: PreviewProvider { + struct ContainerView: View { + @StateObject var viewModel = ConversationViewModel() + + var body: some View { + ConversationScreen() + .environmentObject(viewModel) + .onAppear { + viewModel.messages = ChatMessage.samples + } + } + } + + static var previews: some View { + NavigationStack { + ConversationScreen() + } + } +} diff --git a/Examples/GenerativeAISample/ChatSample/ViewModels/ConversationViewModel.swift b/Examples/GenerativeAISample/ChatSample/ViewModels/ConversationViewModel.swift new file mode 100644 index 0000000..0c59e82 --- /dev/null +++ b/Examples/GenerativeAISample/ChatSample/ViewModels/ConversationViewModel.swift @@ -0,0 +1,130 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +import Foundation +import GoogleGenerativeAI +import UIKit + +@MainActor +class ConversationViewModel: ObservableObject { + /// This array holds both the user's and the system's chat messages + @Published var messages = [ChatMessage]() + + /// Indicates we're waiting for the model to finish + @Published var busy = false + + @Published var error: Error? + var hasError: Bool { + return error != nil + } + + private var model: GenerativeModel + private var chat: Chat + private var stopGenerating = false + + private var chatTask: Task? + + init() { + model = GenerativeModel(name: "gemini-pro", apiKey: APIKey.default) + chat = model.startChat() + } + + func sendMessage(_ text: String, streaming: Bool = true) async { + error = nil + if streaming { + await internalSendMessageStreaming(text) + } else { + await internalSendMessage(text) + } + } + + func startNewChat() { + stop() + error = nil + chat = model.startChat() + messages.removeAll() + } + + func stop() { + chatTask?.cancel() + error = nil + } + + private func internalSendMessageStreaming(_ text: String) async { + chatTask?.cancel() + + chatTask = Task { + busy = true + defer { + busy = false + } + + // first, add the user's message to the chat + let userMessage = ChatMessage(message: text, participant: .user) + messages.append(userMessage) + + // add a pending message while we're waiting for a response from the backend + let systemMessage = ChatMessage.pending(participant: .system) + messages.append(systemMessage) + + do { + let responseStream = chat.sendMessageStream(text) + for try await chunk in responseStream { + messages[messages.count - 1].pending = false + if let text = chunk.text { + messages[messages.count - 1].message += text + } + } + } catch { + self.error = error + print(error.localizedDescription) + messages.removeLast() + } + } + } + + private func internalSendMessage(_ text: String) async { + chatTask?.cancel() + + chatTask = Task { + busy = true + defer { + busy = false + } + + // first, add the user's message to the chat + let userMessage = ChatMessage(message: text, participant: .user) + messages.append(userMessage) + + // add a pending message while we're waiting for a response from the backend + let systemMessage = ChatMessage.pending(participant: .system) + messages.append(systemMessage) + + do { + var response: GenerateContentResponse? 
+ response = try await chat.sendMessage(text) + + if let responseText = response?.text { + // replace pending message with backend response + messages[messages.count - 1].message = responseText + messages[messages.count - 1].pending = false + } + } catch { + self.error = error + print(error.localizedDescription) + messages.removeLast() + } + } + } +} diff --git a/Examples/PaLMChat/PaLMChat/Views/BouncingDots.swift b/Examples/GenerativeAISample/ChatSample/Views/BouncingDots.swift similarity index 99% rename from Examples/PaLMChat/PaLMChat/Views/BouncingDots.swift rename to Examples/GenerativeAISample/ChatSample/Views/BouncingDots.swift index e21ecb4..6895e67 100644 --- a/Examples/PaLMChat/PaLMChat/Views/BouncingDots.swift +++ b/Examples/GenerativeAISample/ChatSample/Views/BouncingDots.swift @@ -23,10 +23,10 @@ struct BouncingDots: View { @State private var dot3YOffset: CGFloat = 0.0 - + let animation = Animation.easeInOut(duration: 0.8) .repeatForever(autoreverses: true) - + var body: some View { HStack(spacing: 8) { Circle() @@ -59,7 +59,7 @@ struct BouncingDots: View { } .onAppear { let baseOffset: CGFloat = -2 - + self.dot1YOffset = baseOffset self.dot2YOffset = baseOffset self.dot3YOffset = baseOffset @@ -67,7 +67,6 @@ struct BouncingDots: View { } } - struct BouncingDots_Previews: PreviewProvider { static var previews: some View { BouncingDots() diff --git a/Examples/GenerativeAISample/ChatSample/Views/ErrorDetailsView.swift b/Examples/GenerativeAISample/ChatSample/Views/ErrorDetailsView.swift new file mode 100644 index 0000000..66c7288 --- /dev/null +++ b/Examples/GenerativeAISample/ChatSample/Views/ErrorDetailsView.swift @@ -0,0 +1,213 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +import GoogleGenerativeAI +import MarkdownUI +import SwiftUI + +extension SafetyRating: Hashable { + public func hash(into hasher: inout Hasher) { + hasher.combine(category) + hasher.combine(probability) + } +} + +extension SafetySetting.HarmCategory: CustomStringConvertible { + public var description: String { + switch self { + case .dangerousContent: "Dangerous content" + case .harassment: "Harassment" + case .hateSpeech: "Hate speech" + case .sexuallyExplicit: "Sexually explicit" + case .unknown: "Unknown" + case .unspecified: "Unspecified" + } + } +} + +extension SafetyRating.HarmProbability: CustomStringConvertible { + public var description: String { + switch self { + case .high: "High" + case .low: "Low" + case .medium: "Medium" + case .negligible: "Negligible" + case .unknown: "Unknown" + case .unspecified: "Unspecified" + } + } +} + +private struct SubtitleFormRow: View { + var title: String + var value: String + + var body: some View { + VStack(alignment: .leading) { + Text(title) + .font(.subheadline) + Text(value) + } + } +} + +private struct SubtitleMarkdownFormRow: View { + var title: String + var value: String + + var body: some View { + VStack(alignment: .leading) { + Text(title) + .font(.subheadline) + Markdown(value) + } + } +} + +private struct SafetyRatingsSection: View { + var ratings: [SafetyRating] + + var body: some View { + Section("Safety ratings") { + List(ratings, id: \.self) { rating in + HStack { + Text("\(String(describing: rating.category))") + .font(.subheadline) + Spacer() + Text("\(String(describing: rating.probability))") + } + } + } + } +} + +struct ErrorDetailsView: View { + var error: Error + + var body: some View { + NavigationView { + Form { + switch error { + case let GenerateContentError.internalError(underlying: underlyingError): + Section("Error Type") { + Text("Internal error") + } + + Section("Details") { + SubtitleFormRow(title: "Error description", + value: underlyingError.localizedDescription) + } + + case let GenerateContentError.promptBlocked(response: generateContentResponse): + Section("Error Type") { + Text("Your prompt was blocked") + } + + Section("Details") { + if let reason = generateContentResponse.promptFeedback?.blockReason { + SubtitleFormRow(title: "Reason for blocking", value: reason.rawValue) + } + + if let text = generateContentResponse.text { + SubtitleMarkdownFormRow(title: "Last chunk for the response", value: text) + } + } + + if let ratings = generateContentResponse.candidates.first?.safetyRatings { + SafetyRatingsSection(ratings: ratings) + } + + case let GenerateContentError.responseStoppedEarly( + reason: finishReason, + response: generateContentResponse + ): + + Section("Error Type") { + Text("Response stopped early") + } + + Section("Details") { + SubtitleFormRow(title: "Reason for finishing early", value: finishReason.rawValue) + + if let text = generateContentResponse.text { + SubtitleMarkdownFormRow(title: "Last chunk for the response", value: text) + } + } + + if let ratings = generateContentResponse.candidates.first?.safetyRatings { + SafetyRatingsSection(ratings: ratings) + } + + default: + Section("Error Type") { + Text("Some other error") + } + + Section("Details") { + SubtitleFormRow(title: "Error description", value: error.localizedDescription) + } + } + } + .navigationTitle("Error details") + .navigationBarTitleDisplayMode(.inline) + } + } +} + +#Preview { + NavigationView { + let _ = GenerateContentError.promptBlocked( + response: GenerateContentResponse(candidates: [ + CandidateResponse(content: 
ModelContent(role: "model", [ + """ + A _hypothetical_ model response. + Cillum ex aliqua amet aliquip labore amet eiusmod consectetur reprehenderit sit commodo. + """, + ]), + safetyRatings: [ + SafetyRating(category: .dangerousContent, probability: .high), + SafetyRating(category: .harassment, probability: .low), + SafetyRating(category: .hateSpeech, probability: .low), + SafetyRating(category: .sexuallyExplicit, probability: .low), + ], + finishReason: FinishReason.other, + citationMetadata: nil), + ], + promptFeedback: nil) + ) + + let errorFinishedEarly = GenerateContentError.responseStoppedEarly( + reason: .maxTokens, + response: GenerateContentResponse(candidates: [ + CandidateResponse(content: ModelContent(role: "model", [ + """ + A _hypothetical_ model response. + Cillum ex aliqua amet aliquip labore amet eiusmod consectetur reprehenderit sit commodo. + """, + ]), + safetyRatings: [ + SafetyRating(category: .dangerousContent, probability: .high), + SafetyRating(category: .harassment, probability: .low), + SafetyRating(category: .hateSpeech, probability: .low), + SafetyRating(category: .sexuallyExplicit, probability: .low), + ], + finishReason: FinishReason.maxTokens, + citationMetadata: nil), + ], + promptFeedback: nil) + ) + + ErrorDetailsView(error: errorFinishedEarly) + } +} diff --git a/Examples/GenerativeAISample/ChatSample/Views/ErrorView.swift b/Examples/GenerativeAISample/ChatSample/Views/ErrorView.swift new file mode 100644 index 0000000..aafdcd2 --- /dev/null +++ b/Examples/GenerativeAISample/ChatSample/Views/ErrorView.swift @@ -0,0 +1,65 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import GoogleGenerativeAI +import SwiftUI + +struct ErrorView: View { + var error: Error + @State private var isDetailsSheetPresented = false + var body: some View { + HStack { + Text("An error occurred.") + Button(action: { isDetailsSheetPresented.toggle() }) { + Image(systemName: "info.circle") + } + } + .frame(maxWidth: .infinity, alignment: .center) + .listRowSeparator(.hidden) + .sheet(isPresented: $isDetailsSheetPresented) { + ErrorDetailsView(error: error) + } + } +} + +#Preview { + NavigationView { + let errorPromptBlocked = GenerateContentError.promptBlocked( + response: GenerateContentResponse(candidates: [ + CandidateResponse(content: ModelContent(role: "model", [ + """ + A _hypothetical_ model response. + Cillum ex aliqua amet aliquip labore amet eiusmod consectetur reprehenderit sit commodo. 
+ """, + ]), + safetyRatings: [ + SafetyRating(category: .dangerousContent, probability: .high), + SafetyRating(category: .harassment, probability: .low), + SafetyRating(category: .hateSpeech, probability: .low), + SafetyRating(category: .sexuallyExplicit, probability: .low), + ], + finishReason: FinishReason.other, + citationMetadata: nil), + ], + promptFeedback: nil) + ) + List { + MessageView(message: ChatMessage.samples[0]) + MessageView(message: ChatMessage.samples[1]) + ErrorView(error: errorPromptBlocked) + } + .listStyle(.plain) + .navigationTitle("Chat sample") + } +} diff --git a/Examples/PaLMChat/PaLMChat/Views/MessageView.swift b/Examples/GenerativeAISample/ChatSample/Views/MessageView.swift similarity index 52% rename from Examples/PaLMChat/PaLMChat/Views/MessageView.swift rename to Examples/GenerativeAISample/ChatSample/Views/MessageView.swift index 4e7827f..038ba81 100644 --- a/Examples/PaLMChat/PaLMChat/Views/MessageView.swift +++ b/Examples/GenerativeAISample/ChatSample/Views/MessageView.swift @@ -12,33 +12,55 @@ // See the License for the specific language governing permissions and // limitations under the License. +import MarkdownUI import SwiftUI struct RoundedCorner: Shape { var radius: CGFloat = .infinity var corners: UIRectCorner = .allCorners - + func path(in rect: CGRect) -> Path { - let path = UIBezierPath(roundedRect: rect, byRoundingCorners: corners, cornerRadii: CGSize(width: radius, height: radius)) + let path = UIBezierPath( + roundedRect: rect, + byRoundingCorners: corners, + cornerRadii: CGSize(width: radius, height: radius) + ) return Path(path.cgPath) } } extension View { func roundedCorner(_ radius: CGFloat, corners: UIRectCorner) -> some View { - clipShape(RoundedCorner(radius: radius, corners: corners) ) + clipShape(RoundedCorner(radius: radius, corners: corners)) } } struct MessageContentView: View { var message: ChatMessage - + var body: some View { if message.pending { BouncingDots() - } - else { - Text(message.message) + } else { + Markdown(message.message) + .markdownTextStyle { + FontFamilyVariant(.normal) + FontSize(.em(0.85)) + ForegroundColor(.white) + } + .markdownBlockStyle(\.codeBlock) { configuration in + configuration.label + .relativeLineSpacing(.em(0.25)) + .markdownTextStyle { + FontFamilyVariant(.monospaced) + FontSize(.em(0.85)) + ForegroundColor(Color(.label)) + } + .padding() + .background(Color(.secondarySystemBackground)) + .clipShape(RoundedRectangle(cornerRadius: 8)) + .markdownMargin(top: .zero, bottom: .em(0.8)) + } } } } @@ -57,9 +79,9 @@ struct MessageView: View { .foregroundColor(.white) .roundedCorner(10, corners: [ - .topLeft, - .topRight, - message.participant == .system ? .bottomRight : .bottomLeft + .topLeft, + .topRight, + message.participant == .system ? 
.bottomRight : .bottomLeft, ]) if message.participant == .system { Spacer() @@ -71,9 +93,15 @@ struct MessageView: View { struct MessageView_Previews: PreviewProvider { static var previews: some View { - List { - MessageView(message: ChatMessage.sample) - MessageView(message: ChatMessage(message: "Hello!", participant: .system, pending: true)) + NavigationView { + List { + MessageView(message: ChatMessage.samples[0]) + MessageView(message: ChatMessage.samples[1]) + MessageView(message: ChatMessage.samples[2]) + MessageView(message: ChatMessage(message: "Hello!", participant: .system, pending: true)) + } + .listStyle(.plain) + .navigationTitle("Chat sample") } } } diff --git a/Examples/PaLMEmbeddings/PaLMEmbeddings/Assets.xcassets/AccentColor.colorset/Contents.json b/Examples/GenerativeAISample/GenerativeAIMultimodalSample/Assets.xcassets/AccentColor.colorset/Contents.json similarity index 100% rename from Examples/PaLMEmbeddings/PaLMEmbeddings/Assets.xcassets/AccentColor.colorset/Contents.json rename to Examples/GenerativeAISample/GenerativeAIMultimodalSample/Assets.xcassets/AccentColor.colorset/Contents.json diff --git a/Examples/PaLMEmbeddings/PaLMEmbeddings/Assets.xcassets/AppIcon.appiconset/Contents.json b/Examples/GenerativeAISample/GenerativeAIMultimodalSample/Assets.xcassets/AppIcon.appiconset/Contents.json similarity index 100% rename from Examples/PaLMEmbeddings/PaLMEmbeddings/Assets.xcassets/AppIcon.appiconset/Contents.json rename to Examples/GenerativeAISample/GenerativeAIMultimodalSample/Assets.xcassets/AppIcon.appiconset/Contents.json diff --git a/Examples/PaLMEmbeddings/PaLMEmbeddings/Assets.xcassets/Contents.json b/Examples/GenerativeAISample/GenerativeAIMultimodalSample/Assets.xcassets/Contents.json similarity index 100% rename from Examples/PaLMEmbeddings/PaLMEmbeddings/Assets.xcassets/Contents.json rename to Examples/GenerativeAISample/GenerativeAIMultimodalSample/Assets.xcassets/Contents.json diff --git a/Examples/PaLMEmbeddings/PaLMEmbeddings/PaLMEmbeddingsApp.swift b/Examples/GenerativeAISample/GenerativeAIMultimodalSample/GenerativeAIMultimodalSampleApp.swift similarity index 85% rename from Examples/PaLMEmbeddings/PaLMEmbeddings/PaLMEmbeddingsApp.swift rename to Examples/GenerativeAISample/GenerativeAIMultimodalSample/GenerativeAIMultimodalSampleApp.swift index 08777ab..0403873 100644 --- a/Examples/PaLMEmbeddings/PaLMEmbeddings/PaLMEmbeddingsApp.swift +++ b/Examples/GenerativeAISample/GenerativeAIMultimodalSample/GenerativeAIMultimodalSampleApp.swift @@ -15,10 +15,12 @@ import SwiftUI @main -struct PaLMEmbeddingsApp: App { +struct GenerativeAIMultimodalSampleApp: App { var body: some Scene { WindowGroup { - EmbeddingsScreen() + NavigationStack { + PhotoReasoningScreen() + } } } } diff --git a/Examples/PaLMEmbeddings/PaLMEmbeddings/Preview Content/Preview Assets.xcassets/Contents.json b/Examples/GenerativeAISample/GenerativeAIMultimodalSample/Preview Content/Preview Assets.xcassets/Contents.json similarity index 100% rename from Examples/PaLMEmbeddings/PaLMEmbeddings/Preview Content/Preview Assets.xcassets/Contents.json rename to Examples/GenerativeAISample/GenerativeAIMultimodalSample/Preview Content/Preview Assets.xcassets/Contents.json diff --git a/Examples/GenerativeAISample/GenerativeAIMultimodalSample/Screens/PhotoReasoningScreen.swift b/Examples/GenerativeAISample/GenerativeAIMultimodalSample/Screens/PhotoReasoningScreen.swift new file mode 100644 index 0000000..98f3275 --- /dev/null +++ 
b/Examples/GenerativeAISample/GenerativeAIMultimodalSample/Screens/PhotoReasoningScreen.swift @@ -0,0 +1,65 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import GenerativeAIUIComponents +import MarkdownUI +import PhotosUI +import SwiftUI + +struct PhotoReasoningScreen: View { + @StateObject var viewModel = PhotoReasoningViewModel() + + var body: some View { + VStack { + MultimodalInputField(text: $viewModel.userInput, selection: $viewModel.selectedItems) + .onSubmit { + onSendTapped() + } + + ScrollViewReader { scrollViewProxy in + List { + if let outputText = viewModel.outputText { + HStack(alignment: .top) { + if viewModel.inProgress { + ProgressView() + } else { + Image(systemName: "cloud.circle.fill") + .font(.title2) + } + + Markdown("\(outputText)") + } + .listRowSeparator(.hidden) + } + } + .listStyle(.plain) + } + } + .navigationTitle("Multimodal sample") + } + + // MARK: - Actions + + private func onSendTapped() { + Task { + await viewModel.reason() + } + } +} + +#Preview { + NavigationStack { + PhotoReasoningScreen() + } +} diff --git a/Examples/GenerativeAISample/GenerativeAIMultimodalSample/ViewModels/PhotoReasoningViewModel.swift b/Examples/GenerativeAISample/GenerativeAIMultimodalSample/ViewModels/PhotoReasoningViewModel.swift new file mode 100644 index 0000000..e4613d2 --- /dev/null +++ b/Examples/GenerativeAISample/GenerativeAIMultimodalSample/ViewModels/PhotoReasoningViewModel.swift @@ -0,0 +1,83 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import Foundation +import GoogleGenerativeAI +import OSLog +import PhotosUI +import SwiftUI + +@MainActor +class PhotoReasoningViewModel: ObservableObject { + private var logger = Logger(subsystem: Bundle.main.bundleIdentifier!, category: "generative-ai") + + @Published + var userInput: String = "" + + @Published + var selectedItems = [PhotosPickerItem]() + + @Published + var outputText: String? = nil + + @Published + var errorMessage: String? + + @Published + var inProgress = false + + private var model: GenerativeModel? 
+ + init() { + model = GenerativeModel(name: "gemini-pro-vision", apiKey: APIKey.default) + } + + func reason() async { + defer { + inProgress = false + } + guard let model else { + return + } + + do { + inProgress = true + errorMessage = nil + outputText = "" + + let prompt = "Look at the image(s), and then answer the following question: \(userInput)" + + var images = [PartsRepresentable]() + for item in selectedItems { + if let data = try? await item.loadTransferable(type: Data.self) { + images.append(ModelContent.Part.png(data)) + } + } + + let outputContentStream = model.generateContentStream(prompt, images) + + // stream response + for try await outputContent in outputContentStream { + guard let line = outputContent.text else { + return + } + + outputText = (outputText ?? "") + line + } + } catch { + logger.error("\(error.localizedDescription)") + errorMessage = error.localizedDescription + } + } +} diff --git a/Examples/GenerativeAISample/GenerativeAISample.xcodeproj/project.pbxproj b/Examples/GenerativeAISample/GenerativeAISample.xcodeproj/project.pbxproj new file mode 100644 index 0000000..b3043c9 --- /dev/null +++ b/Examples/GenerativeAISample/GenerativeAISample.xcodeproj/project.pbxproj @@ -0,0 +1,1145 @@ +// !$*UTF8*$! +{ + archiveVersion = 1; + classes = { + }; + objectVersion = 60; + objects = { + +/* Begin PBXBuildFile section */ + 880266762B0FC39000CF7CB6 /* PhotoReasoningViewModel.swift in Sources */ = {isa = PBXBuildFile; fileRef = 8802666F2B0FC39000CF7CB6 /* PhotoReasoningViewModel.swift */; }; + 880266792B0FC39000CF7CB6 /* PhotoReasoningScreen.swift in Sources */ = {isa = PBXBuildFile; fileRef = 880266752B0FC39000CF7CB6 /* PhotoReasoningScreen.swift */; }; + 881B753A2B0FDCE600528058 /* APIKey.swift in Sources */ = {isa = PBXBuildFile; fileRef = 88209C192B0FBDC300F64795 /* APIKey.swift */; }; + 881B753B2B0FDCE600528058 /* APIKey.swift in Sources */ = {isa = PBXBuildFile; fileRef = 88209C192B0FBDC300F64795 /* APIKey.swift */; }; + 88209C152B0F928F00F64795 /* GenerativeAI-Info.plist in Resources */ = {isa = PBXBuildFile; fileRef = 88209C142B0F928F00F64795 /* GenerativeAI-Info.plist */; }; + 88209C162B0F92AF00F64795 /* GenerativeAI-Info.plist in Resources */ = {isa = PBXBuildFile; fileRef = 88209C142B0F928F00F64795 /* GenerativeAI-Info.plist */; }; + 88209C172B0F92AF00F64795 /* GenerativeAI-Info.plist in Resources */ = {isa = PBXBuildFile; fileRef = 88209C142B0F928F00F64795 /* GenerativeAI-Info.plist */; }; + 88209C1E2B0FBDC300F64795 /* APIKey.swift in Sources */ = {isa = PBXBuildFile; fileRef = 88209C192B0FBDC300F64795 /* APIKey.swift */; }; + 88209C1F2B0FBDC300F64795 /* SummarizeScreen.swift in Sources */ = {isa = PBXBuildFile; fileRef = 88209C1B2B0FBDC300F64795 /* SummarizeScreen.swift */; }; + 88209C202B0FBDC300F64795 /* SummarizeViewModel.swift in Sources */ = {isa = PBXBuildFile; fileRef = 88209C1D2B0FBDC300F64795 /* SummarizeViewModel.swift */; }; + 88209C242B0FBE1700F64795 /* MarkdownUI in Frameworks */ = {isa = PBXBuildFile; productRef = 88209C232B0FBE1700F64795 /* MarkdownUI */; }; + 88209C262B0FBF7100F64795 /* GoogleGenerativeAI in Frameworks */ = {isa = PBXBuildFile; productRef = 88209C252B0FBF7100F64795 /* GoogleGenerativeAI */; }; + 88209C292B0FBFA200F64795 /* GoogleGenerativeAI in Frameworks */ = {isa = PBXBuildFile; productRef = 88209C282B0FBFA200F64795 /* GoogleGenerativeAI */; }; + 88263BEF2B239BFE008AB09B /* ErrorView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 88263BEE2B239BFE008AB09B /* ErrorView.swift */; }; + 88263BF02B239C09008AB09B /* 
ErrorView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 88263BEE2B239BFE008AB09B /* ErrorView.swift */; }; + 88263BF12B239C11008AB09B /* ErrorDetailsView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 889873842B208563005B4896 /* ErrorDetailsView.swift */; }; + 8848C8332B0D04BC007B434F /* GenerativeAISampleApp.swift in Sources */ = {isa = PBXBuildFile; fileRef = 8848C8322B0D04BC007B434F /* GenerativeAISampleApp.swift */; }; + 8848C8352B0D04BC007B434F /* ContentView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 8848C8342B0D04BC007B434F /* ContentView.swift */; }; + 8848C8372B0D04BD007B434F /* Assets.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = 8848C8362B0D04BD007B434F /* Assets.xcassets */; }; + 8848C83A2B0D04BD007B434F /* Preview Assets.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = 8848C8392B0D04BD007B434F /* Preview Assets.xcassets */; }; + 8848C8472B0D051E007B434F /* GenerativeAITextSampleApp.swift in Sources */ = {isa = PBXBuildFile; fileRef = 8848C8462B0D051E007B434F /* GenerativeAITextSampleApp.swift */; }; + 8848C84B2B0D051F007B434F /* Assets.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = 8848C84A2B0D051F007B434F /* Assets.xcassets */; }; + 8848C84E2B0D051F007B434F /* Preview Assets.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = 8848C84D2B0D051F007B434F /* Preview Assets.xcassets */; }; + 8848C8592B0D056C007B434F /* GenerativeAIMultimodalSampleApp.swift in Sources */ = {isa = PBXBuildFile; fileRef = 8848C8582B0D056C007B434F /* GenerativeAIMultimodalSampleApp.swift */; }; + 8848C85D2B0D056D007B434F /* Assets.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = 8848C85C2B0D056D007B434F /* Assets.xcassets */; }; + 8848C8602B0D056D007B434F /* Preview Assets.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = 8848C85F2B0D056D007B434F /* Preview Assets.xcassets */; }; + 886F95D52B17BA010036F07A /* SummarizeScreen.swift in Sources */ = {isa = PBXBuildFile; fileRef = 88209C1B2B0FBDC300F64795 /* SummarizeScreen.swift */; }; + 886F95D62B17BA010036F07A /* SummarizeViewModel.swift in Sources */ = {isa = PBXBuildFile; fileRef = 88209C1D2B0FBDC300F64795 /* SummarizeViewModel.swift */; }; + 886F95D82B17BA420036F07A /* MarkdownUI in Frameworks */ = {isa = PBXBuildFile; productRef = 886F95D72B17BA420036F07A /* MarkdownUI */; }; + 886F95DA2B17BA8D0036F07A /* GoogleGenerativeAI in Frameworks */ = {isa = PBXBuildFile; productRef = 886F95D92B17BA8D0036F07A /* GoogleGenerativeAI */; }; + 886F95DB2B17BAEF0036F07A /* PhotoReasoningViewModel.swift in Sources */ = {isa = PBXBuildFile; fileRef = 8802666F2B0FC39000CF7CB6 /* PhotoReasoningViewModel.swift */; }; + 886F95DC2B17BAEF0036F07A /* PhotoReasoningScreen.swift in Sources */ = {isa = PBXBuildFile; fileRef = 880266752B0FC39000CF7CB6 /* PhotoReasoningScreen.swift */; }; + 886F95DD2B17D5010036F07A /* MessageView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 88E10F5A2B11133E00C08E95 /* MessageView.swift */; }; + 886F95DE2B17D5010036F07A /* ChatMessage.swift in Sources */ = {isa = PBXBuildFile; fileRef = 88E10F582B11131900C08E95 /* ChatMessage.swift */; }; + 886F95DF2B17D5010036F07A /* BouncingDots.swift in Sources */ = {isa = PBXBuildFile; fileRef = 88E10F5C2B11135000C08E95 /* BouncingDots.swift */; }; + 886F95E02B17D5010036F07A /* ConversationViewModel.swift in Sources */ = {isa = PBXBuildFile; fileRef = 88E10F562B1112F600C08E95 /* ConversationViewModel.swift */; }; + 886F95E12B17D5010036F07A /* ConversationScreen.swift in Sources */ = {isa = PBXBuildFile; fileRef = 
88E10F542B1112CA00C08E95 /* ConversationScreen.swift */; }; + 886F95E32B17D6630036F07A /* GenerativeAIUIComponents in Frameworks */ = {isa = PBXBuildFile; productRef = 886F95E22B17D6630036F07A /* GenerativeAIUIComponents */; }; + 889873852B208563005B4896 /* ErrorDetailsView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 889873842B208563005B4896 /* ErrorDetailsView.swift */; }; + 88B8A91E2B0FC55100424728 /* MarkdownUI in Frameworks */ = {isa = PBXBuildFile; productRef = 88B8A91D2B0FC55100424728 /* MarkdownUI */; }; + 88B8A9372B0FCBE700424728 /* GenerativeAIUIComponents in Frameworks */ = {isa = PBXBuildFile; productRef = 88B8A9362B0FCBE700424728 /* GenerativeAIUIComponents */; }; + 88D9474D2B14F27E008B5580 /* MarkdownUI in Frameworks */ = {isa = PBXBuildFile; productRef = 88D9474C2B14F27E008B5580 /* MarkdownUI */; }; + 88E10F452B110D5300C08E95 /* ChatSampleApp.swift in Sources */ = {isa = PBXBuildFile; fileRef = 88E10F442B110D5300C08E95 /* ChatSampleApp.swift */; }; + 88E10F492B110D5400C08E95 /* Assets.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = 88E10F482B110D5400C08E95 /* Assets.xcassets */; }; + 88E10F4C2B110D5400C08E95 /* Preview Assets.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = 88E10F4B2B110D5400C08E95 /* Preview Assets.xcassets */; }; + 88E10F552B1112CA00C08E95 /* ConversationScreen.swift in Sources */ = {isa = PBXBuildFile; fileRef = 88E10F542B1112CA00C08E95 /* ConversationScreen.swift */; }; + 88E10F572B1112F600C08E95 /* ConversationViewModel.swift in Sources */ = {isa = PBXBuildFile; fileRef = 88E10F562B1112F600C08E95 /* ConversationViewModel.swift */; }; + 88E10F592B11131900C08E95 /* ChatMessage.swift in Sources */ = {isa = PBXBuildFile; fileRef = 88E10F582B11131900C08E95 /* ChatMessage.swift */; }; + 88E10F5B2B11133E00C08E95 /* MessageView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 88E10F5A2B11133E00C08E95 /* MessageView.swift */; }; + 88E10F5D2B11135000C08E95 /* BouncingDots.swift in Sources */ = {isa = PBXBuildFile; fileRef = 88E10F5C2B11135000C08E95 /* BouncingDots.swift */; }; + 88E10F5E2B11140200C08E95 /* GenerativeAI-Info.plist in Resources */ = {isa = PBXBuildFile; fileRef = 88209C142B0F928F00F64795 /* GenerativeAI-Info.plist */; }; + 88E10F5F2B11140500C08E95 /* APIKey.swift in Sources */ = {isa = PBXBuildFile; fileRef = 88209C192B0FBDC300F64795 /* APIKey.swift */; }; + 88E10F612B11162B00C08E95 /* GoogleGenerativeAI in Frameworks */ = {isa = PBXBuildFile; productRef = 88E10F602B11162B00C08E95 /* GoogleGenerativeAI */; }; +/* End PBXBuildFile section */ + +/* Begin PBXFileReference section */ + 8802666F2B0FC39000CF7CB6 /* PhotoReasoningViewModel.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = PhotoReasoningViewModel.swift; sourceTree = ""; }; + 880266752B0FC39000CF7CB6 /* PhotoReasoningScreen.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = PhotoReasoningScreen.swift; sourceTree = ""; }; + 88209C142B0F928F00F64795 /* GenerativeAI-Info.plist */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.plist.xml; path = "GenerativeAI-Info.plist"; sourceTree = ""; }; + 88209C192B0FBDC300F64795 /* APIKey.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = APIKey.swift; sourceTree = ""; }; + 88209C1B2B0FBDC300F64795 /* SummarizeScreen.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = SummarizeScreen.swift; sourceTree = ""; }; + 
88209C1D2B0FBDC300F64795 /* SummarizeViewModel.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = SummarizeViewModel.swift; sourceTree = ""; }; + 88263BEE2B239BFE008AB09B /* ErrorView.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ErrorView.swift; sourceTree = ""; }; + 8848C82F2B0D04BC007B434F /* GenerativeAISample.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = GenerativeAISample.app; sourceTree = BUILT_PRODUCTS_DIR; }; + 8848C8322B0D04BC007B434F /* GenerativeAISampleApp.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = GenerativeAISampleApp.swift; sourceTree = ""; }; + 8848C8342B0D04BC007B434F /* ContentView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ContentView.swift; sourceTree = ""; }; + 8848C8362B0D04BD007B434F /* Assets.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Assets.xcassets; sourceTree = ""; }; + 8848C8392B0D04BD007B434F /* Preview Assets.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = "Preview Assets.xcassets"; sourceTree = ""; }; + 8848C8442B0D051E007B434F /* GenerativeAITextSample.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = GenerativeAITextSample.app; sourceTree = BUILT_PRODUCTS_DIR; }; + 8848C8462B0D051E007B434F /* GenerativeAITextSampleApp.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = GenerativeAITextSampleApp.swift; sourceTree = ""; }; + 8848C84A2B0D051F007B434F /* Assets.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Assets.xcassets; sourceTree = ""; }; + 8848C84D2B0D051F007B434F /* Preview Assets.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = "Preview Assets.xcassets"; sourceTree = ""; }; + 8848C8562B0D056C007B434F /* GenerativeAIMultimodalSample.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = GenerativeAIMultimodalSample.app; sourceTree = BUILT_PRODUCTS_DIR; }; + 8848C8582B0D056C007B434F /* GenerativeAIMultimodalSampleApp.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = GenerativeAIMultimodalSampleApp.swift; sourceTree = ""; }; + 8848C85C2B0D056D007B434F /* Assets.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Assets.xcassets; sourceTree = ""; }; + 8848C85F2B0D056D007B434F /* Preview Assets.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = "Preview Assets.xcassets"; sourceTree = ""; }; + 889873842B208563005B4896 /* ErrorDetailsView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ErrorDetailsView.swift; sourceTree = ""; }; + 88B8A9352B0FCBA700424728 /* GenerativeAIUIComponents */ = {isa = PBXFileReference; lastKnownFileType = wrapper; path = GenerativeAIUIComponents; sourceTree = ""; }; + 88E10F422B110D5300C08E95 /* ChatSample.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = ChatSample.app; sourceTree = BUILT_PRODUCTS_DIR; }; + 88E10F442B110D5300C08E95 /* ChatSampleApp.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ChatSampleApp.swift; sourceTree = ""; }; + 88E10F482B110D5400C08E95 /* Assets.xcassets */ = {isa = 
PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Assets.xcassets; sourceTree = ""; }; + 88E10F4B2B110D5400C08E95 /* Preview Assets.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = "Preview Assets.xcassets"; sourceTree = ""; }; + 88E10F542B1112CA00C08E95 /* ConversationScreen.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ConversationScreen.swift; sourceTree = ""; }; + 88E10F562B1112F600C08E95 /* ConversationViewModel.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ConversationViewModel.swift; sourceTree = ""; }; + 88E10F582B11131900C08E95 /* ChatMessage.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ChatMessage.swift; sourceTree = ""; }; + 88E10F5A2B11133E00C08E95 /* MessageView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = MessageView.swift; sourceTree = ""; }; + 88E10F5C2B11135000C08E95 /* BouncingDots.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = BouncingDots.swift; sourceTree = ""; }; +/* End PBXFileReference section */ + +/* Begin PBXFrameworksBuildPhase section */ + 8848C82C2B0D04BC007B434F /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 886F95D82B17BA420036F07A /* MarkdownUI in Frameworks */, + 886F95DA2B17BA8D0036F07A /* GoogleGenerativeAI in Frameworks */, + 886F95E32B17D6630036F07A /* GenerativeAIUIComponents in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 8848C8412B0D051E007B434F /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 88209C242B0FBE1700F64795 /* MarkdownUI in Frameworks */, + 88209C262B0FBF7100F64795 /* GoogleGenerativeAI in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 8848C8532B0D056C007B434F /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 88209C292B0FBFA200F64795 /* GoogleGenerativeAI in Frameworks */, + 88B8A91E2B0FC55100424728 /* MarkdownUI in Frameworks */, + 88B8A9372B0FCBE700424728 /* GenerativeAIUIComponents in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 88E10F3F2B110D5300C08E95 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 88E10F612B11162B00C08E95 /* GoogleGenerativeAI in Frameworks */, + 88D9474D2B14F27E008B5580 /* MarkdownUI in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXFrameworksBuildPhase section */ + +/* Begin PBXGroup section */ + 8802666E2B0FC39000CF7CB6 /* ViewModels */ = { + isa = PBXGroup; + children = ( + 8802666F2B0FC39000CF7CB6 /* PhotoReasoningViewModel.swift */, + ); + path = ViewModels; + sourceTree = ""; + }; + 880266742B0FC39000CF7CB6 /* Screens */ = { + isa = PBXGroup; + children = ( + 880266752B0FC39000CF7CB6 /* PhotoReasoningScreen.swift */, + ); + path = Screens; + sourceTree = ""; + }; + 88209C1A2B0FBDC300F64795 /* Screens */ = { + isa = PBXGroup; + children = ( + 88209C1B2B0FBDC300F64795 /* SummarizeScreen.swift */, + ); + path = Screens; + sourceTree = ""; + }; + 88209C1C2B0FBDC300F64795 /* ViewModels */ = { + isa = PBXGroup; + children = ( + 88209C1D2B0FBDC300F64795 /* SummarizeViewModel.swift */, + ); + path = ViewModels; + sourceTree = ""; + }; + 88209C222B0FBE1700F64795 /* Frameworks */ = { + isa = PBXGroup; + children = ( + ); + name = Frameworks; + sourceTree = ""; + }; + 8848C8262B0D04BC007B434F 
= { + isa = PBXGroup; + children = ( + 88B8A9352B0FCBA700424728 /* GenerativeAIUIComponents */, + 88B8A91F2B0FC73700424728 /* APIKey */, + 8848C8312B0D04BC007B434F /* GenerativeAISample */, + 8848C8452B0D051E007B434F /* GenerativeAITextSample */, + 8848C8572B0D056C007B434F /* GenerativeAIMultimodalSample */, + 88E10F432B110D5300C08E95 /* ChatSample */, + 8848C8302B0D04BC007B434F /* Products */, + 88209C222B0FBE1700F64795 /* Frameworks */, + ); + sourceTree = ""; + }; + 8848C8302B0D04BC007B434F /* Products */ = { + isa = PBXGroup; + children = ( + 8848C82F2B0D04BC007B434F /* GenerativeAISample.app */, + 8848C8442B0D051E007B434F /* GenerativeAITextSample.app */, + 8848C8562B0D056C007B434F /* GenerativeAIMultimodalSample.app */, + 88E10F422B110D5300C08E95 /* ChatSample.app */, + ); + name = Products; + sourceTree = ""; + }; + 8848C8312B0D04BC007B434F /* GenerativeAISample */ = { + isa = PBXGroup; + children = ( + 8848C8322B0D04BC007B434F /* GenerativeAISampleApp.swift */, + 8848C8342B0D04BC007B434F /* ContentView.swift */, + 8848C8362B0D04BD007B434F /* Assets.xcassets */, + 8848C8382B0D04BD007B434F /* Preview Content */, + ); + path = GenerativeAISample; + sourceTree = ""; + }; + 8848C8382B0D04BD007B434F /* Preview Content */ = { + isa = PBXGroup; + children = ( + 8848C8392B0D04BD007B434F /* Preview Assets.xcassets */, + ); + path = "Preview Content"; + sourceTree = ""; + }; + 8848C8452B0D051E007B434F /* GenerativeAITextSample */ = { + isa = PBXGroup; + children = ( + 88209C1C2B0FBDC300F64795 /* ViewModels */, + 88209C1A2B0FBDC300F64795 /* Screens */, + 8848C8462B0D051E007B434F /* GenerativeAITextSampleApp.swift */, + 8848C84A2B0D051F007B434F /* Assets.xcassets */, + 8848C84C2B0D051F007B434F /* Preview Content */, + ); + path = GenerativeAITextSample; + sourceTree = ""; + }; + 8848C84C2B0D051F007B434F /* Preview Content */ = { + isa = PBXGroup; + children = ( + 8848C84D2B0D051F007B434F /* Preview Assets.xcassets */, + ); + path = "Preview Content"; + sourceTree = ""; + }; + 8848C8572B0D056C007B434F /* GenerativeAIMultimodalSample */ = { + isa = PBXGroup; + children = ( + 8802666E2B0FC39000CF7CB6 /* ViewModels */, + 880266742B0FC39000CF7CB6 /* Screens */, + 8848C8582B0D056C007B434F /* GenerativeAIMultimodalSampleApp.swift */, + 8848C85C2B0D056D007B434F /* Assets.xcassets */, + 8848C85E2B0D056D007B434F /* Preview Content */, + ); + path = GenerativeAIMultimodalSample; + sourceTree = ""; + }; + 8848C85E2B0D056D007B434F /* Preview Content */ = { + isa = PBXGroup; + children = ( + 8848C85F2B0D056D007B434F /* Preview Assets.xcassets */, + ); + path = "Preview Content"; + sourceTree = ""; + }; + 88B8A91F2B0FC73700424728 /* APIKey */ = { + isa = PBXGroup; + children = ( + 88209C192B0FBDC300F64795 /* APIKey.swift */, + 88209C142B0F928F00F64795 /* GenerativeAI-Info.plist */, + ); + path = APIKey; + sourceTree = ""; + }; + 88E10F432B110D5300C08E95 /* ChatSample */ = { + isa = PBXGroup; + children = ( + 88E10F522B11124A00C08E95 /* Models */, + 88E10F502B11123600C08E95 /* ViewModels */, + 88E10F512B11124100C08E95 /* Views */, + 88E10F532B1112B900C08E95 /* Screens */, + 88E10F442B110D5300C08E95 /* ChatSampleApp.swift */, + 88E10F482B110D5400C08E95 /* Assets.xcassets */, + 88E10F4A2B110D5400C08E95 /* Preview Content */, + ); + path = ChatSample; + sourceTree = ""; + }; + 88E10F4A2B110D5400C08E95 /* Preview Content */ = { + isa = PBXGroup; + children = ( + 88E10F4B2B110D5400C08E95 /* Preview Assets.xcassets */, + ); + path = "Preview Content"; + sourceTree = ""; + }; + 88E10F502B11123600C08E95 /* ViewModels 
*/ = { + isa = PBXGroup; + children = ( + 88E10F562B1112F600C08E95 /* ConversationViewModel.swift */, + ); + path = ViewModels; + sourceTree = ""; + }; + 88E10F512B11124100C08E95 /* Views */ = { + isa = PBXGroup; + children = ( + 88263BEE2B239BFE008AB09B /* ErrorView.swift */, + 88E10F5A2B11133E00C08E95 /* MessageView.swift */, + 88E10F5C2B11135000C08E95 /* BouncingDots.swift */, + 889873842B208563005B4896 /* ErrorDetailsView.swift */, + ); + path = Views; + sourceTree = ""; + }; + 88E10F522B11124A00C08E95 /* Models */ = { + isa = PBXGroup; + children = ( + 88E10F582B11131900C08E95 /* ChatMessage.swift */, + ); + path = Models; + sourceTree = ""; + }; + 88E10F532B1112B900C08E95 /* Screens */ = { + isa = PBXGroup; + children = ( + 88E10F542B1112CA00C08E95 /* ConversationScreen.swift */, + ); + path = Screens; + sourceTree = ""; + }; +/* End PBXGroup section */ + +/* Begin PBXNativeTarget section */ + 8848C82E2B0D04BC007B434F /* GenerativeAISample */ = { + isa = PBXNativeTarget; + buildConfigurationList = 8848C83D2B0D04BD007B434F /* Build configuration list for PBXNativeTarget "GenerativeAISample" */; + buildPhases = ( + 880BB1102B16519D0014C3DF /* ShellScript */, + 8848C82B2B0D04BC007B434F /* Sources */, + 8848C82C2B0D04BC007B434F /* Frameworks */, + 8848C82D2B0D04BC007B434F /* Resources */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = GenerativeAISample; + packageProductDependencies = ( + 886F95D72B17BA420036F07A /* MarkdownUI */, + 886F95D92B17BA8D0036F07A /* GoogleGenerativeAI */, + 886F95E22B17D6630036F07A /* GenerativeAIUIComponents */, + ); + productName = GenerativeAISample; + productReference = 8848C82F2B0D04BC007B434F /* GenerativeAISample.app */; + productType = "com.apple.product-type.application"; + }; + 8848C8432B0D051E007B434F /* GenerativeAITextSample */ = { + isa = PBXNativeTarget; + buildConfigurationList = 8848C84F2B0D051F007B434F /* Build configuration list for PBXNativeTarget "GenerativeAITextSample" */; + buildPhases = ( + 880BB10F2B1651650014C3DF /* ShellScript */, + 8848C8402B0D051E007B434F /* Sources */, + 8848C8412B0D051E007B434F /* Frameworks */, + 8848C8422B0D051E007B434F /* Resources */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = GenerativeAITextSample; + packageProductDependencies = ( + 88209C232B0FBE1700F64795 /* MarkdownUI */, + 88209C252B0FBF7100F64795 /* GoogleGenerativeAI */, + ); + productName = GenerativeAITextSample; + productReference = 8848C8442B0D051E007B434F /* GenerativeAITextSample.app */; + productType = "com.apple.product-type.application"; + }; + 8848C8552B0D056C007B434F /* GenerativeAIMultimodalSample */ = { + isa = PBXNativeTarget; + buildConfigurationList = 8848C8612B0D056D007B434F /* Build configuration list for PBXNativeTarget "GenerativeAIMultimodalSample" */; + buildPhases = ( + 880BB10E2B1651210014C3DF /* ShellScript */, + 8848C8522B0D056C007B434F /* Sources */, + 8848C8532B0D056C007B434F /* Frameworks */, + 8848C8542B0D056C007B434F /* Resources */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = GenerativeAIMultimodalSample; + packageProductDependencies = ( + 88209C282B0FBFA200F64795 /* GoogleGenerativeAI */, + 88B8A91D2B0FC55100424728 /* MarkdownUI */, + 88B8A9362B0FCBE700424728 /* GenerativeAIUIComponents */, + ); + productName = GenerativeAIMultimodalSample; + productReference = 8848C8562B0D056C007B434F /* GenerativeAIMultimodalSample.app */; + productType = "com.apple.product-type.application"; + }; + 88E10F412B110D5300C08E95 /* ChatSample */ = { + isa = PBXNativeTarget; + 
buildConfigurationList = 88E10F4F2B110D5400C08E95 /* Build configuration list for PBXNativeTarget "ChatSample" */; + buildPhases = ( + 880BB10C2B164F790014C3DF /* ShellScript */, + 88E10F3E2B110D5300C08E95 /* Sources */, + 88E10F3F2B110D5300C08E95 /* Frameworks */, + 88E10F402B110D5300C08E95 /* Resources */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = ChatSample; + packageProductDependencies = ( + 88E10F602B11162B00C08E95 /* GoogleGenerativeAI */, + 88D9474C2B14F27E008B5580 /* MarkdownUI */, + ); + productName = ChatSample; + productReference = 88E10F422B110D5300C08E95 /* ChatSample.app */; + productType = "com.apple.product-type.application"; + }; +/* End PBXNativeTarget section */ + +/* Begin PBXProject section */ + 8848C8272B0D04BC007B434F /* Project object */ = { + isa = PBXProject; + attributes = { + BuildIndependentTargetsInParallel = 1; + LastSwiftUpdateCheck = 1510; + LastUpgradeCheck = 1510; + TargetAttributes = { + 8848C82E2B0D04BC007B434F = { + CreatedOnToolsVersion = 15.1; + }; + 8848C8432B0D051E007B434F = { + CreatedOnToolsVersion = 15.1; + }; + 8848C8552B0D056C007B434F = { + CreatedOnToolsVersion = 15.1; + }; + 88E10F412B110D5300C08E95 = { + CreatedOnToolsVersion = 15.1; + }; + }; + }; + buildConfigurationList = 8848C82A2B0D04BC007B434F /* Build configuration list for PBXProject "GenerativeAISample" */; + compatibilityVersion = "Xcode 14.0"; + developmentRegion = en; + hasScannedForEncodings = 0; + knownRegions = ( + en, + Base, + ); + mainGroup = 8848C8262B0D04BC007B434F; + packageReferences = ( + 88209C212B0FBDF700F64795 /* XCRemoteSwiftPackageReference "swift-markdown-ui" */, + 88209C272B0FBFA200F64795 /* XCLocalSwiftPackageReference "../.." */, + DEA09AC32B1FCE22001962D9 /* XCRemoteSwiftPackageReference "NetworkImage" */, + ); + productRefGroup = 8848C8302B0D04BC007B434F /* Products */; + projectDirPath = ""; + projectRoot = ""; + targets = ( + 8848C82E2B0D04BC007B434F /* GenerativeAISample */, + 8848C8432B0D051E007B434F /* GenerativeAITextSample */, + 8848C8552B0D056C007B434F /* GenerativeAIMultimodalSample */, + 88E10F412B110D5300C08E95 /* ChatSample */, + ); + }; +/* End PBXProject section */ + +/* Begin PBXResourcesBuildPhase section */ + 8848C82D2B0D04BC007B434F /* Resources */ = { + isa = PBXResourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 8848C83A2B0D04BD007B434F /* Preview Assets.xcassets in Resources */, + 8848C8372B0D04BD007B434F /* Assets.xcassets in Resources */, + 88209C152B0F928F00F64795 /* GenerativeAI-Info.plist in Resources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 8848C8422B0D051E007B434F /* Resources */ = { + isa = PBXResourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 8848C84E2B0D051F007B434F /* Preview Assets.xcassets in Resources */, + 8848C84B2B0D051F007B434F /* Assets.xcassets in Resources */, + 88209C162B0F92AF00F64795 /* GenerativeAI-Info.plist in Resources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 8848C8542B0D056C007B434F /* Resources */ = { + isa = PBXResourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 8848C8602B0D056D007B434F /* Preview Assets.xcassets in Resources */, + 8848C85D2B0D056D007B434F /* Assets.xcassets in Resources */, + 88209C172B0F92AF00F64795 /* GenerativeAI-Info.plist in Resources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 88E10F402B110D5300C08E95 /* Resources */ = { + isa = PBXResourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 88E10F4C2B110D5400C08E95 /* Preview Assets.xcassets in Resources */, + 
88E10F492B110D5400C08E95 /* Assets.xcassets in Resources */, + 88E10F5E2B11140200C08E95 /* GenerativeAI-Info.plist in Resources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXResourcesBuildPhase section */ + +/* Begin PBXShellScriptBuildPhase section */ + 880BB10C2B164F790014C3DF /* ShellScript */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputFileListPaths = ( + ); + inputPaths = ( + ); + outputFileListPaths = ( + ); + outputPaths = ( + "$(SRCROOT)/APIKey/GenerativeAI-Info.plist", + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "CONFIG_FILE_BASE_NAME=\"GenerativeAI-Info\"\n\nCONFIG_FILE_NAME=${CONFIG_FILE_BASE_NAME}.plist\nSAMPLE_CONFIG_FILE_NAME=${CONFIG_FILE_BASE_NAME}-Sample.plist\n\nCONFIG_FILE_PATH=$SRCROOT/APIKey/$CONFIG_FILE_NAME\nSAMPLE_CONFIG_FILE_PATH=$SRCROOT/APIKey/$SAMPLE_CONFIG_FILE_NAME\n\nif [ -f \"$CONFIG_FILE_PATH\" ]; then\n echo \"$CONFIG_FILE_PATH exists.\"\nelse\n echo \"$CONFIG_FILE_PATH does not exist, copying sample\"\n cp -v \"${SAMPLE_CONFIG_FILE_PATH}\" \"${CONFIG_FILE_PATH}\"\nfi\n"; + }; + 880BB10E2B1651210014C3DF /* ShellScript */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputFileListPaths = ( + ); + inputPaths = ( + ); + outputFileListPaths = ( + ); + outputPaths = ( + "$(SRCROOT)/APIKey/GenerativeAI-Info.plist", + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "CONFIG_FILE_BASE_NAME=\"GenerativeAI-Info\"\n\nCONFIG_FILE_NAME=${CONFIG_FILE_BASE_NAME}.plist\nSAMPLE_CONFIG_FILE_NAME=${CONFIG_FILE_BASE_NAME}-Sample.plist\n\nCONFIG_FILE_PATH=$SRCROOT/APIKey/$CONFIG_FILE_NAME\nSAMPLE_CONFIG_FILE_PATH=$SRCROOT/APIKey/$SAMPLE_CONFIG_FILE_NAME\n\nif [ -f \"$CONFIG_FILE_PATH\" ]; then\n echo \"$CONFIG_FILE_PATH exists.\"\nelse\n echo \"$CONFIG_FILE_PATH does not exist, copying sample\"\n cp -v \"${SAMPLE_CONFIG_FILE_PATH}\" \"${CONFIG_FILE_PATH}\"\nfi\n"; + }; + 880BB10F2B1651650014C3DF /* ShellScript */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputFileListPaths = ( + ); + inputPaths = ( + ); + outputFileListPaths = ( + ); + outputPaths = ( + "$(SRCROOT)/APIKey/GenerativeAI-Info.plist", + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "CONFIG_FILE_BASE_NAME=\"GenerativeAI-Info\"\n\nCONFIG_FILE_NAME=${CONFIG_FILE_BASE_NAME}.plist\nSAMPLE_CONFIG_FILE_NAME=${CONFIG_FILE_BASE_NAME}-Sample.plist\n\nCONFIG_FILE_PATH=$SRCROOT/APIKey/$CONFIG_FILE_NAME\nSAMPLE_CONFIG_FILE_PATH=$SRCROOT/APIKey/$SAMPLE_CONFIG_FILE_NAME\n\nif [ -f \"$CONFIG_FILE_PATH\" ]; then\n echo \"$CONFIG_FILE_PATH exists.\"\nelse\n echo \"$CONFIG_FILE_PATH does not exist, copying sample\"\n cp -v \"${SAMPLE_CONFIG_FILE_PATH}\" \"${CONFIG_FILE_PATH}\"\nfi\n"; + }; + 880BB1102B16519D0014C3DF /* ShellScript */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputFileListPaths = ( + ); + inputPaths = ( + ); + outputFileListPaths = ( + ); + outputPaths = ( + "$(SRCROOT)/APIKey/GenerativeAI-Info.plist", + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "CONFIG_FILE_BASE_NAME=\"GenerativeAI-Info\"\n\nCONFIG_FILE_NAME=${CONFIG_FILE_BASE_NAME}.plist\nSAMPLE_CONFIG_FILE_NAME=${CONFIG_FILE_BASE_NAME}-Sample.plist\n\nCONFIG_FILE_PATH=$SRCROOT/APIKey/$CONFIG_FILE_NAME\nSAMPLE_CONFIG_FILE_PATH=$SRCROOT/APIKey/$SAMPLE_CONFIG_FILE_NAME\n\nif [ -f \"$CONFIG_FILE_PATH\" ]; 
then\n echo \"$CONFIG_FILE_PATH exists.\"\nelse\n echo \"$CONFIG_FILE_PATH does not exist, copying sample\"\n cp -v \"${SAMPLE_CONFIG_FILE_PATH}\" \"${CONFIG_FILE_PATH}\"\nfi\n"; + }; +/* End PBXShellScriptBuildPhase section */ + +/* Begin PBXSourcesBuildPhase section */ + 8848C82B2B0D04BC007B434F /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 886F95DF2B17D5010036F07A /* BouncingDots.swift in Sources */, + 886F95DE2B17D5010036F07A /* ChatMessage.swift in Sources */, + 88263BF12B239C11008AB09B /* ErrorDetailsView.swift in Sources */, + 8848C8352B0D04BC007B434F /* ContentView.swift in Sources */, + 886F95D52B17BA010036F07A /* SummarizeScreen.swift in Sources */, + 881B753B2B0FDCE600528058 /* APIKey.swift in Sources */, + 8848C8332B0D04BC007B434F /* GenerativeAISampleApp.swift in Sources */, + 886F95E02B17D5010036F07A /* ConversationViewModel.swift in Sources */, + 886F95DD2B17D5010036F07A /* MessageView.swift in Sources */, + 886F95DC2B17BAEF0036F07A /* PhotoReasoningScreen.swift in Sources */, + 886F95DB2B17BAEF0036F07A /* PhotoReasoningViewModel.swift in Sources */, + 886F95E12B17D5010036F07A /* ConversationScreen.swift in Sources */, + 88263BF02B239C09008AB09B /* ErrorView.swift in Sources */, + 886F95D62B17BA010036F07A /* SummarizeViewModel.swift in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 8848C8402B0D051E007B434F /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 88209C1F2B0FBDC300F64795 /* SummarizeScreen.swift in Sources */, + 88209C1E2B0FBDC300F64795 /* APIKey.swift in Sources */, + 8848C8472B0D051E007B434F /* GenerativeAITextSampleApp.swift in Sources */, + 88209C202B0FBDC300F64795 /* SummarizeViewModel.swift in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 8848C8522B0D056C007B434F /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 880266762B0FC39000CF7CB6 /* PhotoReasoningViewModel.swift in Sources */, + 880266792B0FC39000CF7CB6 /* PhotoReasoningScreen.swift in Sources */, + 8848C8592B0D056C007B434F /* GenerativeAIMultimodalSampleApp.swift in Sources */, + 881B753A2B0FDCE600528058 /* APIKey.swift in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 88E10F3E2B110D5300C08E95 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 88E10F5B2B11133E00C08E95 /* MessageView.swift in Sources */, + 88E10F5F2B11140500C08E95 /* APIKey.swift in Sources */, + 88E10F572B1112F600C08E95 /* ConversationViewModel.swift in Sources */, + 88E10F552B1112CA00C08E95 /* ConversationScreen.swift in Sources */, + 88E10F592B11131900C08E95 /* ChatMessage.swift in Sources */, + 88E10F5D2B11135000C08E95 /* BouncingDots.swift in Sources */, + 88E10F452B110D5300C08E95 /* ChatSampleApp.swift in Sources */, + 88263BEF2B239BFE008AB09B /* ErrorView.swift in Sources */, + 889873852B208563005B4896 /* ErrorDetailsView.swift in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXSourcesBuildPhase section */ + +/* Begin XCBuildConfiguration section */ + 8848C83B2B0D04BD007B434F /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + ASSETCATALOG_COMPILER_GENERATE_SWIFT_ASSET_SYMBOL_EXTENSIONS = YES; + CLANG_ANALYZER_NONNULL = YES; + CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++20"; + CLANG_ENABLE_MODULES = YES; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_ENABLE_OBJC_WEAK = YES; + 
CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_COMMA = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_DOCUMENTATION_COMMENTS = YES; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INFINITE_RECURSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES; + CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES; + CLANG_WARN_OBJC_LITERAL_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER = YES; + CLANG_WARN_RANGE_LOOP_ANALYSIS = YES; + CLANG_WARN_STRICT_PROTOTYPES = YES; + CLANG_WARN_SUSPICIOUS_MOVE = YES; + CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + COPY_PHASE_STRIP = NO; + DEBUG_INFORMATION_FORMAT = dwarf; + ENABLE_STRICT_OBJC_MSGSEND = YES; + ENABLE_TESTABILITY = YES; + ENABLE_USER_SCRIPT_SANDBOXING = YES; + GCC_C_LANGUAGE_STANDARD = gnu17; + GCC_DYNAMIC_NO_PIC = NO; + GCC_NO_COMMON_BLOCKS = YES; + GCC_OPTIMIZATION_LEVEL = 0; + GCC_PREPROCESSOR_DEFINITIONS = ( + "DEBUG=1", + "$(inherited)", + ); + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + IPHONEOS_DEPLOYMENT_TARGET = 16.0; + LOCALIZATION_PREFERS_STRING_CATALOGS = YES; + MTL_ENABLE_DEBUG_INFO = INCLUDE_SOURCE; + MTL_FAST_MATH = YES; + ONLY_ACTIVE_ARCH = YES; + SDKROOT = iphoneos; + SWIFT_ACTIVE_COMPILATION_CONDITIONS = "DEBUG $(inherited)"; + SWIFT_OPTIMIZATION_LEVEL = "-Onone"; + }; + name = Debug; + }; + 8848C83C2B0D04BD007B434F /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + ASSETCATALOG_COMPILER_GENERATE_SWIFT_ASSET_SYMBOL_EXTENSIONS = YES; + CLANG_ANALYZER_NONNULL = YES; + CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++20"; + CLANG_ENABLE_MODULES = YES; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_ENABLE_OBJC_WEAK = YES; + CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_COMMA = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_DOCUMENTATION_COMMENTS = YES; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INFINITE_RECURSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES; + CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES; + CLANG_WARN_OBJC_LITERAL_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER = YES; + CLANG_WARN_RANGE_LOOP_ANALYSIS = YES; + CLANG_WARN_STRICT_PROTOTYPES = YES; + CLANG_WARN_SUSPICIOUS_MOVE = YES; + CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + COPY_PHASE_STRIP = NO; + DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; + ENABLE_NS_ASSERTIONS = NO; + ENABLE_STRICT_OBJC_MSGSEND = YES; + ENABLE_USER_SCRIPT_SANDBOXING = YES; + GCC_C_LANGUAGE_STANDARD = gnu17; + GCC_NO_COMMON_BLOCKS = YES; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + 
GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + IPHONEOS_DEPLOYMENT_TARGET = 16.0; + LOCALIZATION_PREFERS_STRING_CATALOGS = YES; + MTL_ENABLE_DEBUG_INFO = NO; + MTL_FAST_MATH = YES; + SDKROOT = iphoneos; + SWIFT_COMPILATION_MODE = wholemodule; + VALIDATE_PRODUCT = YES; + }; + name = Release; + }; + 8848C83E2B0D04BD007B434F /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; + ASSETCATALOG_COMPILER_GLOBAL_ACCENT_COLOR_NAME = AccentColor; + CODE_SIGN_STYLE = Automatic; + CURRENT_PROJECT_VERSION = 1; + DEVELOPMENT_ASSET_PATHS = "\"GenerativeAISample/Preview Content\""; + DEVELOPMENT_TEAM = ""; + ENABLE_PREVIEWS = YES; + ENABLE_USER_SCRIPT_SANDBOXING = NO; + GENERATE_INFOPLIST_FILE = YES; + INFOPLIST_KEY_UIApplicationSceneManifest_Generation = YES; + INFOPLIST_KEY_UIApplicationSupportsIndirectInputEvents = YES; + INFOPLIST_KEY_UILaunchScreen_Generation = YES; + INFOPLIST_KEY_UISupportedInterfaceOrientations_iPad = "UIInterfaceOrientationPortrait UIInterfaceOrientationPortraitUpsideDown UIInterfaceOrientationLandscapeLeft UIInterfaceOrientationLandscapeRight"; + INFOPLIST_KEY_UISupportedInterfaceOrientations_iPhone = "UIInterfaceOrientationPortrait UIInterfaceOrientationLandscapeLeft UIInterfaceOrientationLandscapeRight"; + LD_RUNPATH_SEARCH_PATHS = ( + "$(inherited)", + "@executable_path/Frameworks", + ); + MARKETING_VERSION = 1.0; + PRODUCT_BUNDLE_IDENTIFIER = com.example.google.generativeai.GenerativeAISample; + PRODUCT_NAME = "$(TARGET_NAME)"; + SWIFT_EMIT_LOC_STRINGS = YES; + SWIFT_VERSION = 5.0; + TARGETED_DEVICE_FAMILY = "1,2"; + }; + name = Debug; + }; + 8848C83F2B0D04BD007B434F /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; + ASSETCATALOG_COMPILER_GLOBAL_ACCENT_COLOR_NAME = AccentColor; + CODE_SIGN_STYLE = Automatic; + CURRENT_PROJECT_VERSION = 1; + DEVELOPMENT_ASSET_PATHS = "\"GenerativeAISample/Preview Content\""; + DEVELOPMENT_TEAM = ""; + ENABLE_PREVIEWS = YES; + ENABLE_USER_SCRIPT_SANDBOXING = NO; + GENERATE_INFOPLIST_FILE = YES; + INFOPLIST_KEY_UIApplicationSceneManifest_Generation = YES; + INFOPLIST_KEY_UIApplicationSupportsIndirectInputEvents = YES; + INFOPLIST_KEY_UILaunchScreen_Generation = YES; + INFOPLIST_KEY_UISupportedInterfaceOrientations_iPad = "UIInterfaceOrientationPortrait UIInterfaceOrientationPortraitUpsideDown UIInterfaceOrientationLandscapeLeft UIInterfaceOrientationLandscapeRight"; + INFOPLIST_KEY_UISupportedInterfaceOrientations_iPhone = "UIInterfaceOrientationPortrait UIInterfaceOrientationLandscapeLeft UIInterfaceOrientationLandscapeRight"; + LD_RUNPATH_SEARCH_PATHS = ( + "$(inherited)", + "@executable_path/Frameworks", + ); + MARKETING_VERSION = 1.0; + PRODUCT_BUNDLE_IDENTIFIER = com.example.google.generativeai.GenerativeAISample; + PRODUCT_NAME = "$(TARGET_NAME)"; + SWIFT_EMIT_LOC_STRINGS = YES; + SWIFT_VERSION = 5.0; + TARGETED_DEVICE_FAMILY = "1,2"; + }; + name = Release; + }; + 8848C8502B0D051F007B434F /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; + ASSETCATALOG_COMPILER_GLOBAL_ACCENT_COLOR_NAME = AccentColor; + CODE_SIGN_STYLE = Automatic; + CURRENT_PROJECT_VERSION = 1; + DEVELOPMENT_ASSET_PATHS = "\"GenerativeAITextSample/Preview Content\""; + DEVELOPMENT_TEAM = ""; + ENABLE_PREVIEWS = YES; + ENABLE_USER_SCRIPT_SANDBOXING = NO; + GENERATE_INFOPLIST_FILE = YES; + 
INFOPLIST_KEY_UIApplicationSceneManifest_Generation = YES; + INFOPLIST_KEY_UIApplicationSupportsIndirectInputEvents = YES; + INFOPLIST_KEY_UILaunchScreen_Generation = YES; + INFOPLIST_KEY_UISupportedInterfaceOrientations_iPad = "UIInterfaceOrientationPortrait UIInterfaceOrientationPortraitUpsideDown UIInterfaceOrientationLandscapeLeft UIInterfaceOrientationLandscapeRight"; + INFOPLIST_KEY_UISupportedInterfaceOrientations_iPhone = "UIInterfaceOrientationPortrait UIInterfaceOrientationLandscapeLeft UIInterfaceOrientationLandscapeRight"; + IPHONEOS_DEPLOYMENT_TARGET = 16.0; + LD_RUNPATH_SEARCH_PATHS = ( + "$(inherited)", + "@executable_path/Frameworks", + ); + MARKETING_VERSION = 1.0; + PRODUCT_BUNDLE_IDENTIFIER = com.example.google.generativeai.GenerativeAITextSample; + PRODUCT_NAME = "$(TARGET_NAME)"; + SWIFT_EMIT_LOC_STRINGS = YES; + SWIFT_VERSION = 5.0; + TARGETED_DEVICE_FAMILY = "1,2"; + }; + name = Debug; + }; + 8848C8512B0D051F007B434F /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; + ASSETCATALOG_COMPILER_GLOBAL_ACCENT_COLOR_NAME = AccentColor; + CODE_SIGN_STYLE = Automatic; + CURRENT_PROJECT_VERSION = 1; + DEVELOPMENT_ASSET_PATHS = "\"GenerativeAITextSample/Preview Content\""; + DEVELOPMENT_TEAM = ""; + ENABLE_PREVIEWS = YES; + ENABLE_USER_SCRIPT_SANDBOXING = NO; + GENERATE_INFOPLIST_FILE = YES; + INFOPLIST_KEY_UIApplicationSceneManifest_Generation = YES; + INFOPLIST_KEY_UIApplicationSupportsIndirectInputEvents = YES; + INFOPLIST_KEY_UILaunchScreen_Generation = YES; + INFOPLIST_KEY_UISupportedInterfaceOrientations_iPad = "UIInterfaceOrientationPortrait UIInterfaceOrientationPortraitUpsideDown UIInterfaceOrientationLandscapeLeft UIInterfaceOrientationLandscapeRight"; + INFOPLIST_KEY_UISupportedInterfaceOrientations_iPhone = "UIInterfaceOrientationPortrait UIInterfaceOrientationLandscapeLeft UIInterfaceOrientationLandscapeRight"; + IPHONEOS_DEPLOYMENT_TARGET = 16.0; + LD_RUNPATH_SEARCH_PATHS = ( + "$(inherited)", + "@executable_path/Frameworks", + ); + MARKETING_VERSION = 1.0; + PRODUCT_BUNDLE_IDENTIFIER = com.example.google.generativeai.GenerativeAITextSample; + PRODUCT_NAME = "$(TARGET_NAME)"; + SWIFT_EMIT_LOC_STRINGS = YES; + SWIFT_VERSION = 5.0; + TARGETED_DEVICE_FAMILY = "1,2"; + }; + name = Release; + }; + 8848C8622B0D056D007B434F /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; + ASSETCATALOG_COMPILER_GLOBAL_ACCENT_COLOR_NAME = AccentColor; + CODE_SIGN_STYLE = Automatic; + CURRENT_PROJECT_VERSION = 1; + DEVELOPMENT_ASSET_PATHS = "\"GenerativeAIMultimodalSample/Preview Content\""; + DEVELOPMENT_TEAM = ""; + ENABLE_PREVIEWS = YES; + ENABLE_USER_SCRIPT_SANDBOXING = NO; + GENERATE_INFOPLIST_FILE = YES; + INFOPLIST_KEY_UIApplicationSceneManifest_Generation = YES; + INFOPLIST_KEY_UIApplicationSupportsIndirectInputEvents = YES; + INFOPLIST_KEY_UILaunchScreen_Generation = YES; + INFOPLIST_KEY_UISupportedInterfaceOrientations_iPad = "UIInterfaceOrientationPortrait UIInterfaceOrientationPortraitUpsideDown UIInterfaceOrientationLandscapeLeft UIInterfaceOrientationLandscapeRight"; + INFOPLIST_KEY_UISupportedInterfaceOrientations_iPhone = "UIInterfaceOrientationPortrait UIInterfaceOrientationLandscapeLeft UIInterfaceOrientationLandscapeRight"; + IPHONEOS_DEPLOYMENT_TARGET = 16.0; + LD_RUNPATH_SEARCH_PATHS = ( + "$(inherited)", + "@executable_path/Frameworks", + ); + MARKETING_VERSION = 1.0; + PRODUCT_BUNDLE_IDENTIFIER = 
com.example.google.generativeai.GenerativeAIMultimodalSample; + PRODUCT_NAME = "$(TARGET_NAME)"; + SWIFT_EMIT_LOC_STRINGS = YES; + SWIFT_VERSION = 5.0; + TARGETED_DEVICE_FAMILY = "1,2"; + }; + name = Debug; + }; + 8848C8632B0D056D007B434F /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; + ASSETCATALOG_COMPILER_GLOBAL_ACCENT_COLOR_NAME = AccentColor; + CODE_SIGN_STYLE = Automatic; + CURRENT_PROJECT_VERSION = 1; + DEVELOPMENT_ASSET_PATHS = "\"GenerativeAIMultimodalSample/Preview Content\""; + DEVELOPMENT_TEAM = ""; + ENABLE_PREVIEWS = YES; + ENABLE_USER_SCRIPT_SANDBOXING = NO; + GENERATE_INFOPLIST_FILE = YES; + INFOPLIST_KEY_UIApplicationSceneManifest_Generation = YES; + INFOPLIST_KEY_UIApplicationSupportsIndirectInputEvents = YES; + INFOPLIST_KEY_UILaunchScreen_Generation = YES; + INFOPLIST_KEY_UISupportedInterfaceOrientations_iPad = "UIInterfaceOrientationPortrait UIInterfaceOrientationPortraitUpsideDown UIInterfaceOrientationLandscapeLeft UIInterfaceOrientationLandscapeRight"; + INFOPLIST_KEY_UISupportedInterfaceOrientations_iPhone = "UIInterfaceOrientationPortrait UIInterfaceOrientationLandscapeLeft UIInterfaceOrientationLandscapeRight"; + IPHONEOS_DEPLOYMENT_TARGET = 16.0; + LD_RUNPATH_SEARCH_PATHS = ( + "$(inherited)", + "@executable_path/Frameworks", + ); + MARKETING_VERSION = 1.0; + PRODUCT_BUNDLE_IDENTIFIER = com.example.google.generativeai.GenerativeAIMultimodalSample; + PRODUCT_NAME = "$(TARGET_NAME)"; + SWIFT_EMIT_LOC_STRINGS = YES; + SWIFT_VERSION = 5.0; + TARGETED_DEVICE_FAMILY = "1,2"; + }; + name = Release; + }; + 88E10F4D2B110D5400C08E95 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; + ASSETCATALOG_COMPILER_GLOBAL_ACCENT_COLOR_NAME = AccentColor; + CODE_SIGN_STYLE = Automatic; + CURRENT_PROJECT_VERSION = 1; + DEVELOPMENT_ASSET_PATHS = "\"ChatSample/Preview Content\""; + DEVELOPMENT_TEAM = ""; + ENABLE_PREVIEWS = YES; + ENABLE_USER_SCRIPT_SANDBOXING = NO; + GENERATE_INFOPLIST_FILE = YES; + INFOPLIST_KEY_UIApplicationSceneManifest_Generation = YES; + INFOPLIST_KEY_UIApplicationSupportsIndirectInputEvents = YES; + INFOPLIST_KEY_UILaunchScreen_Generation = YES; + INFOPLIST_KEY_UISupportedInterfaceOrientations_iPad = "UIInterfaceOrientationPortrait UIInterfaceOrientationPortraitUpsideDown UIInterfaceOrientationLandscapeLeft UIInterfaceOrientationLandscapeRight"; + INFOPLIST_KEY_UISupportedInterfaceOrientations_iPhone = "UIInterfaceOrientationPortrait UIInterfaceOrientationLandscapeLeft UIInterfaceOrientationLandscapeRight"; + IPHONEOS_DEPLOYMENT_TARGET = 16.0; + LD_RUNPATH_SEARCH_PATHS = ( + "$(inherited)", + "@executable_path/Frameworks", + ); + MARKETING_VERSION = 1.0; + PRODUCT_BUNDLE_IDENTIFIER = com.example.google.generativeai.ChatSample; + PRODUCT_NAME = "$(TARGET_NAME)"; + SWIFT_EMIT_LOC_STRINGS = YES; + SWIFT_VERSION = 5.0; + TARGETED_DEVICE_FAMILY = "1,2"; + }; + name = Debug; + }; + 88E10F4E2B110D5400C08E95 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; + ASSETCATALOG_COMPILER_GLOBAL_ACCENT_COLOR_NAME = AccentColor; + CODE_SIGN_STYLE = Automatic; + CURRENT_PROJECT_VERSION = 1; + DEVELOPMENT_ASSET_PATHS = "\"ChatSample/Preview Content\""; + DEVELOPMENT_TEAM = ""; + ENABLE_PREVIEWS = YES; + ENABLE_USER_SCRIPT_SANDBOXING = NO; + GENERATE_INFOPLIST_FILE = YES; + INFOPLIST_KEY_UIApplicationSceneManifest_Generation = YES; + 
INFOPLIST_KEY_UIApplicationSupportsIndirectInputEvents = YES; + INFOPLIST_KEY_UILaunchScreen_Generation = YES; + INFOPLIST_KEY_UISupportedInterfaceOrientations_iPad = "UIInterfaceOrientationPortrait UIInterfaceOrientationPortraitUpsideDown UIInterfaceOrientationLandscapeLeft UIInterfaceOrientationLandscapeRight"; + INFOPLIST_KEY_UISupportedInterfaceOrientations_iPhone = "UIInterfaceOrientationPortrait UIInterfaceOrientationLandscapeLeft UIInterfaceOrientationLandscapeRight"; + IPHONEOS_DEPLOYMENT_TARGET = 16.0; + LD_RUNPATH_SEARCH_PATHS = ( + "$(inherited)", + "@executable_path/Frameworks", + ); + MARKETING_VERSION = 1.0; + PRODUCT_BUNDLE_IDENTIFIER = com.example.google.generativeai.ChatSample; + PRODUCT_NAME = "$(TARGET_NAME)"; + SWIFT_EMIT_LOC_STRINGS = YES; + SWIFT_VERSION = 5.0; + TARGETED_DEVICE_FAMILY = "1,2"; + }; + name = Release; + }; +/* End XCBuildConfiguration section */ + +/* Begin XCConfigurationList section */ + 8848C82A2B0D04BC007B434F /* Build configuration list for PBXProject "GenerativeAISample" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 8848C83B2B0D04BD007B434F /* Debug */, + 8848C83C2B0D04BD007B434F /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 8848C83D2B0D04BD007B434F /* Build configuration list for PBXNativeTarget "GenerativeAISample" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 8848C83E2B0D04BD007B434F /* Debug */, + 8848C83F2B0D04BD007B434F /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 8848C84F2B0D051F007B434F /* Build configuration list for PBXNativeTarget "GenerativeAITextSample" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 8848C8502B0D051F007B434F /* Debug */, + 8848C8512B0D051F007B434F /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 8848C8612B0D056D007B434F /* Build configuration list for PBXNativeTarget "GenerativeAIMultimodalSample" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 8848C8622B0D056D007B434F /* Debug */, + 8848C8632B0D056D007B434F /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 88E10F4F2B110D5400C08E95 /* Build configuration list for PBXNativeTarget "ChatSample" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 88E10F4D2B110D5400C08E95 /* Debug */, + 88E10F4E2B110D5400C08E95 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; +/* End XCConfigurationList section */ + +/* Begin XCLocalSwiftPackageReference section */ + 88209C272B0FBFA200F64795 /* XCLocalSwiftPackageReference "../.." 
*/ = { + isa = XCLocalSwiftPackageReference; + relativePath = ../..; + }; +/* End XCLocalSwiftPackageReference section */ + +/* Begin XCRemoteSwiftPackageReference section */ + 88209C212B0FBDF700F64795 /* XCRemoteSwiftPackageReference "swift-markdown-ui" */ = { + isa = XCRemoteSwiftPackageReference; + repositoryURL = "https://github.com/gonzalezreal/swift-markdown-ui"; + requirement = { + kind = revision; + revision = 5df8a4adedd6ae4eb2455ef60ff75183984daeb8; + }; + }; + DEA09AC32B1FCE22001962D9 /* XCRemoteSwiftPackageReference "NetworkImage" */ = { + isa = XCRemoteSwiftPackageReference; + repositoryURL = "https://github.com/gonzalezreal/NetworkImage"; + requirement = { + kind = revision; + revision = 7aff8d1b31148d32c5933d75557d42f6323ee3d1; + }; + }; +/* End XCRemoteSwiftPackageReference section */ + +/* Begin XCSwiftPackageProductDependency section */ + 88209C232B0FBE1700F64795 /* MarkdownUI */ = { + isa = XCSwiftPackageProductDependency; + package = 88209C212B0FBDF700F64795 /* XCRemoteSwiftPackageReference "swift-markdown-ui" */; + productName = MarkdownUI; + }; + 88209C252B0FBF7100F64795 /* GoogleGenerativeAI */ = { + isa = XCSwiftPackageProductDependency; + package = 88209C212B0FBDF700F64795 /* XCRemoteSwiftPackageReference "swift-markdown-ui" */; + productName = GoogleGenerativeAI; + }; + 88209C282B0FBFA200F64795 /* GoogleGenerativeAI */ = { + isa = XCSwiftPackageProductDependency; + productName = GoogleGenerativeAI; + }; + 886F95D72B17BA420036F07A /* MarkdownUI */ = { + isa = XCSwiftPackageProductDependency; + package = 88209C212B0FBDF700F64795 /* XCRemoteSwiftPackageReference "swift-markdown-ui" */; + productName = MarkdownUI; + }; + 886F95D92B17BA8D0036F07A /* GoogleGenerativeAI */ = { + isa = XCSwiftPackageProductDependency; + productName = GoogleGenerativeAI; + }; + 886F95E22B17D6630036F07A /* GenerativeAIUIComponents */ = { + isa = XCSwiftPackageProductDependency; + productName = GenerativeAIUIComponents; + }; + 88B8A91D2B0FC55100424728 /* MarkdownUI */ = { + isa = XCSwiftPackageProductDependency; + package = 88209C212B0FBDF700F64795 /* XCRemoteSwiftPackageReference "swift-markdown-ui" */; + productName = MarkdownUI; + }; + 88B8A9362B0FCBE700424728 /* GenerativeAIUIComponents */ = { + isa = XCSwiftPackageProductDependency; + productName = GenerativeAIUIComponents; + }; + 88D9474C2B14F27E008B5580 /* MarkdownUI */ = { + isa = XCSwiftPackageProductDependency; + package = 88209C212B0FBDF700F64795 /* XCRemoteSwiftPackageReference "swift-markdown-ui" */; + productName = MarkdownUI; + }; + 88E10F602B11162B00C08E95 /* GoogleGenerativeAI */ = { + isa = XCSwiftPackageProductDependency; + package = 88209C212B0FBDF700F64795 /* XCRemoteSwiftPackageReference "swift-markdown-ui" */; + productName = GoogleGenerativeAI; + }; +/* End XCSwiftPackageProductDependency section */ + }; + rootObject = 8848C8272B0D04BC007B434F /* Project object */; +} diff --git a/Examples/PaLMText/PaLMText/Assets.xcassets/AccentColor.colorset/Contents.json b/Examples/GenerativeAISample/GenerativeAISample/Assets.xcassets/AccentColor.colorset/Contents.json similarity index 100% rename from Examples/PaLMText/PaLMText/Assets.xcassets/AccentColor.colorset/Contents.json rename to Examples/GenerativeAISample/GenerativeAISample/Assets.xcassets/AccentColor.colorset/Contents.json diff --git a/Examples/PaLMText/PaLMText/Assets.xcassets/AppIcon.appiconset/Contents.json b/Examples/GenerativeAISample/GenerativeAISample/Assets.xcassets/AppIcon.appiconset/Contents.json similarity index 100% rename from 
Examples/PaLMText/PaLMText/Assets.xcassets/AppIcon.appiconset/Contents.json rename to Examples/GenerativeAISample/GenerativeAISample/Assets.xcassets/AppIcon.appiconset/Contents.json diff --git a/Examples/PaLMText/PaLMText/Assets.xcassets/Contents.json b/Examples/GenerativeAISample/GenerativeAISample/Assets.xcassets/Contents.json similarity index 100% rename from Examples/PaLMText/PaLMText/Assets.xcassets/Contents.json rename to Examples/GenerativeAISample/GenerativeAISample/Assets.xcassets/Contents.json diff --git a/Examples/GenerativeAISample/GenerativeAISample/ContentView.swift b/Examples/GenerativeAISample/GenerativeAISample/ContentView.swift new file mode 100644 index 0000000..34331bf --- /dev/null +++ b/Examples/GenerativeAISample/GenerativeAISample/ContentView.swift @@ -0,0 +1,48 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import SwiftUI + +struct ContentView: View { + @StateObject + var viewModel = ConversationViewModel() + + var body: some View { + NavigationStack { + List { + NavigationLink { + SummarizeScreen() + } label: { + Label("Text", systemImage: "doc.text") + } + NavigationLink { + PhotoReasoningScreen() + } label: { + Label("Multi-modal", systemImage: "doc.richtext") + } + NavigationLink { + ConversationScreen() + .environmentObject(viewModel) + } label: { + Label("Chat", systemImage: "ellipsis.message.fill") + } + } + .navigationTitle("Generative AI Samples") + } + } +} + +#Preview { + ContentView() +} diff --git a/Examples/PaLMChat/PaLMChat/PaLMChatApp.swift b/Examples/GenerativeAISample/GenerativeAISample/GenerativeAISampleApp.swift similarity index 92% rename from Examples/PaLMChat/PaLMChat/PaLMChatApp.swift rename to Examples/GenerativeAISample/GenerativeAISample/GenerativeAISampleApp.swift index 5f180e3..b29edbb 100644 --- a/Examples/PaLMChat/PaLMChat/PaLMChatApp.swift +++ b/Examples/GenerativeAISample/GenerativeAISample/GenerativeAISampleApp.swift @@ -15,10 +15,10 @@ import SwiftUI @main -struct PaLMChatApp: App { +struct GenerativeAISampleApp: App { var body: some Scene { WindowGroup { - ConversationScreen() + ContentView() } } } diff --git a/Examples/PaLMText/PaLMText/Preview Content/Preview Assets.xcassets/Contents.json b/Examples/GenerativeAISample/GenerativeAISample/Preview Content/Preview Assets.xcassets/Contents.json similarity index 100% rename from Examples/PaLMText/PaLMText/Preview Content/Preview Assets.xcassets/Contents.json rename to Examples/GenerativeAISample/GenerativeAISample/Preview Content/Preview Assets.xcassets/Contents.json diff --git a/Examples/GenerativeAISample/GenerativeAITextSample/Assets.xcassets/AccentColor.colorset/Contents.json b/Examples/GenerativeAISample/GenerativeAITextSample/Assets.xcassets/AccentColor.colorset/Contents.json new file mode 100644 index 0000000..eb87897 --- /dev/null +++ b/Examples/GenerativeAISample/GenerativeAITextSample/Assets.xcassets/AccentColor.colorset/Contents.json @@ -0,0 +1,11 @@ +{ + "colors" : [ + { + "idiom" : "universal" + } + ], + "info" : { + "author" 
: "xcode", + "version" : 1 + } +} diff --git a/Examples/GenerativeAISample/GenerativeAITextSample/Assets.xcassets/AppIcon.appiconset/Contents.json b/Examples/GenerativeAISample/GenerativeAITextSample/Assets.xcassets/AppIcon.appiconset/Contents.json new file mode 100644 index 0000000..13613e3 --- /dev/null +++ b/Examples/GenerativeAISample/GenerativeAITextSample/Assets.xcassets/AppIcon.appiconset/Contents.json @@ -0,0 +1,13 @@ +{ + "images" : [ + { + "idiom" : "universal", + "platform" : "ios", + "size" : "1024x1024" + } + ], + "info" : { + "author" : "xcode", + "version" : 1 + } +} diff --git a/Examples/GenerativeAISample/GenerativeAITextSample/Assets.xcassets/Contents.json b/Examples/GenerativeAISample/GenerativeAITextSample/Assets.xcassets/Contents.json new file mode 100644 index 0000000..73c0059 --- /dev/null +++ b/Examples/GenerativeAISample/GenerativeAITextSample/Assets.xcassets/Contents.json @@ -0,0 +1,6 @@ +{ + "info" : { + "author" : "xcode", + "version" : 1 + } +} diff --git a/Examples/PaLMText/PaLMText/PaLMTextApp.swift b/Examples/GenerativeAISample/GenerativeAITextSample/GenerativeAITextSampleApp.swift similarity index 87% rename from Examples/PaLMText/PaLMText/PaLMTextApp.swift rename to Examples/GenerativeAISample/GenerativeAITextSample/GenerativeAITextSampleApp.swift index d1d7d3c..62bd6c9 100644 --- a/Examples/PaLMText/PaLMText/PaLMTextApp.swift +++ b/Examples/GenerativeAISample/GenerativeAITextSample/GenerativeAITextSampleApp.swift @@ -15,10 +15,12 @@ import SwiftUI @main -struct PaLMTextApp: App { +struct GenerativeAITextSampleApp: App { var body: some Scene { WindowGroup { - SummarizeScreen() + NavigationStack { + SummarizeScreen() + } } } } diff --git a/Examples/GenerativeAISample/GenerativeAITextSample/Preview Content/Preview Assets.xcassets/Contents.json b/Examples/GenerativeAISample/GenerativeAITextSample/Preview Content/Preview Assets.xcassets/Contents.json new file mode 100644 index 0000000..73c0059 --- /dev/null +++ b/Examples/GenerativeAISample/GenerativeAITextSample/Preview Content/Preview Assets.xcassets/Contents.json @@ -0,0 +1,6 @@ +{ + "info" : { + "author" : "xcode", + "version" : 1 + } +} diff --git a/Examples/PaLMText/PaLMText/Screens/SummarizeScreen.swift b/Examples/GenerativeAISample/GenerativeAITextSample/Screens/SummarizeScreen.swift similarity index 50% rename from Examples/PaLMText/PaLMText/Screens/SummarizeScreen.swift rename to Examples/GenerativeAISample/GenerativeAITextSample/Screens/SummarizeScreen.swift index 2fc1e92..8fbb89f 100644 --- a/Examples/PaLMText/PaLMText/Screens/SummarizeScreen.swift +++ b/Examples/GenerativeAISample/GenerativeAITextSample/Screens/SummarizeScreen.swift @@ -12,54 +12,63 @@ // See the License for the specific language governing permissions and // limitations under the License. +import MarkdownUI import SwiftUI struct SummarizeScreen: View { @StateObject var viewModel = SummarizeViewModel() + @State var userInput = "" + + enum FocusedField: Hashable { + case message + } + + @FocusState + var focusedField: FocusedField? 
var body: some View { - NavigationStack { - VStack { - TextField("Text", text: $viewModel.inputText, axis: .vertical) - .lineLimit(10, reservesSpace: true) + VStack { + Text("Enter some text, then tap on _Go_ to summarize it.") + HStack(alignment: .top) { + TextField("Enter text summarize", text: $userInput, axis: .vertical) .textFieldStyle(.roundedBorder) + .onSubmit { + onSummarizeTapped() + } + Button("Go") { + onSummarizeTapped() + } + .padding(.top, 4) + } + .padding([.horizontal, .bottom]) - Button(action: onSummarizeTapped) { + List { + HStack(alignment: .top) { if viewModel.inProgress { ProgressView() - .progressViewStyle(CircularProgressViewStyle(tint: .white)) - .frame(maxWidth: .infinity, maxHeight: 8) - .padding(6) - } - else { - Text("Summarize") - .frame(maxWidth: .infinity, maxHeight: 50) + } else { + Image(systemName: "cloud.circle.fill") + .font(.title2) } - } - .frame(maxWidth: .infinity, maxHeight: 50) - .buttonStyle(.borderedProminent) - .controlSize(.large) - .padding(.vertical) - TextField("Result", text: $viewModel.outputText, axis: .vertical) - .lineLimit(10, reservesSpace: true) - .textFieldStyle(.roundedBorder) - Spacer() + Markdown("\(viewModel.outputText)") + } + .listRowSeparator(.hidden) } - .navigationTitle("Text") - .padding() + .listStyle(.plain) } + .navigationTitle("Text sample") } private func onSummarizeTapped() { Task { - await viewModel.summarize() + await viewModel.summarize(inputText: userInput) } } } -struct SummarizeScreen_Previews: PreviewProvider { - static var previews: some View { +#Preview { + NavigationStack { SummarizeScreen() } } diff --git a/Examples/GenerativeAISample/GenerativeAITextSample/ViewModels/SummarizeViewModel.swift b/Examples/GenerativeAISample/GenerativeAITextSample/ViewModels/SummarizeViewModel.swift new file mode 100644 index 0000000..55289e4 --- /dev/null +++ b/Examples/GenerativeAISample/GenerativeAITextSample/ViewModels/SummarizeViewModel.swift @@ -0,0 +1,68 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import Foundation +import GoogleGenerativeAI +import OSLog + +@MainActor +class SummarizeViewModel: ObservableObject { + private var logger = Logger(subsystem: Bundle.main.bundleIdentifier!, category: "generative-ai") + + @Published + var outputText = "" + + @Published + var errorMessage: String? + + @Published + var inProgress = false + + private var model: GenerativeModel? 
+ + init() { + model = GenerativeModel(name: "gemini-pro", apiKey: APIKey.default) + } + + func summarize(inputText: String) async { + defer { + inProgress = false + } + guard let model else { + return + } + + do { + inProgress = true + errorMessage = nil + outputText = "" + + let prompt = "Summarize the following text for me: \(inputText)" + + let outputContentStream = model.generateContentStream(prompt) + + // stream response + for try await outputContent in outputContentStream { + guard let line = outputContent.text else { + return + } + + outputText = outputText + line + } + } catch { + logger.error("\(error.localizedDescription)") + errorMessage = error.localizedDescription + } + } +} diff --git a/Examples/GenerativeAISample/GenerativeAIUIComponents/Package.swift b/Examples/GenerativeAISample/GenerativeAIUIComponents/Package.swift new file mode 100644 index 0000000..808f5f4 --- /dev/null +++ b/Examples/GenerativeAISample/GenerativeAIUIComponents/Package.swift @@ -0,0 +1,35 @@ +// swift-tools-version: 5.9 +// The swift-tools-version declares the minimum version of Swift required to build this package. + +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +import PackageDescription + +let package = Package( + name: "GenerativeAIUIComponents", + platforms: [ + .iOS(.v16), + ], + products: [ + .library( + name: "GenerativeAIUIComponents", + targets: ["GenerativeAIUIComponents"] + ), + ], + targets: [ + .target( + name: "GenerativeAIUIComponents" + ), + ] +) diff --git a/Examples/GenerativeAISample/GenerativeAIUIComponents/Sources/GenerativeAIUIComponents/MultimodalInputField.swift b/Examples/GenerativeAISample/GenerativeAIUIComponents/Sources/GenerativeAIUIComponents/MultimodalInputField.swift new file mode 100644 index 0000000..9519fb3 --- /dev/null +++ b/Examples/GenerativeAISample/GenerativeAIUIComponents/Sources/GenerativeAIUIComponents/MultimodalInputField.swift @@ -0,0 +1,183 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import PhotosUI +import SwiftUI + +struct MultimodalInputFieldSubmitHandler: EnvironmentKey { + static var defaultValue: (() -> Void)? +} + +extension EnvironmentValues { + var submitHandler: (() -> Void)? 
{ + get { self[MultimodalInputFieldSubmitHandler.self] } + set { self[MultimodalInputFieldSubmitHandler.self] = newValue } + } +} + +public extension View { + func onSubmit(submitHandler: @escaping () -> Void) -> some View { + environment(\.submitHandler, submitHandler) + } +} + +public struct MultimodalInputField: View { + @Binding public var text: String + @Binding public var selection: [PhotosPickerItem] + + @Environment(\.submitHandler) var submitHandler + + @State private var selectedImages = [Image]() + + @State private var isChooseAttachmentTypePickerShowing = false + @State private var isAttachmentPickerShowing = false + + private func showChooseAttachmentTypePicker() { + isChooseAttachmentTypePickerShowing.toggle() + } + + private func showAttachmentPicker() { + isAttachmentPickerShowing.toggle() + } + + private func submit() { + if let submitHandler { + submitHandler() + } + } + + public init(text: Binding<String>, + selection: Binding<[PhotosPickerItem]>) { + _text = text + _selection = selection + } + + public var body: some View { + VStack(alignment: .leading) { + HStack(alignment: .top) { + Button(action: showChooseAttachmentTypePicker) { + Image(systemName: "plus") + } + .padding(.top, 4) + + VStack(alignment: .leading) { + TextField( + "Upload an image, and then ask a question about it", + text: $text, + axis: .vertical + ) + .padding(.vertical, 4) + .onSubmit(submit) + + if selectedImages.count > 0 { + ScrollView(.horizontal) { + LazyHStack { + ForEach(0 ..< selectedImages.count, id: \.self) { i in + HStack { + selectedImages[i] + .resizable() + .scaledToFill() + .frame(width: 50, height: 50) + .cornerRadius(8) + } + } + } + } + .frame(height: 50) + } + } + .padding(.horizontal, 8) + .padding(.vertical, 4) + .overlay { + RoundedRectangle( + cornerRadius: 8, + style: .continuous + ) + .stroke(Color(UIColor.systemFill), lineWidth: 1) + } + + Button(action: submit) { + Text("Go") + } + .padding(.top, 4) + } + } + .padding(.horizontal) + .confirmationDialog( + "Select an image", + isPresented: $isChooseAttachmentTypePickerShowing, + titleVisibility: .hidden + ) { + Button(action: showAttachmentPicker) { + Text("Photo & Video Library") + } + } + .photosPicker(isPresented: $isAttachmentPickerShowing, selection: $selection) + .onChange(of: selection) { _ in + Task { + selectedImages.removeAll() + + for item in selection { + if let data = try? await item.loadTransferable(type: Data.self) { + if let uiImage = UIImage(data: data) { + let image = Image(uiImage: uiImage) + selectedImages.append(image) + } + } + } + } + } + } +} + +#Preview { + struct Wrapper: View { + @State var userInput: String = "" + @State var selectedItems = [PhotosPickerItem]() + + @State private var selectedImages = [Image]() + + var body: some View { + MultimodalInputField(text: $userInput, selection: $selectedItems) + .onChange(of: selectedItems) { _ in + Task { + selectedImages.removeAll() + + for item in selectedItems { + if let data = try?
await item.loadTransferable(type: Data.self) { + if let uiImage = UIImage(data: data) { + let image = Image(uiImage: uiImage) + selectedImages.append(image) + } + } + } + } + } + + List { + ForEach(0 ..< $selectedImages.count, id: \.self) { i in + HStack { + selectedImages[i] + .resizable() + .scaledToFill() + .frame(width: .infinity) + .cornerRadius(8) + } + } + } + } + } + + return Wrapper() +} diff --git a/Examples/PaLMChat/PaLMChat.xcodeproj/project.pbxproj b/Examples/PaLMChat/PaLMChat.xcodeproj/project.pbxproj deleted file mode 100644 index 9c5ab89..0000000 --- a/Examples/PaLMChat/PaLMChat.xcodeproj/project.pbxproj +++ /dev/null @@ -1,449 +0,0 @@ -// !$*UTF8*$! -{ - archiveVersion = 1; - classes = { - }; - objectVersion = 56; - objects = { - -/* Begin PBXBuildFile section */ - 88391C8829D2186200C54BCE /* PaLMChatApp.swift in Sources */ = {isa = PBXBuildFile; fileRef = 88391C8729D2186200C54BCE /* PaLMChatApp.swift */; }; - 88391C8A29D2186200C54BCE /* ConversationScreen.swift in Sources */ = {isa = PBXBuildFile; fileRef = 88391C8929D2186200C54BCE /* ConversationScreen.swift */; }; - 88391C8C29D2186300C54BCE /* Assets.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = 88391C8B29D2186300C54BCE /* Assets.xcassets */; }; - 88391C8F29D2186300C54BCE /* Preview Assets.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = 88391C8E29D2186300C54BCE /* Preview Assets.xcassets */; }; - 88391C9E29D2313100C54BCE /* ChatMessage.swift in Sources */ = {isa = PBXBuildFile; fileRef = 88391C9D29D2313100C54BCE /* ChatMessage.swift */; }; - 88391CA029D2392600C54BCE /* MessageView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 88391C9F29D2392600C54BCE /* MessageView.swift */; }; - 88391CA229D23AB100C54BCE /* ConversationViewModel.swift in Sources */ = {isa = PBXBuildFile; fileRef = 88391CA129D23AB100C54BCE /* ConversationViewModel.swift */; }; - 88391CA529D241A000C54BCE /* BouncingDots.swift in Sources */ = {isa = PBXBuildFile; fileRef = 88391CA429D241A000C54BCE /* BouncingDots.swift */; }; - 88B72DB72A005B9A002725BC /* GoogleGenerativeAI in Frameworks */ = {isa = PBXBuildFile; productRef = 88B72DB62A005B9A002725BC /* GoogleGenerativeAI */; }; - 88BFC51629D35B3900319EC5 /* PaLM-Info.plist in Resources */ = {isa = PBXBuildFile; fileRef = 88BFC51529D35B3900319EC5 /* PaLM-Info.plist */; }; -/* End PBXBuildFile section */ - -/* Begin PBXFileReference section */ - 8810D7752A00580E00D4BAF4 /* generative-ai-swift */ = {isa = PBXFileReference; lastKnownFileType = wrapper; name = "generative-ai-swift"; path = ../..; sourceTree = ""; }; - 88391C8429D2186200C54BCE /* PaLMChat.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = PaLMChat.app; sourceTree = BUILT_PRODUCTS_DIR; }; - 88391C8729D2186200C54BCE /* PaLMChatApp.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = PaLMChatApp.swift; sourceTree = ""; }; - 88391C8929D2186200C54BCE /* ConversationScreen.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ConversationScreen.swift; sourceTree = ""; }; - 88391C8B29D2186300C54BCE /* Assets.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Assets.xcassets; sourceTree = ""; }; - 88391C8E29D2186300C54BCE /* Preview Assets.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = "Preview Assets.xcassets"; sourceTree = ""; }; - 88391C9D29D2313100C54BCE /* ChatMessage.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; 
path = ChatMessage.swift; sourceTree = ""; }; - 88391C9F29D2392600C54BCE /* MessageView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = MessageView.swift; sourceTree = ""; }; - 88391CA129D23AB100C54BCE /* ConversationViewModel.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ConversationViewModel.swift; sourceTree = ""; }; - 88391CA429D241A000C54BCE /* BouncingDots.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = BouncingDots.swift; sourceTree = ""; }; - 88BFC51529D35B3900319EC5 /* PaLM-Info.plist */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.plist.xml; path = "PaLM-Info.plist"; sourceTree = ""; }; -/* End PBXFileReference section */ - -/* Begin PBXFrameworksBuildPhase section */ - 88391C8129D2186200C54BCE /* Frameworks */ = { - isa = PBXFrameworksBuildPhase; - buildActionMask = 2147483647; - files = ( - 88B72DB72A005B9A002725BC /* GoogleGenerativeAI in Frameworks */, - ); - runOnlyForDeploymentPostprocessing = 0; - }; -/* End PBXFrameworksBuildPhase section */ - -/* Begin PBXGroup section */ - 8810D7742A00580E00D4BAF4 /* Packages */ = { - isa = PBXGroup; - children = ( - 8810D7752A00580E00D4BAF4 /* generative-ai-swift */, - ); - name = Packages; - sourceTree = ""; - }; - 88391C7B29D2186200C54BCE = { - isa = PBXGroup; - children = ( - 8810D7742A00580E00D4BAF4 /* Packages */, - 88391C8629D2186200C54BCE /* PaLMChat */, - 88391C8529D2186200C54BCE /* Products */, - 88391C9729D2196400C54BCE /* Frameworks */, - ); - sourceTree = ""; - }; - 88391C8529D2186200C54BCE /* Products */ = { - isa = PBXGroup; - children = ( - 88391C8429D2186200C54BCE /* PaLMChat.app */, - ); - name = Products; - sourceTree = ""; - }; - 88391C8629D2186200C54BCE /* PaLMChat */ = { - isa = PBXGroup; - children = ( - 88391C9C29D2310800C54BCE /* ViewModels */, - 88391C9B29D230EC00C54BCE /* Models */, - 88391C9A29D230E400C54BCE /* Views */, - 88391CA329D23FA300C54BCE /* Screens */, - 88391C8729D2186200C54BCE /* PaLMChatApp.swift */, - 88391C8B29D2186300C54BCE /* Assets.xcassets */, - 88BFC51529D35B3900319EC5 /* PaLM-Info.plist */, - 88391C8D29D2186300C54BCE /* Preview Content */, - ); - path = PaLMChat; - sourceTree = ""; - }; - 88391C8D29D2186300C54BCE /* Preview Content */ = { - isa = PBXGroup; - children = ( - 88391C8E29D2186300C54BCE /* Preview Assets.xcassets */, - ); - path = "Preview Content"; - sourceTree = ""; - }; - 88391C9729D2196400C54BCE /* Frameworks */ = { - isa = PBXGroup; - children = ( - ); - name = Frameworks; - sourceTree = ""; - }; - 88391C9A29D230E400C54BCE /* Views */ = { - isa = PBXGroup; - children = ( - 88391C9F29D2392600C54BCE /* MessageView.swift */, - 88391CA429D241A000C54BCE /* BouncingDots.swift */, - ); - path = Views; - sourceTree = ""; - }; - 88391C9B29D230EC00C54BCE /* Models */ = { - isa = PBXGroup; - children = ( - 88391C9D29D2313100C54BCE /* ChatMessage.swift */, - ); - path = Models; - sourceTree = ""; - }; - 88391C9C29D2310800C54BCE /* ViewModels */ = { - isa = PBXGroup; - children = ( - 88391CA129D23AB100C54BCE /* ConversationViewModel.swift */, - ); - path = ViewModels; - sourceTree = ""; - }; - 88391CA329D23FA300C54BCE /* Screens */ = { - isa = PBXGroup; - children = ( - 88391C8929D2186200C54BCE /* ConversationScreen.swift */, - ); - path = Screens; - sourceTree = ""; - }; -/* End PBXGroup section */ - -/* Begin PBXNativeTarget section */ - 88391C8329D2186200C54BCE /* PaLMChat */ = { - isa = PBXNativeTarget; - buildConfigurationList = 88391C9229D2186300C54BCE /* 
Build configuration list for PBXNativeTarget "PaLMChat" */; - buildPhases = ( - 8811FA9129D2D468000550E4 /* Run Script */, - 88391C8029D2186200C54BCE /* Sources */, - 88391C8129D2186200C54BCE /* Frameworks */, - 88391C8229D2186200C54BCE /* Resources */, - ); - buildRules = ( - ); - dependencies = ( - ); - name = PaLMChat; - packageProductDependencies = ( - 88B72DB62A005B9A002725BC /* GoogleGenerativeAI */, - ); - productName = PaLMChat; - productReference = 88391C8429D2186200C54BCE /* PaLMChat.app */; - productType = "com.apple.product-type.application"; - }; -/* End PBXNativeTarget section */ - -/* Begin PBXProject section */ - 88391C7C29D2186200C54BCE /* Project object */ = { - isa = PBXProject; - attributes = { - BuildIndependentTargetsInParallel = 1; - LastSwiftUpdateCheck = 1430; - LastUpgradeCheck = 1430; - TargetAttributes = { - 88391C8329D2186200C54BCE = { - CreatedOnToolsVersion = 14.3; - }; - }; - }; - buildConfigurationList = 88391C7F29D2186200C54BCE /* Build configuration list for PBXProject "PaLMChat" */; - compatibilityVersion = "Xcode 14.0"; - developmentRegion = en; - hasScannedForEncodings = 0; - knownRegions = ( - en, - Base, - ); - mainGroup = 88391C7B29D2186200C54BCE; - productRefGroup = 88391C8529D2186200C54BCE /* Products */; - projectDirPath = ""; - projectRoot = ""; - targets = ( - 88391C8329D2186200C54BCE /* PaLMChat */, - ); - }; -/* End PBXProject section */ - -/* Begin PBXResourcesBuildPhase section */ - 88391C8229D2186200C54BCE /* Resources */ = { - isa = PBXResourcesBuildPhase; - buildActionMask = 2147483647; - files = ( - 88391C8F29D2186300C54BCE /* Preview Assets.xcassets in Resources */, - 88391C8C29D2186300C54BCE /* Assets.xcassets in Resources */, - 88BFC51629D35B3900319EC5 /* PaLM-Info.plist in Resources */, - ); - runOnlyForDeploymentPostprocessing = 0; - }; -/* End PBXResourcesBuildPhase section */ - -/* Begin PBXShellScriptBuildPhase section */ - 8811FA9129D2D468000550E4 /* Run Script */ = { - isa = PBXShellScriptBuildPhase; - buildActionMask = 2147483647; - files = ( - ); - inputFileListPaths = ( - ); - inputPaths = ( - ); - name = "Run Script"; - outputFileListPaths = ( - ); - outputPaths = ( - "$(SRCROOT)/${PRODUCT_NAME}/PaLM-Info.plist", - ); - runOnlyForDeploymentPostprocessing = 0; - shellPath = /bin/sh; - shellScript = "CONFIG_FILE_BASE_NAME=\"PaLM-Info\"\n\nCONFIG_FILE_NAME=${CONFIG_FILE_BASE_NAME}.plist\nSAMPLE_CONFIG_FILE_NAME=${CONFIG_FILE_BASE_NAME}-Sample.plist\n\nCONFIG_FILE_PATH=$SRCROOT/$PRODUCT_NAME/$CONFIG_FILE_NAME\nSAMPLE_CONFIG_FILE_PATH=$SRCROOT/$PRODUCT_NAME/$SAMPLE_CONFIG_FILE_NAME\n\nif [ -f \"$CONFIG_FILE_PATH\" ]; then\n echo \"$CONFIG_FILE_PATH exists.\"\nelse\n echo \"$CONFIG_FILE_PATH does not exist, copying sample\"\n cp -v \"${SAMPLE_CONFIG_FILE_PATH}\" \"${CONFIG_FILE_PATH}\"\nfi\n"; - }; -/* End PBXShellScriptBuildPhase section */ - -/* Begin PBXSourcesBuildPhase section */ - 88391C8029D2186200C54BCE /* Sources */ = { - isa = PBXSourcesBuildPhase; - buildActionMask = 2147483647; - files = ( - 88391CA029D2392600C54BCE /* MessageView.swift in Sources */, - 88391C9E29D2313100C54BCE /* ChatMessage.swift in Sources */, - 88391CA529D241A000C54BCE /* BouncingDots.swift in Sources */, - 88391C8A29D2186200C54BCE /* ConversationScreen.swift in Sources */, - 88391C8829D2186200C54BCE /* PaLMChatApp.swift in Sources */, - 88391CA229D23AB100C54BCE /* ConversationViewModel.swift in Sources */, - ); - runOnlyForDeploymentPostprocessing = 0; - }; -/* End PBXSourcesBuildPhase section */ - -/* Begin XCBuildConfiguration section */ - 
88391C9029D2186300C54BCE /* Debug */ = { - isa = XCBuildConfiguration; - buildSettings = { - ALWAYS_SEARCH_USER_PATHS = NO; - CLANG_ANALYZER_NONNULL = YES; - CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE; - CLANG_CXX_LANGUAGE_STANDARD = "gnu++20"; - CLANG_ENABLE_MODULES = YES; - CLANG_ENABLE_OBJC_ARC = YES; - CLANG_ENABLE_OBJC_WEAK = YES; - CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; - CLANG_WARN_BOOL_CONVERSION = YES; - CLANG_WARN_COMMA = YES; - CLANG_WARN_CONSTANT_CONVERSION = YES; - CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES; - CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; - CLANG_WARN_DOCUMENTATION_COMMENTS = YES; - CLANG_WARN_EMPTY_BODY = YES; - CLANG_WARN_ENUM_CONVERSION = YES; - CLANG_WARN_INFINITE_RECURSION = YES; - CLANG_WARN_INT_CONVERSION = YES; - CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES; - CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES; - CLANG_WARN_OBJC_LITERAL_CONVERSION = YES; - CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; - CLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER = YES; - CLANG_WARN_RANGE_LOOP_ANALYSIS = YES; - CLANG_WARN_STRICT_PROTOTYPES = YES; - CLANG_WARN_SUSPICIOUS_MOVE = YES; - CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE; - CLANG_WARN_UNREACHABLE_CODE = YES; - CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; - COPY_PHASE_STRIP = NO; - DEBUG_INFORMATION_FORMAT = dwarf; - ENABLE_STRICT_OBJC_MSGSEND = YES; - ENABLE_TESTABILITY = YES; - GCC_C_LANGUAGE_STANDARD = gnu11; - GCC_DYNAMIC_NO_PIC = NO; - GCC_NO_COMMON_BLOCKS = YES; - GCC_OPTIMIZATION_LEVEL = 0; - GCC_PREPROCESSOR_DEFINITIONS = ( - "DEBUG=1", - "$(inherited)", - ); - GCC_WARN_64_TO_32_BIT_CONVERSION = YES; - GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; - GCC_WARN_UNDECLARED_SELECTOR = YES; - GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; - GCC_WARN_UNUSED_FUNCTION = YES; - GCC_WARN_UNUSED_VARIABLE = YES; - IPHONEOS_DEPLOYMENT_TARGET = 16.4; - MTL_ENABLE_DEBUG_INFO = INCLUDE_SOURCE; - MTL_FAST_MATH = YES; - ONLY_ACTIVE_ARCH = YES; - SDKROOT = iphoneos; - SWIFT_ACTIVE_COMPILATION_CONDITIONS = DEBUG; - SWIFT_OPTIMIZATION_LEVEL = "-Onone"; - }; - name = Debug; - }; - 88391C9129D2186300C54BCE /* Release */ = { - isa = XCBuildConfiguration; - buildSettings = { - ALWAYS_SEARCH_USER_PATHS = NO; - CLANG_ANALYZER_NONNULL = YES; - CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE; - CLANG_CXX_LANGUAGE_STANDARD = "gnu++20"; - CLANG_ENABLE_MODULES = YES; - CLANG_ENABLE_OBJC_ARC = YES; - CLANG_ENABLE_OBJC_WEAK = YES; - CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; - CLANG_WARN_BOOL_CONVERSION = YES; - CLANG_WARN_COMMA = YES; - CLANG_WARN_CONSTANT_CONVERSION = YES; - CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES; - CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; - CLANG_WARN_DOCUMENTATION_COMMENTS = YES; - CLANG_WARN_EMPTY_BODY = YES; - CLANG_WARN_ENUM_CONVERSION = YES; - CLANG_WARN_INFINITE_RECURSION = YES; - CLANG_WARN_INT_CONVERSION = YES; - CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES; - CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES; - CLANG_WARN_OBJC_LITERAL_CONVERSION = YES; - CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; - CLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER = YES; - CLANG_WARN_RANGE_LOOP_ANALYSIS = YES; - CLANG_WARN_STRICT_PROTOTYPES = YES; - CLANG_WARN_SUSPICIOUS_MOVE = YES; - CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE; - CLANG_WARN_UNREACHABLE_CODE = YES; - CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; - COPY_PHASE_STRIP = NO; - DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; - ENABLE_NS_ASSERTIONS = NO; - ENABLE_STRICT_OBJC_MSGSEND = YES; - GCC_C_LANGUAGE_STANDARD = gnu11; - 
GCC_NO_COMMON_BLOCKS = YES; - GCC_WARN_64_TO_32_BIT_CONVERSION = YES; - GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; - GCC_WARN_UNDECLARED_SELECTOR = YES; - GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; - GCC_WARN_UNUSED_FUNCTION = YES; - GCC_WARN_UNUSED_VARIABLE = YES; - IPHONEOS_DEPLOYMENT_TARGET = 16.4; - MTL_ENABLE_DEBUG_INFO = NO; - MTL_FAST_MATH = YES; - SDKROOT = iphoneos; - SWIFT_COMPILATION_MODE = wholemodule; - SWIFT_OPTIMIZATION_LEVEL = "-O"; - VALIDATE_PRODUCT = YES; - }; - name = Release; - }; - 88391C9329D2186300C54BCE /* Debug */ = { - isa = XCBuildConfiguration; - buildSettings = { - ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; - ASSETCATALOG_COMPILER_GLOBAL_ACCENT_COLOR_NAME = AccentColor; - CODE_SIGN_STYLE = Automatic; - CURRENT_PROJECT_VERSION = 1; - DEVELOPMENT_ASSET_PATHS = "\"PaLMChat/Preview Content\""; - DEVELOPMENT_TEAM = ""; - ENABLE_PREVIEWS = YES; - GENERATE_INFOPLIST_FILE = YES; - INFOPLIST_KEY_UIApplicationSceneManifest_Generation = YES; - INFOPLIST_KEY_UIApplicationSupportsIndirectInputEvents = YES; - INFOPLIST_KEY_UILaunchScreen_Generation = YES; - INFOPLIST_KEY_UISupportedInterfaceOrientations_iPad = "UIInterfaceOrientationPortrait UIInterfaceOrientationPortraitUpsideDown UIInterfaceOrientationLandscapeLeft UIInterfaceOrientationLandscapeRight"; - INFOPLIST_KEY_UISupportedInterfaceOrientations_iPhone = "UIInterfaceOrientationPortrait UIInterfaceOrientationLandscapeLeft UIInterfaceOrientationLandscapeRight"; - LD_RUNPATH_SEARCH_PATHS = ( - "$(inherited)", - "@executable_path/Frameworks", - ); - MARKETING_VERSION = 1.0; - PRODUCT_BUNDLE_IDENTIFIER = com.example.google.generativeai.PaLMChat; - PRODUCT_NAME = "$(TARGET_NAME)"; - SWIFT_EMIT_LOC_STRINGS = YES; - SWIFT_VERSION = 5.0; - TARGETED_DEVICE_FAMILY = "1,2"; - }; - name = Debug; - }; - 88391C9429D2186300C54BCE /* Release */ = { - isa = XCBuildConfiguration; - buildSettings = { - ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; - ASSETCATALOG_COMPILER_GLOBAL_ACCENT_COLOR_NAME = AccentColor; - CODE_SIGN_STYLE = Automatic; - CURRENT_PROJECT_VERSION = 1; - DEVELOPMENT_ASSET_PATHS = "\"PaLMChat/Preview Content\""; - DEVELOPMENT_TEAM = ""; - ENABLE_PREVIEWS = YES; - GENERATE_INFOPLIST_FILE = YES; - INFOPLIST_KEY_UIApplicationSceneManifest_Generation = YES; - INFOPLIST_KEY_UIApplicationSupportsIndirectInputEvents = YES; - INFOPLIST_KEY_UILaunchScreen_Generation = YES; - INFOPLIST_KEY_UISupportedInterfaceOrientations_iPad = "UIInterfaceOrientationPortrait UIInterfaceOrientationPortraitUpsideDown UIInterfaceOrientationLandscapeLeft UIInterfaceOrientationLandscapeRight"; - INFOPLIST_KEY_UISupportedInterfaceOrientations_iPhone = "UIInterfaceOrientationPortrait UIInterfaceOrientationLandscapeLeft UIInterfaceOrientationLandscapeRight"; - LD_RUNPATH_SEARCH_PATHS = ( - "$(inherited)", - "@executable_path/Frameworks", - ); - MARKETING_VERSION = 1.0; - PRODUCT_BUNDLE_IDENTIFIER = com.example.google.generativeai.PaLMChat; - PRODUCT_NAME = "$(TARGET_NAME)"; - SWIFT_EMIT_LOC_STRINGS = YES; - SWIFT_VERSION = 5.0; - TARGETED_DEVICE_FAMILY = "1,2"; - }; - name = Release; - }; -/* End XCBuildConfiguration section */ - -/* Begin XCConfigurationList section */ - 88391C7F29D2186200C54BCE /* Build configuration list for PBXProject "PaLMChat" */ = { - isa = XCConfigurationList; - buildConfigurations = ( - 88391C9029D2186300C54BCE /* Debug */, - 88391C9129D2186300C54BCE /* Release */, - ); - defaultConfigurationIsVisible = 0; - defaultConfigurationName = Release; - }; - 88391C9229D2186300C54BCE /* Build configuration list for 
PBXNativeTarget "PaLMChat" */ = { - isa = XCConfigurationList; - buildConfigurations = ( - 88391C9329D2186300C54BCE /* Debug */, - 88391C9429D2186300C54BCE /* Release */, - ); - defaultConfigurationIsVisible = 0; - defaultConfigurationName = Release; - }; -/* End XCConfigurationList section */ - -/* Begin XCSwiftPackageProductDependency section */ - 88B72DB62A005B9A002725BC /* GoogleGenerativeAI */ = { - isa = XCSwiftPackageProductDependency; - productName = GoogleGenerativeAI; - }; -/* End XCSwiftPackageProductDependency section */ - }; - rootObject = 88391C7C29D2186200C54BCE /* Project object */; -} diff --git a/Examples/PaLMChat/PaLMChat.xcodeproj/project.xcworkspace/contents.xcworkspacedata b/Examples/PaLMChat/PaLMChat.xcodeproj/project.xcworkspace/contents.xcworkspacedata deleted file mode 100644 index 919434a..0000000 --- a/Examples/PaLMChat/PaLMChat.xcodeproj/project.xcworkspace/contents.xcworkspacedata +++ /dev/null @@ -1,7 +0,0 @@ - - - - - diff --git a/Examples/PaLMChat/PaLMChat.xcodeproj/project.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist b/Examples/PaLMChat/PaLMChat.xcodeproj/project.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist deleted file mode 100644 index 18d9810..0000000 --- a/Examples/PaLMChat/PaLMChat.xcodeproj/project.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist +++ /dev/null @@ -1,8 +0,0 @@ - - - - - IDEDidComputeMac32BitWarning - - - diff --git a/Examples/PaLMChat/PaLMChat/Screens/ConversationScreen.swift b/Examples/PaLMChat/PaLMChat/Screens/ConversationScreen.swift deleted file mode 100644 index 20dfc32..0000000 --- a/Examples/PaLMChat/PaLMChat/Screens/ConversationScreen.swift +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import SwiftUI -import GoogleGenerativeAI - -struct ConversationScreen: View { - @StateObject - private var viewModel = ConversationViewModel() - - @State - private var userPrompt = "" - - enum FocusedField: Hashable { - case message - } - - @FocusState - var focusedField: FocusedField? 
- - var body: some View { - VStack { - ScrollViewReader { scrollViewProxy in - List(viewModel.messages) { message in - MessageView(message: message) - } - .listStyle(.plain) - .onChange(of: viewModel.messages, perform: { newValue in - guard let lastMessage = viewModel.messages.last else { return } - - // wait for a short moment to make sure we can actually scroll to the bottom - DispatchQueue.main.asyncAfter(deadline: .now() + 0.05) { - withAnimation { - scrollViewProxy.scrollTo(lastMessage.id, anchor: .top) - } - focusedField = .message - } - }) - } - HStack { - TextField("Message...", text: $userPrompt) - .focused($focusedField, equals: .message) - .textFieldStyle(.roundedBorder) - .frame(minHeight: CGFloat(30)) - .onSubmit { sendMessage() } - Button(action: { sendMessage() }) { - Text("Send") - } - } - .padding(.horizontal) - } - .onAppear() { - focusedField = .message - } - } - - private func sendMessage() { - Task { - let prompt = userPrompt - userPrompt = "" - await viewModel.sendMessage(prompt) - } - } -} - -struct ConversationScreen_Previews: PreviewProvider { - static var previews: some View { - ConversationScreen() - } -} diff --git a/Examples/PaLMChat/PaLMChat/ViewModels/ConversationViewModel.swift b/Examples/PaLMChat/PaLMChat/ViewModels/ConversationViewModel.swift deleted file mode 100644 index afdcb64..0000000 --- a/Examples/PaLMChat/PaLMChat/ViewModels/ConversationViewModel.swift +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import Foundation -import GoogleGenerativeAI - -@MainActor -class ConversationViewModel: ObservableObject { - - /// This array holds both the user's and the system's chat messages - @Published var messages = [ChatMessage]() - - // Chat history. This is used by the LLM to provide a coherent conversation with the user. - private var history = [Message]() - - /// Fetch the API key from `PaLM-Info.plist` - private var apiKey: String { - get { - guard let filePath = Bundle.main.path(forResource: "PaLM-Info", ofType: "plist") else { - fatalError("Couldn't find file 'PaLM-Info.plist'.") - } - let plist = NSDictionary(contentsOfFile: filePath) - guard let value = plist?.object(forKey: "API_KEY") as? String else { - fatalError("Couldn't find key 'API_KEY' in 'PaLM-Info.plist'.") - } - if (value.starts(with: "_")) { - fatalError("Follow the instructions at https://developers.generativeai.google/tutorials/setup to get a PaLM API key.") - } - return value - } - } - - private var palmClient: GenerativeLanguage? 
- - init() { - palmClient = GenerativeLanguage(apiKey: apiKey) - } - - func sendMessage(_ text: String) async { - // first, add the user's message to the chat - let userMessage = ChatMessage(message: text, participant: .user) - messages.append(userMessage) - - // add an empty (pending) chat message to show the bouncing dots animatin - // while we wait for a response from the backend - var systemMessage = ChatMessage(message: "", participant: .system, pending: true) - messages.append(systemMessage) - - do { - var response: GenerateMessageResponse? - - if history.isEmpty { - // this is the user's first message - response = try await palmClient?.chat(message: userMessage.message) - } - else { - // send previous chat messages *and* the user's new message to the backend - response = try await palmClient?.chat(message: userMessage.message, history: history) - } - - if let candidate = response?.candidates?.first, let text = candidate.content { - // remove bouncing dots and insert a chat message with the backend's response into the chat - systemMessage.message = text - systemMessage.pending = false - messages.removeLast() - messages.append(systemMessage) - - if let historicMessages = response?.messages { - history = historicMessages - history.append(candidate) - } - } - } - catch { - // display error message as a chat bubble - let errorMessage = ChatMessage(message: error.localizedDescription, participant: .system) - messages.removeLast() - messages.append(errorMessage) - } - } -} diff --git a/Examples/PaLMEmbeddings/PaLMEmbeddings.xcodeproj/project.pbxproj b/Examples/PaLMEmbeddings/PaLMEmbeddings.xcodeproj/project.pbxproj deleted file mode 100644 index c13ad6c..0000000 --- a/Examples/PaLMEmbeddings/PaLMEmbeddings.xcodeproj/project.pbxproj +++ /dev/null @@ -1,420 +0,0 @@ -// !$*UTF8*$! 
-{ - archiveVersion = 1; - classes = { - }; - objectVersion = 56; - objects = { - -/* Begin PBXBuildFile section */ - 88269B532A2A196A00FC6503 /* EmbeddingsViewModel.swift in Sources */ = {isa = PBXBuildFile; fileRef = 88269B522A2A196A00FC6503 /* EmbeddingsViewModel.swift */; }; - 88269B572A2A19B900FC6503 /* PaLM-Info.plist in Resources */ = {isa = PBXBuildFile; fileRef = 88269B562A2A19B900FC6503 /* PaLM-Info.plist */; }; - 8877C1702A2A0DC400C0B1D8 /* PaLMEmbeddingsApp.swift in Sources */ = {isa = PBXBuildFile; fileRef = 8877C16F2A2A0DC400C0B1D8 /* PaLMEmbeddingsApp.swift */; }; - 8877C1722A2A0DC400C0B1D8 /* EmbeddingsScreen.swift in Sources */ = {isa = PBXBuildFile; fileRef = 8877C1712A2A0DC400C0B1D8 /* EmbeddingsScreen.swift */; }; - 8877C1742A2A0DC500C0B1D8 /* Assets.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = 8877C1732A2A0DC500C0B1D8 /* Assets.xcassets */; }; - 8877C1772A2A0DC500C0B1D8 /* Preview Assets.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = 8877C1762A2A0DC500C0B1D8 /* Preview Assets.xcassets */; }; - 88CD8EB72A2A1A0000934BBA /* GoogleGenerativeAI in Frameworks */ = {isa = PBXBuildFile; productRef = 88CD8EB62A2A1A0000934BBA /* GoogleGenerativeAI */; }; -/* End PBXBuildFile section */ - -/* Begin PBXFileReference section */ - 88269B522A2A196A00FC6503 /* EmbeddingsViewModel.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = EmbeddingsViewModel.swift; sourceTree = ""; }; - 88269B552A2A199000FC6503 /* generative-ai-swift */ = {isa = PBXFileReference; lastKnownFileType = wrapper; name = "generative-ai-swift"; path = ../..; sourceTree = ""; }; - 88269B562A2A19B900FC6503 /* PaLM-Info.plist */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.plist.xml; path = "PaLM-Info.plist"; sourceTree = ""; }; - 8877C16C2A2A0DC400C0B1D8 /* PaLMEmbeddings.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = PaLMEmbeddings.app; sourceTree = BUILT_PRODUCTS_DIR; }; - 8877C16F2A2A0DC400C0B1D8 /* PaLMEmbeddingsApp.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = PaLMEmbeddingsApp.swift; sourceTree = ""; }; - 8877C1712A2A0DC400C0B1D8 /* EmbeddingsScreen.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = EmbeddingsScreen.swift; sourceTree = ""; }; - 8877C1732A2A0DC500C0B1D8 /* Assets.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Assets.xcassets; sourceTree = ""; }; - 8877C1762A2A0DC500C0B1D8 /* Preview Assets.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = "Preview Assets.xcassets"; sourceTree = ""; }; -/* End PBXFileReference section */ - -/* Begin PBXFrameworksBuildPhase section */ - 8877C1692A2A0DC400C0B1D8 /* Frameworks */ = { - isa = PBXFrameworksBuildPhase; - buildActionMask = 2147483647; - files = ( - 88CD8EB72A2A1A0000934BBA /* GoogleGenerativeAI in Frameworks */, - ); - runOnlyForDeploymentPostprocessing = 0; - }; -/* End PBXFrameworksBuildPhase section */ - -/* Begin PBXGroup section */ - 88269B542A2A199000FC6503 /* Packages */ = { - isa = PBXGroup; - children = ( - 88269B552A2A199000FC6503 /* generative-ai-swift */, - ); - name = Packages; - sourceTree = ""; - }; - 8877C1632A2A0DC400C0B1D8 = { - isa = PBXGroup; - children = ( - 88269B542A2A199000FC6503 /* Packages */, - 8877C16E2A2A0DC400C0B1D8 /* PaLMEmbeddings */, - 8877C16D2A2A0DC400C0B1D8 /* Products */, - 88CD8EB52A2A1A0000934BBA /* Frameworks */, - ); - sourceTree = ""; - }; - 
8877C16D2A2A0DC400C0B1D8 /* Products */ = { - isa = PBXGroup; - children = ( - 8877C16C2A2A0DC400C0B1D8 /* PaLMEmbeddings.app */, - ); - name = Products; - sourceTree = ""; - }; - 8877C16E2A2A0DC400C0B1D8 /* PaLMEmbeddings */ = { - isa = PBXGroup; - children = ( - 8877C17E2A2A0DFF00C0B1D8 /* ViewModels */, - 8877C17D2A2A0DF400C0B1D8 /* Screens */, - 8877C16F2A2A0DC400C0B1D8 /* PaLMEmbeddingsApp.swift */, - 8877C1732A2A0DC500C0B1D8 /* Assets.xcassets */, - 88269B562A2A19B900FC6503 /* PaLM-Info.plist */, - 8877C1752A2A0DC500C0B1D8 /* Preview Content */, - ); - path = PaLMEmbeddings; - sourceTree = ""; - }; - 8877C1752A2A0DC500C0B1D8 /* Preview Content */ = { - isa = PBXGroup; - children = ( - 8877C1762A2A0DC500C0B1D8 /* Preview Assets.xcassets */, - ); - path = "Preview Content"; - sourceTree = ""; - }; - 8877C17D2A2A0DF400C0B1D8 /* Screens */ = { - isa = PBXGroup; - children = ( - 8877C1712A2A0DC400C0B1D8 /* EmbeddingsScreen.swift */, - ); - path = Screens; - sourceTree = ""; - }; - 8877C17E2A2A0DFF00C0B1D8 /* ViewModels */ = { - isa = PBXGroup; - children = ( - 88269B522A2A196A00FC6503 /* EmbeddingsViewModel.swift */, - ); - path = ViewModels; - sourceTree = ""; - }; - 88CD8EB52A2A1A0000934BBA /* Frameworks */ = { - isa = PBXGroup; - children = ( - ); - name = Frameworks; - sourceTree = ""; - }; -/* End PBXGroup section */ - -/* Begin PBXNativeTarget section */ - 8877C16B2A2A0DC400C0B1D8 /* PaLMEmbeddings */ = { - isa = PBXNativeTarget; - buildConfigurationList = 8877C17A2A2A0DC500C0B1D8 /* Build configuration list for PBXNativeTarget "PaLMEmbeddings" */; - buildPhases = ( - 8883DF5D2A2F308100F1DE21 /* ShellScript */, - 8877C1682A2A0DC400C0B1D8 /* Sources */, - 8877C1692A2A0DC400C0B1D8 /* Frameworks */, - 8877C16A2A2A0DC400C0B1D8 /* Resources */, - ); - buildRules = ( - ); - dependencies = ( - ); - name = PaLMEmbeddings; - packageProductDependencies = ( - 88CD8EB62A2A1A0000934BBA /* GoogleGenerativeAI */, - ); - productName = PaLMEmbeddings; - productReference = 8877C16C2A2A0DC400C0B1D8 /* PaLMEmbeddings.app */; - productType = "com.apple.product-type.application"; - }; -/* End PBXNativeTarget section */ - -/* Begin PBXProject section */ - 8877C1642A2A0DC400C0B1D8 /* Project object */ = { - isa = PBXProject; - attributes = { - BuildIndependentTargetsInParallel = 1; - LastSwiftUpdateCheck = 1430; - LastUpgradeCheck = 1430; - TargetAttributes = { - 8877C16B2A2A0DC400C0B1D8 = { - CreatedOnToolsVersion = 14.3; - }; - }; - }; - buildConfigurationList = 8877C1672A2A0DC400C0B1D8 /* Build configuration list for PBXProject "PaLMEmbeddings" */; - compatibilityVersion = "Xcode 14.0"; - developmentRegion = en; - hasScannedForEncodings = 0; - knownRegions = ( - en, - Base, - ); - mainGroup = 8877C1632A2A0DC400C0B1D8; - productRefGroup = 8877C16D2A2A0DC400C0B1D8 /* Products */; - projectDirPath = ""; - projectRoot = ""; - targets = ( - 8877C16B2A2A0DC400C0B1D8 /* PaLMEmbeddings */, - ); - }; -/* End PBXProject section */ - -/* Begin PBXResourcesBuildPhase section */ - 8877C16A2A2A0DC400C0B1D8 /* Resources */ = { - isa = PBXResourcesBuildPhase; - buildActionMask = 2147483647; - files = ( - 8877C1772A2A0DC500C0B1D8 /* Preview Assets.xcassets in Resources */, - 8877C1742A2A0DC500C0B1D8 /* Assets.xcassets in Resources */, - 88269B572A2A19B900FC6503 /* PaLM-Info.plist in Resources */, - ); - runOnlyForDeploymentPostprocessing = 0; - }; -/* End PBXResourcesBuildPhase section */ - -/* Begin PBXShellScriptBuildPhase section */ - 8883DF5D2A2F308100F1DE21 /* ShellScript */ = { - isa = PBXShellScriptBuildPhase; - 
buildActionMask = 2147483647; - files = ( - ); - inputFileListPaths = ( - ); - inputPaths = ( - ); - outputFileListPaths = ( - ); - outputPaths = ( - "$(SRCROOT)/${PRODUCT_NAME}/PaLM-Info.plist", - ); - runOnlyForDeploymentPostprocessing = 0; - shellPath = /bin/sh; - shellScript = "CONFIG_FILE_BASE_NAME=\"PaLM-Info\"\n\nCONFIG_FILE_NAME=${CONFIG_FILE_BASE_NAME}.plist\nSAMPLE_CONFIG_FILE_NAME=${CONFIG_FILE_BASE_NAME}-Sample.plist\n\nCONFIG_FILE_PATH=$SRCROOT/$PRODUCT_NAME/$CONFIG_FILE_NAME\nSAMPLE_CONFIG_FILE_PATH=$SRCROOT/$PRODUCT_NAME/$SAMPLE_CONFIG_FILE_NAME\n\nif [ -f \"$CONFIG_FILE_PATH\" ]; then\n echo \"$CONFIG_FILE_PATH exists.\"\nelse\n echo \"$CONFIG_FILE_PATH does not exist, copying sample\"\n cp -v \"${SAMPLE_CONFIG_FILE_PATH}\" \"${CONFIG_FILE_PATH}\"\nfi\n"; - }; -/* End PBXShellScriptBuildPhase section */ - -/* Begin PBXSourcesBuildPhase section */ - 8877C1682A2A0DC400C0B1D8 /* Sources */ = { - isa = PBXSourcesBuildPhase; - buildActionMask = 2147483647; - files = ( - 88269B532A2A196A00FC6503 /* EmbeddingsViewModel.swift in Sources */, - 8877C1722A2A0DC400C0B1D8 /* EmbeddingsScreen.swift in Sources */, - 8877C1702A2A0DC400C0B1D8 /* PaLMEmbeddingsApp.swift in Sources */, - ); - runOnlyForDeploymentPostprocessing = 0; - }; -/* End PBXSourcesBuildPhase section */ - -/* Begin XCBuildConfiguration section */ - 8877C1782A2A0DC500C0B1D8 /* Debug */ = { - isa = XCBuildConfiguration; - buildSettings = { - ALWAYS_SEARCH_USER_PATHS = NO; - CLANG_ANALYZER_NONNULL = YES; - CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE; - CLANG_CXX_LANGUAGE_STANDARD = "gnu++20"; - CLANG_ENABLE_MODULES = YES; - CLANG_ENABLE_OBJC_ARC = YES; - CLANG_ENABLE_OBJC_WEAK = YES; - CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; - CLANG_WARN_BOOL_CONVERSION = YES; - CLANG_WARN_COMMA = YES; - CLANG_WARN_CONSTANT_CONVERSION = YES; - CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES; - CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; - CLANG_WARN_DOCUMENTATION_COMMENTS = YES; - CLANG_WARN_EMPTY_BODY = YES; - CLANG_WARN_ENUM_CONVERSION = YES; - CLANG_WARN_INFINITE_RECURSION = YES; - CLANG_WARN_INT_CONVERSION = YES; - CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES; - CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES; - CLANG_WARN_OBJC_LITERAL_CONVERSION = YES; - CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; - CLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER = YES; - CLANG_WARN_RANGE_LOOP_ANALYSIS = YES; - CLANG_WARN_STRICT_PROTOTYPES = YES; - CLANG_WARN_SUSPICIOUS_MOVE = YES; - CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE; - CLANG_WARN_UNREACHABLE_CODE = YES; - CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; - COPY_PHASE_STRIP = NO; - DEBUG_INFORMATION_FORMAT = dwarf; - ENABLE_STRICT_OBJC_MSGSEND = YES; - ENABLE_TESTABILITY = YES; - GCC_C_LANGUAGE_STANDARD = gnu11; - GCC_DYNAMIC_NO_PIC = NO; - GCC_NO_COMMON_BLOCKS = YES; - GCC_OPTIMIZATION_LEVEL = 0; - GCC_PREPROCESSOR_DEFINITIONS = ( - "DEBUG=1", - "$(inherited)", - ); - GCC_WARN_64_TO_32_BIT_CONVERSION = YES; - GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; - GCC_WARN_UNDECLARED_SELECTOR = YES; - GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; - GCC_WARN_UNUSED_FUNCTION = YES; - GCC_WARN_UNUSED_VARIABLE = YES; - IPHONEOS_DEPLOYMENT_TARGET = 16.4; - MTL_ENABLE_DEBUG_INFO = INCLUDE_SOURCE; - MTL_FAST_MATH = YES; - ONLY_ACTIVE_ARCH = YES; - SDKROOT = iphoneos; - SWIFT_ACTIVE_COMPILATION_CONDITIONS = DEBUG; - SWIFT_OPTIMIZATION_LEVEL = "-Onone"; - }; - name = Debug; - }; - 8877C1792A2A0DC500C0B1D8 /* Release */ = { - isa = XCBuildConfiguration; - buildSettings = { - ALWAYS_SEARCH_USER_PATHS = 
NO; - CLANG_ANALYZER_NONNULL = YES; - CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE; - CLANG_CXX_LANGUAGE_STANDARD = "gnu++20"; - CLANG_ENABLE_MODULES = YES; - CLANG_ENABLE_OBJC_ARC = YES; - CLANG_ENABLE_OBJC_WEAK = YES; - CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; - CLANG_WARN_BOOL_CONVERSION = YES; - CLANG_WARN_COMMA = YES; - CLANG_WARN_CONSTANT_CONVERSION = YES; - CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES; - CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; - CLANG_WARN_DOCUMENTATION_COMMENTS = YES; - CLANG_WARN_EMPTY_BODY = YES; - CLANG_WARN_ENUM_CONVERSION = YES; - CLANG_WARN_INFINITE_RECURSION = YES; - CLANG_WARN_INT_CONVERSION = YES; - CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES; - CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES; - CLANG_WARN_OBJC_LITERAL_CONVERSION = YES; - CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; - CLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER = YES; - CLANG_WARN_RANGE_LOOP_ANALYSIS = YES; - CLANG_WARN_STRICT_PROTOTYPES = YES; - CLANG_WARN_SUSPICIOUS_MOVE = YES; - CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE; - CLANG_WARN_UNREACHABLE_CODE = YES; - CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; - COPY_PHASE_STRIP = NO; - DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; - ENABLE_NS_ASSERTIONS = NO; - ENABLE_STRICT_OBJC_MSGSEND = YES; - GCC_C_LANGUAGE_STANDARD = gnu11; - GCC_NO_COMMON_BLOCKS = YES; - GCC_WARN_64_TO_32_BIT_CONVERSION = YES; - GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; - GCC_WARN_UNDECLARED_SELECTOR = YES; - GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; - GCC_WARN_UNUSED_FUNCTION = YES; - GCC_WARN_UNUSED_VARIABLE = YES; - IPHONEOS_DEPLOYMENT_TARGET = 16.4; - MTL_ENABLE_DEBUG_INFO = NO; - MTL_FAST_MATH = YES; - SDKROOT = iphoneos; - SWIFT_COMPILATION_MODE = wholemodule; - SWIFT_OPTIMIZATION_LEVEL = "-O"; - VALIDATE_PRODUCT = YES; - }; - name = Release; - }; - 8877C17B2A2A0DC500C0B1D8 /* Debug */ = { - isa = XCBuildConfiguration; - buildSettings = { - ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; - ASSETCATALOG_COMPILER_GLOBAL_ACCENT_COLOR_NAME = AccentColor; - CODE_SIGN_STYLE = Automatic; - CURRENT_PROJECT_VERSION = 1; - DEVELOPMENT_ASSET_PATHS = "\"PaLMEmbeddings/Preview Content\""; - DEVELOPMENT_TEAM = ""; - ENABLE_PREVIEWS = YES; - GENERATE_INFOPLIST_FILE = YES; - INFOPLIST_KEY_UIApplicationSceneManifest_Generation = YES; - INFOPLIST_KEY_UIApplicationSupportsIndirectInputEvents = YES; - INFOPLIST_KEY_UILaunchScreen_Generation = YES; - INFOPLIST_KEY_UISupportedInterfaceOrientations_iPad = "UIInterfaceOrientationPortrait UIInterfaceOrientationPortraitUpsideDown UIInterfaceOrientationLandscapeLeft UIInterfaceOrientationLandscapeRight"; - INFOPLIST_KEY_UISupportedInterfaceOrientations_iPhone = "UIInterfaceOrientationPortrait UIInterfaceOrientationLandscapeLeft UIInterfaceOrientationLandscapeRight"; - LD_RUNPATH_SEARCH_PATHS = ( - "$(inherited)", - "@executable_path/Frameworks", - ); - MARKETING_VERSION = 1.0; - PRODUCT_BUNDLE_IDENTIFIER = com.example.google.generativeai.PaLMEmbeddings; - PRODUCT_NAME = "$(TARGET_NAME)"; - SWIFT_EMIT_LOC_STRINGS = YES; - SWIFT_VERSION = 5.0; - TARGETED_DEVICE_FAMILY = "1,2"; - }; - name = Debug; - }; - 8877C17C2A2A0DC500C0B1D8 /* Release */ = { - isa = XCBuildConfiguration; - buildSettings = { - ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; - ASSETCATALOG_COMPILER_GLOBAL_ACCENT_COLOR_NAME = AccentColor; - CODE_SIGN_STYLE = Automatic; - CURRENT_PROJECT_VERSION = 1; - DEVELOPMENT_ASSET_PATHS = "\"PaLMEmbeddings/Preview Content\""; - DEVELOPMENT_TEAM = ""; - ENABLE_PREVIEWS = YES; - GENERATE_INFOPLIST_FILE = YES; - 
INFOPLIST_KEY_UIApplicationSceneManifest_Generation = YES; - INFOPLIST_KEY_UIApplicationSupportsIndirectInputEvents = YES; - INFOPLIST_KEY_UILaunchScreen_Generation = YES; - INFOPLIST_KEY_UISupportedInterfaceOrientations_iPad = "UIInterfaceOrientationPortrait UIInterfaceOrientationPortraitUpsideDown UIInterfaceOrientationLandscapeLeft UIInterfaceOrientationLandscapeRight"; - INFOPLIST_KEY_UISupportedInterfaceOrientations_iPhone = "UIInterfaceOrientationPortrait UIInterfaceOrientationLandscapeLeft UIInterfaceOrientationLandscapeRight"; - LD_RUNPATH_SEARCH_PATHS = ( - "$(inherited)", - "@executable_path/Frameworks", - ); - MARKETING_VERSION = 1.0; - PRODUCT_BUNDLE_IDENTIFIER = com.example.google.generativeai.PaLMEmbeddings; - PRODUCT_NAME = "$(TARGET_NAME)"; - SWIFT_EMIT_LOC_STRINGS = YES; - SWIFT_VERSION = 5.0; - TARGETED_DEVICE_FAMILY = "1,2"; - }; - name = Release; - }; -/* End XCBuildConfiguration section */ - -/* Begin XCConfigurationList section */ - 8877C1672A2A0DC400C0B1D8 /* Build configuration list for PBXProject "PaLMEmbeddings" */ = { - isa = XCConfigurationList; - buildConfigurations = ( - 8877C1782A2A0DC500C0B1D8 /* Debug */, - 8877C1792A2A0DC500C0B1D8 /* Release */, - ); - defaultConfigurationIsVisible = 0; - defaultConfigurationName = Release; - }; - 8877C17A2A2A0DC500C0B1D8 /* Build configuration list for PBXNativeTarget "PaLMEmbeddings" */ = { - isa = XCConfigurationList; - buildConfigurations = ( - 8877C17B2A2A0DC500C0B1D8 /* Debug */, - 8877C17C2A2A0DC500C0B1D8 /* Release */, - ); - defaultConfigurationIsVisible = 0; - defaultConfigurationName = Release; - }; -/* End XCConfigurationList section */ - -/* Begin XCSwiftPackageProductDependency section */ - 88CD8EB62A2A1A0000934BBA /* GoogleGenerativeAI */ = { - isa = XCSwiftPackageProductDependency; - productName = GoogleGenerativeAI; - }; -/* End XCSwiftPackageProductDependency section */ - }; - rootObject = 8877C1642A2A0DC400C0B1D8 /* Project object */; -} diff --git a/Examples/PaLMEmbeddings/PaLMEmbeddings.xcodeproj/project.xcworkspace/contents.xcworkspacedata b/Examples/PaLMEmbeddings/PaLMEmbeddings.xcodeproj/project.xcworkspace/contents.xcworkspacedata deleted file mode 100644 index 919434a..0000000 --- a/Examples/PaLMEmbeddings/PaLMEmbeddings.xcodeproj/project.xcworkspace/contents.xcworkspacedata +++ /dev/null @@ -1,7 +0,0 @@ - - - - - diff --git a/Examples/PaLMEmbeddings/PaLMEmbeddings.xcodeproj/project.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist b/Examples/PaLMEmbeddings/PaLMEmbeddings.xcodeproj/project.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist deleted file mode 100644 index 18d9810..0000000 --- a/Examples/PaLMEmbeddings/PaLMEmbeddings.xcodeproj/project.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist +++ /dev/null @@ -1,8 +0,0 @@ - - - - - IDEDidComputeMac32BitWarning - - - diff --git a/Examples/PaLMEmbeddings/PaLMEmbeddings.xcodeproj/project.xcworkspace/xcshareddata/swiftpm/Package.resolved b/Examples/PaLMEmbeddings/PaLMEmbeddings.xcodeproj/project.xcworkspace/xcshareddata/swiftpm/Package.resolved deleted file mode 100644 index caac53b..0000000 --- a/Examples/PaLMEmbeddings/PaLMEmbeddings.xcodeproj/project.xcworkspace/xcshareddata/swiftpm/Package.resolved +++ /dev/null @@ -1,23 +0,0 @@ -{ - "pins" : [ - { - "identity" : "get", - "kind" : "remoteSourceControl", - "location" : "https://github.com/kean/Get", - "state" : { - "revision" : "12830cc64f31789ae6f4352d2d51d03a25fc3741", - "version" : "2.1.6" - } - }, - { - "identity" : "urlqueryencoder", - "kind" : "remoteSourceControl", - "location" : 
"https://github.com/CreateAPI/URLQueryEncoder", - "state" : { - "revision" : "4ce950479707ea109f229d7230ec074a133b15d7", - "version" : "0.2.1" - } - } - ], - "version" : 2 -} diff --git a/Examples/PaLMEmbeddings/PaLMEmbeddings.xcodeproj/xcshareddata/xcschemes/PaLMEmbeddings.xcscheme b/Examples/PaLMEmbeddings/PaLMEmbeddings.xcodeproj/xcshareddata/xcschemes/PaLMEmbeddings.xcscheme deleted file mode 100644 index 9b19f97..0000000 --- a/Examples/PaLMEmbeddings/PaLMEmbeddings.xcodeproj/xcshareddata/xcschemes/PaLMEmbeddings.xcscheme +++ /dev/null @@ -1,77 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/Examples/PaLMEmbeddings/PaLMEmbeddings/PaLM-Info-Sample.plist b/Examples/PaLMEmbeddings/PaLMEmbeddings/PaLM-Info-Sample.plist deleted file mode 100644 index 43401eb..0000000 --- a/Examples/PaLMEmbeddings/PaLMEmbeddings/PaLM-Info-Sample.plist +++ /dev/null @@ -1,8 +0,0 @@ - - - - - API_KEY - _API_KEY_ - - diff --git a/Examples/PaLMEmbeddings/PaLMEmbeddings/Screens/EmbeddingsScreen.swift b/Examples/PaLMEmbeddings/PaLMEmbeddings/Screens/EmbeddingsScreen.swift deleted file mode 100644 index 42253c7..0000000 --- a/Examples/PaLMEmbeddings/PaLMEmbeddings/Screens/EmbeddingsScreen.swift +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import SwiftUI - -struct EmbeddingsScreen: View { - @StateObject var viewModel = EmbeddingsViewModel() - - var body: some View { - NavigationStack { - VStack { - TextField("Text", text: $viewModel.inputText, axis: .vertical) - .lineLimit(10, reservesSpace: true) - .textFieldStyle(.roundedBorder) - - Button(action: onSummarizeTapped) { - if viewModel.inProgress { - ProgressView() - .progressViewStyle(CircularProgressViewStyle(tint: .white)) - .frame(maxWidth: .infinity, maxHeight: 8) - .padding(6) - } - else { - Text("Generate Embeddings") - .frame(maxWidth: .infinity, maxHeight: 50) - } - } - .frame(maxWidth: .infinity, maxHeight: 50) - .buttonStyle(.borderedProminent) - .controlSize(.large) - .padding(.vertical) - - TextField("Result", text: $viewModel.outputText, axis: .vertical) - .lineLimit(10, reservesSpace: true) - .textFieldStyle(.roundedBorder) - Spacer() - } - .navigationTitle("Text") - .padding() - } - } - - private func onSummarizeTapped() { - Task { - await viewModel.generateEmbeddings() - } - } -} - -struct EmbeddingsScreen_Previews: PreviewProvider { - static var previews: some View { - EmbeddingsScreen() - } -} diff --git a/Examples/PaLMEmbeddings/PaLMEmbeddings/ViewModels/EmbeddingsViewModel.swift b/Examples/PaLMEmbeddings/PaLMEmbeddings/ViewModels/EmbeddingsViewModel.swift deleted file mode 100644 index eb588e0..0000000 --- a/Examples/PaLMEmbeddings/PaLMEmbeddings/ViewModels/EmbeddingsViewModel.swift +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import Foundation -import GoogleGenerativeAI - -@MainActor -class EmbeddingsViewModel: ObservableObject { - @Published - var inputText = "" - - @Published - var embeddings = [Float]() - - @Published - var outputText = "" - - @Published - var inProgress = false - - /// Fetch the API key from `PaLM-Info.plist` - private var apiKey: String { - get { - guard let filePath = Bundle.main.path(forResource: "PaLM-Info", ofType: "plist") else { - fatalError("Couldn't find file 'PaLM-Info.plist'.") - } - let plist = NSDictionary(contentsOfFile: filePath) - guard let value = plist?.object(forKey: "API_KEY") as? String else { - fatalError("Couldn't find key 'API_KEY' in 'PaLM-Info.plist'.") - } - if (value.starts(with: "_")) { - fatalError("Follow the instructions at https://developers.generativeai.google/tutorials/setup to get a PaLM API key.") - } - return value - } - } - - private var palmClient: GenerativeLanguage? - - init() { - palmClient = GenerativeLanguage(apiKey: apiKey) - - $embeddings - .map { embeddings in - embeddings.map { String($0) } - } - .map { stringValues in - stringValues.joined(separator: "; ") - } - .assign(to: &$outputText) - } - - func generateEmbeddings() async { - do { - inProgress = true - let response = try await palmClient?.generateEmbeddings(from: inputText) - inProgress = false - - if let embedding = response?.embedding, let value = embedding.value { - self.embeddings = value - } - } - catch { - print(error.localizedDescription) - } - } -} diff --git a/Examples/PaLMText/PaLMText.xcodeproj/project.pbxproj b/Examples/PaLMText/PaLMText.xcodeproj/project.pbxproj deleted file mode 100644 index c45b49d..0000000 --- a/Examples/PaLMText/PaLMText.xcodeproj/project.pbxproj +++ /dev/null @@ -1,420 +0,0 @@ -// !$*UTF8*$! 
-{ - archiveVersion = 1; - classes = { - }; - objectVersion = 56; - objects = { - -/* Begin PBXBuildFile section */ - 88B372C42A0648D5008DF7EE /* GoogleGenerativeAI in Frameworks */ = {isa = PBXBuildFile; productRef = 88B372C32A0648D5008DF7EE /* GoogleGenerativeAI */; }; - 88B372C72A064AF0008DF7EE /* PaLM-Info.plist in Resources */ = {isa = PBXBuildFile; fileRef = 88B372C62A064AF0008DF7EE /* PaLM-Info.plist */; }; - 88D3FF822A05B05000B389CF /* PaLMTextApp.swift in Sources */ = {isa = PBXBuildFile; fileRef = 88D3FF812A05B05000B389CF /* PaLMTextApp.swift */; }; - 88D3FF842A05B05000B389CF /* SummarizeScreen.swift in Sources */ = {isa = PBXBuildFile; fileRef = 88D3FF832A05B05000B389CF /* SummarizeScreen.swift */; }; - 88D3FF862A05B05100B389CF /* Assets.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = 88D3FF852A05B05100B389CF /* Assets.xcassets */; }; - 88D3FF892A05B05100B389CF /* Preview Assets.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = 88D3FF882A05B05100B389CF /* Preview Assets.xcassets */; }; - 88D3FF942A062F4000B389CF /* SummarizeViewModel.swift in Sources */ = {isa = PBXBuildFile; fileRef = 88D3FF932A062F4000B389CF /* SummarizeViewModel.swift */; }; -/* End PBXBuildFile section */ - -/* Begin PBXFileReference section */ - 88B372C12A064878008DF7EE /* generative-ai-swift */ = {isa = PBXFileReference; lastKnownFileType = wrapper; name = "generative-ai-swift"; path = ../..; sourceTree = ""; }; - 88B372C62A064AF0008DF7EE /* PaLM-Info.plist */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.plist.xml; path = "PaLM-Info.plist"; sourceTree = ""; }; - 88D3FF7E2A05B05000B389CF /* PaLMText.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = PaLMText.app; sourceTree = BUILT_PRODUCTS_DIR; }; - 88D3FF812A05B05000B389CF /* PaLMTextApp.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = PaLMTextApp.swift; sourceTree = ""; }; - 88D3FF832A05B05000B389CF /* SummarizeScreen.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = SummarizeScreen.swift; sourceTree = ""; }; - 88D3FF852A05B05100B389CF /* Assets.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Assets.xcassets; sourceTree = ""; }; - 88D3FF882A05B05100B389CF /* Preview Assets.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = "Preview Assets.xcassets"; sourceTree = ""; }; - 88D3FF932A062F4000B389CF /* SummarizeViewModel.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = SummarizeViewModel.swift; sourceTree = ""; }; -/* End PBXFileReference section */ - -/* Begin PBXFrameworksBuildPhase section */ - 88D3FF7B2A05B05000B389CF /* Frameworks */ = { - isa = PBXFrameworksBuildPhase; - buildActionMask = 2147483647; - files = ( - 88B372C42A0648D5008DF7EE /* GoogleGenerativeAI in Frameworks */, - ); - runOnlyForDeploymentPostprocessing = 0; - }; -/* End PBXFrameworksBuildPhase section */ - -/* Begin PBXGroup section */ - 88B372C02A064878008DF7EE /* Packages */ = { - isa = PBXGroup; - children = ( - 88B372C12A064878008DF7EE /* generative-ai-swift */, - ); - name = Packages; - sourceTree = ""; - }; - 88B372C22A0648D5008DF7EE /* Frameworks */ = { - isa = PBXGroup; - children = ( - ); - name = Frameworks; - sourceTree = ""; - }; - 88D3FF752A05B05000B389CF = { - isa = PBXGroup; - children = ( - 88B372C02A064878008DF7EE /* Packages */, - 88D3FF802A05B05000B389CF /* PaLMText */, - 88D3FF7F2A05B05000B389CF /* Products 
*/, - 88B372C22A0648D5008DF7EE /* Frameworks */, - ); - sourceTree = ""; - }; - 88D3FF7F2A05B05000B389CF /* Products */ = { - isa = PBXGroup; - children = ( - 88D3FF7E2A05B05000B389CF /* PaLMText.app */, - ); - name = Products; - sourceTree = ""; - }; - 88D3FF802A05B05000B389CF /* PaLMText */ = { - isa = PBXGroup; - children = ( - 88D3FF922A05B0D600B389CF /* ViewModels */, - 88D3FF8F2A05B0BE00B389CF /* Screens */, - 88D3FF812A05B05000B389CF /* PaLMTextApp.swift */, - 88D3FF852A05B05100B389CF /* Assets.xcassets */, - 88B372C62A064AF0008DF7EE /* PaLM-Info.plist */, - 88D3FF872A05B05100B389CF /* Preview Content */, - ); - path = PaLMText; - sourceTree = ""; - }; - 88D3FF872A05B05100B389CF /* Preview Content */ = { - isa = PBXGroup; - children = ( - 88D3FF882A05B05100B389CF /* Preview Assets.xcassets */, - ); - path = "Preview Content"; - sourceTree = ""; - }; - 88D3FF8F2A05B0BE00B389CF /* Screens */ = { - isa = PBXGroup; - children = ( - 88D3FF832A05B05000B389CF /* SummarizeScreen.swift */, - ); - path = Screens; - sourceTree = ""; - }; - 88D3FF922A05B0D600B389CF /* ViewModels */ = { - isa = PBXGroup; - children = ( - 88D3FF932A062F4000B389CF /* SummarizeViewModel.swift */, - ); - path = ViewModels; - sourceTree = ""; - }; -/* End PBXGroup section */ - -/* Begin PBXNativeTarget section */ - 88D3FF7D2A05B05000B389CF /* PaLMText */ = { - isa = PBXNativeTarget; - buildConfigurationList = 88D3FF8C2A05B05100B389CF /* Build configuration list for PBXNativeTarget "PaLMText" */; - buildPhases = ( - 88B372C52A064A74008DF7EE /* ShellScript */, - 88D3FF7A2A05B05000B389CF /* Sources */, - 88D3FF7B2A05B05000B389CF /* Frameworks */, - 88D3FF7C2A05B05000B389CF /* Resources */, - ); - buildRules = ( - ); - dependencies = ( - ); - name = PaLMText; - packageProductDependencies = ( - 88B372C32A0648D5008DF7EE /* GoogleGenerativeAI */, - ); - productName = PaLMText; - productReference = 88D3FF7E2A05B05000B389CF /* PaLMText.app */; - productType = "com.apple.product-type.application"; - }; -/* End PBXNativeTarget section */ - -/* Begin PBXProject section */ - 88D3FF762A05B05000B389CF /* Project object */ = { - isa = PBXProject; - attributes = { - BuildIndependentTargetsInParallel = 1; - LastSwiftUpdateCheck = 1430; - LastUpgradeCheck = 1430; - TargetAttributes = { - 88D3FF7D2A05B05000B389CF = { - CreatedOnToolsVersion = 14.3; - }; - }; - }; - buildConfigurationList = 88D3FF792A05B05000B389CF /* Build configuration list for PBXProject "PaLMText" */; - compatibilityVersion = "Xcode 14.0"; - developmentRegion = en; - hasScannedForEncodings = 0; - knownRegions = ( - en, - Base, - ); - mainGroup = 88D3FF752A05B05000B389CF; - productRefGroup = 88D3FF7F2A05B05000B389CF /* Products */; - projectDirPath = ""; - projectRoot = ""; - targets = ( - 88D3FF7D2A05B05000B389CF /* PaLMText */, - ); - }; -/* End PBXProject section */ - -/* Begin PBXResourcesBuildPhase section */ - 88D3FF7C2A05B05000B389CF /* Resources */ = { - isa = PBXResourcesBuildPhase; - buildActionMask = 2147483647; - files = ( - 88D3FF892A05B05100B389CF /* Preview Assets.xcassets in Resources */, - 88D3FF862A05B05100B389CF /* Assets.xcassets in Resources */, - 88B372C72A064AF0008DF7EE /* PaLM-Info.plist in Resources */, - ); - runOnlyForDeploymentPostprocessing = 0; - }; -/* End PBXResourcesBuildPhase section */ - -/* Begin PBXShellScriptBuildPhase section */ - 88B372C52A064A74008DF7EE /* ShellScript */ = { - isa = PBXShellScriptBuildPhase; - buildActionMask = 2147483647; - files = ( - ); - inputFileListPaths = ( - ); - inputPaths = ( - ); - 
outputFileListPaths = ( - ); - outputPaths = ( - "$(SRCROOT)/${PRODUCT_NAME}/PaLM-Info.plist", - ); - runOnlyForDeploymentPostprocessing = 0; - shellPath = /bin/sh; - shellScript = "CONFIG_FILE_BASE_NAME=\"PaLM-Info\"\n\nCONFIG_FILE_NAME=${CONFIG_FILE_BASE_NAME}.plist\nSAMPLE_CONFIG_FILE_NAME=${CONFIG_FILE_BASE_NAME}-Sample.plist\n\nCONFIG_FILE_PATH=$SRCROOT/$PRODUCT_NAME/$CONFIG_FILE_NAME\nSAMPLE_CONFIG_FILE_PATH=$SRCROOT/$PRODUCT_NAME/$SAMPLE_CONFIG_FILE_NAME\n\nif [ -f \"$CONFIG_FILE_PATH\" ]; then\n echo \"$CONFIG_FILE_PATH exists.\"\nelse\n echo \"$CONFIG_FILE_PATH does not exist, copying sample\"\n cp -v \"${SAMPLE_CONFIG_FILE_PATH}\" \"${CONFIG_FILE_PATH}\"\nfi\n"; - }; -/* End PBXShellScriptBuildPhase section */ - -/* Begin PBXSourcesBuildPhase section */ - 88D3FF7A2A05B05000B389CF /* Sources */ = { - isa = PBXSourcesBuildPhase; - buildActionMask = 2147483647; - files = ( - 88D3FF842A05B05000B389CF /* SummarizeScreen.swift in Sources */, - 88D3FF822A05B05000B389CF /* PaLMTextApp.swift in Sources */, - 88D3FF942A062F4000B389CF /* SummarizeViewModel.swift in Sources */, - ); - runOnlyForDeploymentPostprocessing = 0; - }; -/* End PBXSourcesBuildPhase section */ - -/* Begin XCBuildConfiguration section */ - 88D3FF8A2A05B05100B389CF /* Debug */ = { - isa = XCBuildConfiguration; - buildSettings = { - ALWAYS_SEARCH_USER_PATHS = NO; - CLANG_ANALYZER_NONNULL = YES; - CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE; - CLANG_CXX_LANGUAGE_STANDARD = "gnu++20"; - CLANG_ENABLE_MODULES = YES; - CLANG_ENABLE_OBJC_ARC = YES; - CLANG_ENABLE_OBJC_WEAK = YES; - CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; - CLANG_WARN_BOOL_CONVERSION = YES; - CLANG_WARN_COMMA = YES; - CLANG_WARN_CONSTANT_CONVERSION = YES; - CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES; - CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; - CLANG_WARN_DOCUMENTATION_COMMENTS = YES; - CLANG_WARN_EMPTY_BODY = YES; - CLANG_WARN_ENUM_CONVERSION = YES; - CLANG_WARN_INFINITE_RECURSION = YES; - CLANG_WARN_INT_CONVERSION = YES; - CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES; - CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES; - CLANG_WARN_OBJC_LITERAL_CONVERSION = YES; - CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; - CLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER = YES; - CLANG_WARN_RANGE_LOOP_ANALYSIS = YES; - CLANG_WARN_STRICT_PROTOTYPES = YES; - CLANG_WARN_SUSPICIOUS_MOVE = YES; - CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE; - CLANG_WARN_UNREACHABLE_CODE = YES; - CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; - COPY_PHASE_STRIP = NO; - DEBUG_INFORMATION_FORMAT = dwarf; - ENABLE_STRICT_OBJC_MSGSEND = YES; - ENABLE_TESTABILITY = YES; - GCC_C_LANGUAGE_STANDARD = gnu11; - GCC_DYNAMIC_NO_PIC = NO; - GCC_NO_COMMON_BLOCKS = YES; - GCC_OPTIMIZATION_LEVEL = 0; - GCC_PREPROCESSOR_DEFINITIONS = ( - "DEBUG=1", - "$(inherited)", - ); - GCC_WARN_64_TO_32_BIT_CONVERSION = YES; - GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; - GCC_WARN_UNDECLARED_SELECTOR = YES; - GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; - GCC_WARN_UNUSED_FUNCTION = YES; - GCC_WARN_UNUSED_VARIABLE = YES; - IPHONEOS_DEPLOYMENT_TARGET = 16.4; - MTL_ENABLE_DEBUG_INFO = INCLUDE_SOURCE; - MTL_FAST_MATH = YES; - ONLY_ACTIVE_ARCH = YES; - SDKROOT = iphoneos; - SWIFT_ACTIVE_COMPILATION_CONDITIONS = DEBUG; - SWIFT_OPTIMIZATION_LEVEL = "-Onone"; - }; - name = Debug; - }; - 88D3FF8B2A05B05100B389CF /* Release */ = { - isa = XCBuildConfiguration; - buildSettings = { - ALWAYS_SEARCH_USER_PATHS = NO; - CLANG_ANALYZER_NONNULL = YES; - CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE; - 
CLANG_CXX_LANGUAGE_STANDARD = "gnu++20"; - CLANG_ENABLE_MODULES = YES; - CLANG_ENABLE_OBJC_ARC = YES; - CLANG_ENABLE_OBJC_WEAK = YES; - CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; - CLANG_WARN_BOOL_CONVERSION = YES; - CLANG_WARN_COMMA = YES; - CLANG_WARN_CONSTANT_CONVERSION = YES; - CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES; - CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; - CLANG_WARN_DOCUMENTATION_COMMENTS = YES; - CLANG_WARN_EMPTY_BODY = YES; - CLANG_WARN_ENUM_CONVERSION = YES; - CLANG_WARN_INFINITE_RECURSION = YES; - CLANG_WARN_INT_CONVERSION = YES; - CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES; - CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES; - CLANG_WARN_OBJC_LITERAL_CONVERSION = YES; - CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; - CLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER = YES; - CLANG_WARN_RANGE_LOOP_ANALYSIS = YES; - CLANG_WARN_STRICT_PROTOTYPES = YES; - CLANG_WARN_SUSPICIOUS_MOVE = YES; - CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE; - CLANG_WARN_UNREACHABLE_CODE = YES; - CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; - COPY_PHASE_STRIP = NO; - DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; - ENABLE_NS_ASSERTIONS = NO; - ENABLE_STRICT_OBJC_MSGSEND = YES; - GCC_C_LANGUAGE_STANDARD = gnu11; - GCC_NO_COMMON_BLOCKS = YES; - GCC_WARN_64_TO_32_BIT_CONVERSION = YES; - GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; - GCC_WARN_UNDECLARED_SELECTOR = YES; - GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; - GCC_WARN_UNUSED_FUNCTION = YES; - GCC_WARN_UNUSED_VARIABLE = YES; - IPHONEOS_DEPLOYMENT_TARGET = 16.4; - MTL_ENABLE_DEBUG_INFO = NO; - MTL_FAST_MATH = YES; - SDKROOT = iphoneos; - SWIFT_COMPILATION_MODE = wholemodule; - SWIFT_OPTIMIZATION_LEVEL = "-O"; - VALIDATE_PRODUCT = YES; - }; - name = Release; - }; - 88D3FF8D2A05B05100B389CF /* Debug */ = { - isa = XCBuildConfiguration; - buildSettings = { - ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; - ASSETCATALOG_COMPILER_GLOBAL_ACCENT_COLOR_NAME = AccentColor; - CODE_SIGN_STYLE = Automatic; - CURRENT_PROJECT_VERSION = 1; - DEVELOPMENT_ASSET_PATHS = "\"PaLMText/Preview Content\""; - DEVELOPMENT_TEAM = ""; - ENABLE_PREVIEWS = YES; - GENERATE_INFOPLIST_FILE = YES; - INFOPLIST_KEY_UIApplicationSceneManifest_Generation = YES; - INFOPLIST_KEY_UIApplicationSupportsIndirectInputEvents = YES; - INFOPLIST_KEY_UILaunchScreen_Generation = YES; - INFOPLIST_KEY_UISupportedInterfaceOrientations_iPad = "UIInterfaceOrientationPortrait UIInterfaceOrientationPortraitUpsideDown UIInterfaceOrientationLandscapeLeft UIInterfaceOrientationLandscapeRight"; - INFOPLIST_KEY_UISupportedInterfaceOrientations_iPhone = "UIInterfaceOrientationPortrait UIInterfaceOrientationLandscapeLeft UIInterfaceOrientationLandscapeRight"; - LD_RUNPATH_SEARCH_PATHS = ( - "$(inherited)", - "@executable_path/Frameworks", - ); - MARKETING_VERSION = 1.0; - PRODUCT_BUNDLE_IDENTIFIER = com.example.google.generativeai.PaLMText; - PRODUCT_NAME = "$(TARGET_NAME)"; - SWIFT_EMIT_LOC_STRINGS = YES; - SWIFT_VERSION = 5.0; - TARGETED_DEVICE_FAMILY = "1,2"; - }; - name = Debug; - }; - 88D3FF8E2A05B05100B389CF /* Release */ = { - isa = XCBuildConfiguration; - buildSettings = { - ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; - ASSETCATALOG_COMPILER_GLOBAL_ACCENT_COLOR_NAME = AccentColor; - CODE_SIGN_STYLE = Automatic; - CURRENT_PROJECT_VERSION = 1; - DEVELOPMENT_ASSET_PATHS = "\"PaLMText/Preview Content\""; - DEVELOPMENT_TEAM = ""; - ENABLE_PREVIEWS = YES; - GENERATE_INFOPLIST_FILE = YES; - INFOPLIST_KEY_UIApplicationSceneManifest_Generation = YES; - INFOPLIST_KEY_UIApplicationSupportsIndirectInputEvents = 
YES; - INFOPLIST_KEY_UILaunchScreen_Generation = YES; - INFOPLIST_KEY_UISupportedInterfaceOrientations_iPad = "UIInterfaceOrientationPortrait UIInterfaceOrientationPortraitUpsideDown UIInterfaceOrientationLandscapeLeft UIInterfaceOrientationLandscapeRight"; - INFOPLIST_KEY_UISupportedInterfaceOrientations_iPhone = "UIInterfaceOrientationPortrait UIInterfaceOrientationLandscapeLeft UIInterfaceOrientationLandscapeRight"; - LD_RUNPATH_SEARCH_PATHS = ( - "$(inherited)", - "@executable_path/Frameworks", - ); - MARKETING_VERSION = 1.0; - PRODUCT_BUNDLE_IDENTIFIER = com.example.google.generativeai.PaLMText; - PRODUCT_NAME = "$(TARGET_NAME)"; - SWIFT_EMIT_LOC_STRINGS = YES; - SWIFT_VERSION = 5.0; - TARGETED_DEVICE_FAMILY = "1,2"; - }; - name = Release; - }; -/* End XCBuildConfiguration section */ - -/* Begin XCConfigurationList section */ - 88D3FF792A05B05000B389CF /* Build configuration list for PBXProject "PaLMText" */ = { - isa = XCConfigurationList; - buildConfigurations = ( - 88D3FF8A2A05B05100B389CF /* Debug */, - 88D3FF8B2A05B05100B389CF /* Release */, - ); - defaultConfigurationIsVisible = 0; - defaultConfigurationName = Release; - }; - 88D3FF8C2A05B05100B389CF /* Build configuration list for PBXNativeTarget "PaLMText" */ = { - isa = XCConfigurationList; - buildConfigurations = ( - 88D3FF8D2A05B05100B389CF /* Debug */, - 88D3FF8E2A05B05100B389CF /* Release */, - ); - defaultConfigurationIsVisible = 0; - defaultConfigurationName = Release; - }; -/* End XCConfigurationList section */ - -/* Begin XCSwiftPackageProductDependency section */ - 88B372C32A0648D5008DF7EE /* GoogleGenerativeAI */ = { - isa = XCSwiftPackageProductDependency; - productName = GoogleGenerativeAI; - }; -/* End XCSwiftPackageProductDependency section */ - }; - rootObject = 88D3FF762A05B05000B389CF /* Project object */; -} diff --git a/Examples/PaLMText/PaLMText.xcodeproj/project.xcworkspace/contents.xcworkspacedata b/Examples/PaLMText/PaLMText.xcodeproj/project.xcworkspace/contents.xcworkspacedata deleted file mode 100644 index 919434a..0000000 --- a/Examples/PaLMText/PaLMText.xcodeproj/project.xcworkspace/contents.xcworkspacedata +++ /dev/null @@ -1,7 +0,0 @@ - - - - - diff --git a/Examples/PaLMText/PaLMText.xcodeproj/project.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist b/Examples/PaLMText/PaLMText.xcodeproj/project.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist deleted file mode 100644 index 18d9810..0000000 --- a/Examples/PaLMText/PaLMText.xcodeproj/project.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist +++ /dev/null @@ -1,8 +0,0 @@ - - - - - IDEDidComputeMac32BitWarning - - - diff --git a/Examples/PaLMText/PaLMText.xcodeproj/project.xcworkspace/xcshareddata/swiftpm/Package.resolved b/Examples/PaLMText/PaLMText.xcodeproj/project.xcworkspace/xcshareddata/swiftpm/Package.resolved deleted file mode 100644 index caac53b..0000000 --- a/Examples/PaLMText/PaLMText.xcodeproj/project.xcworkspace/xcshareddata/swiftpm/Package.resolved +++ /dev/null @@ -1,23 +0,0 @@ -{ - "pins" : [ - { - "identity" : "get", - "kind" : "remoteSourceControl", - "location" : "https://github.com/kean/Get", - "state" : { - "revision" : "12830cc64f31789ae6f4352d2d51d03a25fc3741", - "version" : "2.1.6" - } - }, - { - "identity" : "urlqueryencoder", - "kind" : "remoteSourceControl", - "location" : "https://github.com/CreateAPI/URLQueryEncoder", - "state" : { - "revision" : "4ce950479707ea109f229d7230ec074a133b15d7", - "version" : "0.2.1" - } - } - ], - "version" : 2 -} diff --git a/Examples/PaLMText/PaLMText/PaLM-Info-Sample.plist 
b/Examples/PaLMText/PaLMText/PaLM-Info-Sample.plist deleted file mode 100644 index 43401eb..0000000 --- a/Examples/PaLMText/PaLMText/PaLM-Info-Sample.plist +++ /dev/null @@ -1,8 +0,0 @@ - - - - - API_KEY - _API_KEY_ - - diff --git a/Examples/PaLMText/PaLMText/ViewModels/SummarizeViewModel.swift b/Examples/PaLMText/PaLMText/ViewModels/SummarizeViewModel.swift deleted file mode 100644 index bf3c152..0000000 --- a/Examples/PaLMText/PaLMText/ViewModels/SummarizeViewModel.swift +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import Foundation -import GoogleGenerativeAI - -@MainActor -class SummarizeViewModel: ObservableObject { - @Published - var inputText = "" - - @Published - var outputText = "" - - @Published - var inProgress = false - - /// Fetch the API key from `PaLM-Info.plist` - private var apiKey: String { - get { - guard let filePath = Bundle.main.path(forResource: "PaLM-Info", ofType: "plist") else { - fatalError("Couldn't find file 'PaLM-Info.plist'.") - } - let plist = NSDictionary(contentsOfFile: filePath) - guard let value = plist?.object(forKey: "API_KEY") as? String else { - fatalError("Couldn't find key 'API_KEY' in 'PaLM-Info.plist'.") - } - if (value.starts(with: "_")) { - fatalError("Follow the instructions at https://developers.generativeai.google/tutorials/setup to get a PaLM API key.") - } - return value - } - } - - private var palmClient: GenerativeLanguage? - - init() { - palmClient = GenerativeLanguage(apiKey: apiKey) - } - - private var prompt: String { - "Summarize the following text for me: \(inputText)" - } - - func summarize() async { - do { - inProgress = true - let response = try await palmClient?.generateText(with: prompt) - inProgress = false - - if let candidate = response?.candidates?.first, let text = candidate.output { - outputText = text - } - } - catch { - print(error.localizedDescription) - } - } -} diff --git a/LICENSE b/LICENSE index 7a4a3ea..261eeb9 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,3 @@ - Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -199,4 +198,4 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and - limitations under the License. \ No newline at end of file + limitations under the License. 
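Editor's note on migration: the deleted `SummarizeViewModel` above read its API key from a local plist at run time instead of hard-coding it, and apps moving to the new SDK can keep that pattern. The sketch below is illustrative only; the plist name and the `API_KEY` key are assumptions carried over from the deleted sample, not something this diff defines.

```swift
import Foundation

// Minimal sketch of the plist-based key lookup used by the deleted sample,
// kept generic so it can back the new SDK as well. Names are assumptions.
enum APIKey {
  static func value(forResource resource: String = "GenerativeAI-Info",
                    key: String = "API_KEY") -> String {
    guard let filePath = Bundle.main.path(forResource: resource, ofType: "plist"),
          let plist = NSDictionary(contentsOfFile: filePath),
          let value = plist.object(forKey: key) as? String,
          !value.starts(with: "_") else {
      // Placeholder values in the sample plist start with "_".
      fatalError("Add your API key to \(resource).plist under '\(key)'.")
    }
    return value
  }
}
```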
diff --git a/Mintfile b/Mintfile new file mode 100644 index 0000000..61b0257 --- /dev/null +++ b/Mintfile @@ -0,0 +1 @@ +nicklockwood/SwiftFormat@0.52.10 diff --git a/Package.resolved b/Package.resolved deleted file mode 100644 index caac53b..0000000 --- a/Package.resolved +++ /dev/null @@ -1,23 +0,0 @@ -{ - "pins" : [ - { - "identity" : "get", - "kind" : "remoteSourceControl", - "location" : "https://github.com/kean/Get", - "state" : { - "revision" : "12830cc64f31789ae6f4352d2d51d03a25fc3741", - "version" : "2.1.6" - } - }, - { - "identity" : "urlqueryencoder", - "kind" : "remoteSourceControl", - "location" : "https://github.com/CreateAPI/URLQueryEncoder", - "state" : { - "revision" : "4ce950479707ea109f229d7230ec074a133b15d7", - "version" : "0.2.1" - } - } - ], - "version" : 2 -} diff --git a/Package.swift b/Package.swift index 3b4e971..5245178 100644 --- a/Package.swift +++ b/Package.swift @@ -1,4 +1,5 @@ -// swift-tools-version: 5.7.1 +// swift-tools-version: 5.9 +// The swift-tools-version declares the minimum version of Swift required to build this package. // Copyright 2023 Google LLC // @@ -17,48 +18,34 @@ import PackageDescription let package = Package( - name: "GoogleGenerativeAI", - platforms: [.iOS(.v13), .macOS(.v12), .watchOS(.v7), .tvOS(.v13)], + name: "generative-ai-swift", + platforms: [ + .iOS(.v15), + .macOS(.v12), + ], products: [ + // Products define the executables and libraries a package produces, making them visible to + // other packages. .library( name: "GoogleGenerativeAI", - targets: ["GoogleGenerativeAI"]), - ], - dependencies: [ - .package(url: "https://github.com/kean/Get", .upToNextMajor(from: "2.0.0")), - .package(url: "https://github.com/CreateAPI/URLQueryEncoder", exact: "0.2.1"), + targets: ["GoogleGenerativeAI"] + ), ], targets: [ + // Targets are the basic building blocks of a package, defining a module or a test suite. + // Targets can depend on other targets in this package and products from dependencies. .target( name: "GoogleGenerativeAI", - dependencies: [ - .product(name: "Get", package: "Get"), - .product(name: "URLQueryEncoder", package: "URLQueryEncoder"), - ], path: "Sources" ), .testTarget( name: "GoogleGenerativeAITests", - dependencies: ["GoogleGenerativeAI"]), - .binaryTarget( - name: "create-api", - url: "https://github.com/CreateAPI/CreateAPI/releases/download/0.1.1/create-api.artifactbundle.zip", - checksum: "0f0cfe7300580ef3062aacf4c4936d942f5a24ab971e722566f937fa7714369a" - ), - .plugin( - name: "CreateAPI", - capability: .command( - intent: .custom( - verb: "generate-api", - description: "Generates the OpenAPI entities and paths using CreateAPI" - ), - permissions: [ - .writeToPackageDirectory(reason: "To output the generated source code") - ] - ), - dependencies: [ - .target(name: "create-api") + dependencies: ["GoogleGenerativeAI"], + path: "Tests", + resources: [ + .process("GoogleAITests/GenerateContentResponses"), + .process("GoogleAITests/SampleResponses"), ] - ) + ), ] ) diff --git a/Plugins/CreateAPI/CreateAPIPlugin.swift b/Plugins/CreateAPI/CreateAPIPlugin.swift deleted file mode 100644 index be8b009..0000000 --- a/Plugins/CreateAPI/CreateAPIPlugin.swift +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import Foundation -import PackagePlugin - -@main -struct Plugin: CommandPlugin { - func performCommand(context: PluginContext, arguments: [String]) async throws { - let createAPI = try context.tool(named: "create-api") - let workingDirectory = context.package.directory.appending("Sources") - - let process = Process() - process.currentDirectoryURL = URL(fileURLWithPath: workingDirectory.string) - process.executableURL = URL(fileURLWithPath: createAPI.path.string) - process.arguments = [ - "generate", - "generativelanguage-v1beta3.json", - "--output", "OpenAPI", - "--config", ".create-api.yml" - ] - - try process.run() - process.waitUntilExit() - } -} diff --git a/README.md b/README.md index 728be01..412b406 100644 --- a/README.md +++ b/README.md @@ -1,90 +1,17 @@ -# Google Generative AI SDK for Swift +# Google AI Swift SDK -[![](https://img.shields.io/endpoint?url=https%3A%2F%2Fswiftpackageindex.com%2Fapi%2Fpackages%2Fgoogle%2Fgenerative-ai-swift%2Fbadge%3Ftype%3Dswift-versions)](https://swiftpackageindex.com/google/generative-ai-swift) -[![](https://img.shields.io/endpoint?url=https%3A%2F%2Fswiftpackageindex.com%2Fapi%2Fpackages%2Fgoogle%2Fgenerative-ai-swift%2Fbadge%3Ftype%3Dplatforms)](https://swiftpackageindex.com/google/generative-ai-swift) +> [!IMPORTANT] +> Thanks for your interest in the Google AI SDKs! **You can start using this SDK and its samples on December 13, 2023.** Until then, check out our [blog post](https://blog.google/technology/ai/google-gemini-ai/) to learn more about Google's Gemini multimodal model. +> [!IMPORTANT] +> If you are using the PaLM SDK for Swift, please see [Developers who use the PaLM SDK for Swift](#developers-who-use-the-palm-sdk-for-swift) for instructions. -The Google Generative AI SDK for Swift allows developers to use state-of-the-art Large Language Models (LLMs) to build language applications. +## Developers who use the PaLM SDK for Swift -Once you've added the Swift package to your Swift application, you can call the API as follows: +​​If you're using the PaLM SDK for Swift, review the information below to continue using the PaLM SDK until you've migrated to the new version that allows you to use Gemini. -```swift -import GoogleGenerativeAI +- To continue using PaLM models, make sure your app depends on version [`0.3.0`](https://github.com/google/generative-ai-swift/releases/tag/0.3.0) _up to_ the next minor version ([`0.4.0`](https://github.com/google/generative-ai-swift/releases/tag/0.4.0)) of `generative-ai-swift`. -let palmClient = GenerativeLanguage(apiKey: "YOUR API KEY") -response = try await palmClient.chat(message: "Hello") -``` +- When you're ready to use Gemini models, migrate your code to the Gemini API and update your app's `generative-ai-swift` dependency to version `0.4.0` or higher. - -## Getting Started - -This repository contains a few sample apps. To try them out, follow these steps: - -1. Check out this repository. - ```swift - git clone https://github.com/google/generative-ai-swift - ``` -1. 
Follow the instructions on the [setup page](https://developers.generativeai.google/tutorials/setup) to obtain an API key. -1. Open and build one of the examples in the `Examples` folder. -1. Paste the API key into the `API_KEY` property in the `PaLM-Info.plist` file. -1. Run the app. - - -## Using the PaLM SDK in your own app - -To use the Swift SDK for the PaLM API in your own apps, follow these steps: - -1. Create a new Swift app (or use your existing app). -1. Right-click on your project in the project navigator. -1. Select _Add Packages_ from the context menu. -1. In the _Add Packages_ dialog, paste the package URL into the search bar: https://github.com/google/generative-ai-swift -1. Click on _Add Package_. Xcode will now add the _GoogleGenerativeAI_ to your project. - -### Initializing the API client - -Before you can make any API calls, you need to import and initialize the API -client. - -1. Import the `GoogleGenerativeAI` module: - ```swift - import GoogleGenerativeAI - ``` -1. Initialize the API client: - ```swift - let palmClient = GenerativeLanguage(apiKey: "YOUR API KEY") - ``` - -### Calling the API - -Now you're ready to call the PaLM API's methods. - -> **Note**: All API methods are asynchronous, so you need to call them using Swift's -async/await. - -For example, here is how you can call the `generateText` method to summarize a Wikipedia article: - -```swift -let prompt = "Summarise the following text: https://wikipedia.org/..." - -let response = try await palmClient.generateText(with: prompt) - -if let candidate = response?.candidates?.first, let text = candidate.output { - print(text) -} -``` - - -## Documentation - -You can find the documentation for the PaLM API at https://developers.generativeai.google.com/guide - - -## Contributing - -See [Contributing](docs/CONTRIBUTING.md) for more information on contributing to the Generative AI SDK for Swift. - - -## License - -The contents of this repository are licensed under the -[Apache License, version 2.0](http://www.apache.org/licenses/LICENSE-2.0). +To see the PaLM documentation and code, go to the [`palm` branch](https://github.com/google/generative-ai-swift/tree/palm). diff --git a/Sources/.create-api.yml b/Sources/.create-api.yml deleted file mode 100644 index 7c779e7..0000000 --- a/Sources/.create-api.yml +++ /dev/null @@ -1,32 +0,0 @@ -module: GenerativeLanguage -generate: -- entities -- enums -indentation: spaces -spaceWidth: 2 -fileHeaderComment: | - // Generated by Create API - // https://github.com/CreateAPI/CreateAPI - // - // Copyright 2023 Google LLC - // - // Licensed under the Apache License, Version 2.0 (the "License"); - // you may not use this file except in compliance with the License. - // You may obtain a copy of the License at - // - // http://www.apache.org/licenses/LICENSE-2.0 - // - // Unless required by applicable law or agreed to in writing, software - // distributed under the License is distributed on an "AS IS" BASIS, - // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - // See the License for the specific language governing permissions and - // limitations under the License. 
-paths: - style: rest - namespace: "API" - filenameTemplate: "%0API.swift" - inlineSimpleQueryParameters: false - -entities: - includeIdentifiableConformance: true - includeInitializer: true \ No newline at end of file diff --git a/Sources/GoogleAI/Chat.swift b/Sources/GoogleAI/Chat.swift new file mode 100644 index 0000000..1ca542f --- /dev/null +++ b/Sources/GoogleAI/Chat.swift @@ -0,0 +1,147 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import Foundation + +/// An object that represents a back-and-forth chat with a model, capturing the history and saving +/// the context in memory between each message sent. +public class Chat { + private let model: GenerativeModel + + /// Initializes a new chat representing a 1:1 conversation between model and user. + init(model: GenerativeModel, history: [ModelContent]) { + self.model = model + self.history = history + } + + /// The previous content from the chat that has been successfully sent and received from the + /// model. This will be provided to the model for each message sent as context for the discussion. + public var history: [ModelContent] + + public func sendMessage(_ parts: PartsRepresentable...) async throws -> GenerateContentResponse { + return try await sendMessage([ModelContent(parts: parts)]) + } + + /// Send a message, using the existing history of this chat as context. If successful, the message + /// and response will be added to the history. If unsuccessful, history will remain unchanged. + public func sendMessage(_ content: [ModelContent]) async throws + -> GenerateContentResponse { + // Ensure that the new content has the role set. + let newContent: [ModelContent] = content.map(populateContentRole(_:)) + + // Send the history alongside the new message as context. + let request = history + newContent + let result = try await model.generateContent(request) + guard let reply = result.candidates.first?.content else { + let error = NSError(domain: "com.google.generative-ai", + code: -1, + userInfo: [ + NSLocalizedDescriptionKey: "No candidates with content available.", + ]) + throw GenerateContentError.internalError(underlying: error) + } + + // Make sure we inject the role into the content received. + let toAdd = ModelContent(role: "model", parts: reply.parts) + + // Append the request and successful result to history, then return the value. + history.append(contentsOf: newContent) + history.append(toAdd) + return result + } + + public func sendMessageStream(_ parts: PartsRepresentable...) + -> AsyncThrowingStream { + return sendMessageStream([ModelContent(parts: parts)]) + } + + public func sendMessageStream(_ content: [ModelContent]) + -> AsyncThrowingStream { + return AsyncThrowingStream { continuation in + Task { + var aggregatedContent: [ModelContent] = [] + + // Ensure that the new content has the role set. + let newContent: [ModelContent] = content.map(populateContentRole(_:)) + + // Send the history alongside the new message as context. 
+ let request = history + newContent + let stream = model.generateContentStream(request) + do { + for try await chunk in stream { + // Capture any content that's streaming. This should be populated if there's no error. + if let chunkContent = chunk.candidates.first?.content { + aggregatedContent.append(chunkContent) + } + + // Pass along the chunk. + continuation.yield(chunk) + } + } catch { + // Rethrow the error that the underlying stream threw. Don't add anything to history. + continuation.finish(throwing: error) + return + } + + // Save the request. + history.append(contentsOf: newContent) + + // Aggregate the content to add it to the history before we finish. + let aggregated = aggregatedChunks(aggregatedContent) + history.append(aggregated) + + continuation.finish() + } + } + } + + private func aggregatedChunks(_ chunks: [ModelContent]) -> ModelContent { + var parts: [ModelContent.Part] = [] + var combinedText = "" + for aggregate in chunks { + // Loop through all the parts, aggregating the text and adding the images. + for part in aggregate.parts { + switch part { + case let .text(str): + combinedText += str + + case .data(mimetype: _, _): + // Don't combine it, just add to the content. If there's any text pending, add that as + // a part. + if !combinedText.isEmpty { + parts.append(.text(combinedText)) + combinedText = "" + } + + parts.append(part) + } + } + } + + if !combinedText.isEmpty { + parts.append(.text(combinedText)) + } + + return ModelContent(role: "model", parts: parts) + } + + /// Populates the `role` field with `user` if it doesn't exist. Required in chat sessions. + private func populateContentRole(_ content: ModelContent) -> ModelContent { + if content.role != nil { + return content + } else { + return ModelContent(role: "user", parts: content.parts) + } + } +} diff --git a/Examples/PaLMChat/PaLMChat/Models/ChatMessage.swift b/Sources/GoogleAI/CountTokensRequest.swift similarity index 57% rename from Examples/PaLMChat/PaLMChat/Models/ChatMessage.swift rename to Sources/GoogleAI/CountTokensRequest.swift index 4396b94..c02e9a3 100644 --- a/Examples/PaLMChat/PaLMChat/Models/ChatMessage.swift +++ b/Sources/GoogleAI/CountTokensRequest.swift @@ -14,23 +14,25 @@ import Foundation -enum Participant { - case system - case user +struct CountTokensRequest { + let model: String + let contents: [ModelContent] } -struct ChatMessage: Identifiable, Equatable { - let id = UUID().uuidString - var message: String - let participant: Participant - var pending = false +extension CountTokensRequest: Encodable { + enum CodingKeys: CodingKey { + case contents + } } -extension ChatMessage { - static var samples: [ChatMessage] = [ - .init(message: "Hi - this is PaLM. What can I do for you today?", participant: .system), - .init(message: "Tell me a joke about a tiger.", participant: .user) - ] +extension CountTokensRequest: GenerativeAIRequest { + typealias Response = CountTokensResponse - static var sample = samples[0] + var url: URL { + URL(string: "\(GenerativeAISwift.baseURL)/\(model):countTokens")! + } +} + +public struct CountTokensResponse: Codable { + public let totalTokens: Int } diff --git a/Sources/GoogleAI/Errors.swift b/Sources/GoogleAI/Errors.swift new file mode 100644 index 0000000..b3bcbec --- /dev/null +++ b/Sources/GoogleAI/Errors.swift @@ -0,0 +1,132 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
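To make the new `Chat` type above concrete, here is a minimal usage sketch. It assumes a `GenerativeModel` created elsewhere (its `startChat(history:)` factory appears later in this diff) and that plain `String`s conform to `PartsRepresentable`; the prompts are invented for illustration, not taken from the repository.

```swift
import GoogleGenerativeAI

// Illustrative only; model construction and prompts are assumptions.
func chatDemo(model: GenerativeModel) async throws {
  let chat = model.startChat()

  // On success, the user message and the model reply are both appended to `chat.history`.
  let response = try await chat.sendMessage("Write a haiku about the sea.")
  print(response.text ?? "No text returned.")

  // Streaming variant: chunks are yielded as they arrive and aggregated into
  // a single "model" entry in the history once the stream finishes.
  for try await chunk in chat.sendMessageStream("Now translate it to French.") {
    print(chunk.text ?? "", terminator: "")
  }
  print("\nTurns so far: \(chat.history.count)")
}
```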
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import Foundation + +struct RPCError: Error, Decodable { + let httpResponseCode: Int32 + let message: String + let status: RPCStatus + + enum CodingKeys: CodingKey { + case error + } + + init(httpResponseCode: Int32, message: String, status: RPCStatus) { + self.httpResponseCode = httpResponseCode + self.message = message + self.status = status + } + + init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + let status = try container.decode(ErrorStatus.self, forKey: .error) + + if let code = status.code { + httpResponseCode = code + } else { + httpResponseCode = -1 + } + + if let message = status.message { + self.message = message + } else { + message = "Unknown error." + } + + if let rpcStatus = status.status { + self.status = rpcStatus + } else { + self.status = .unknown + } + } +} + +struct ErrorStatus: Codable { + let code: Int32? + let message: String? + let status: RPCStatus? + + init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + code = try container.decodeIfPresent(Int32.self, forKey: .code) + message = try container.decodeIfPresent(String.self, forKey: .message) + do { + status = try container.decodeIfPresent(RPCStatus.self, forKey: .status) + } catch { + status = .unknown + } + } +} + +enum RPCStatus: String, Codable { + // Not an error; returned on success. + case ok = "OK" + + // The operation was cancelled, typically by the caller. + case cancelled = "CANCELLED" + + // Unknown error. + case unknown = "UNKNOWN" + + // The client specified an invalid argument. + case invalidArgument = "INVALID_ARGUMENT" + + // The deadline expired before the operation could complete. + case deadlineExceeded = "DEADLINE_EXCEEDED" + + // Some requested entity (e.g., file or directory) was not found. + case notFound = "NOT_FOUND" + + // The entity that a client attempted to create (e.g., file or directory) already exists. + case alreadyExists = "ALREADY_EXISTS" + + // The caller does not have permission to execute the specified operation. + case permissionDenied = "PERMISSION_DENIED" + + // The request does not have valid authentication credentials for the operation. + case unauthenticated = "UNAUTHENTICATED" + + // Some resource has been exhausted, perhaps a per-user quota, or perhaps the entire file system + // is out of space. + case resourceExhausted = "RESOURCE_EXHAUSTED" + + // The operation was rejected because the system is not in a state required for the operation's + // execution. + case failedPrecondition = "FAILED_PRECONDITION" + + // The operation was aborted, typically due to a concurrency issue such as a sequencer check + // failure or transaction abort. + case aborted = "ABORTED" + + // The operation was attempted past the valid range. + case outOfRange = "OUT_OF_RANGE" + + // The operation is not implemented or is not supported/enabled in this service. + case unimplemented = "UNIMPLEMENTED" + + // Internal errors. + case internalError = "INTERNAL" + + // The service is currently unavailable. 
+ case unavailable = "UNAVAILABLE" + + // Unrecoverable data loss or corruption. + case dataLoss = "DATA_LOSS" +} + +enum InvalidCandidateError: Error { + case emptyContent(underlyingError: Error) + case malformedContent(underlyingError: Error) +} diff --git a/Sources/GoogleAI/GenerateContentError.swift b/Sources/GoogleAI/GenerateContentError.swift new file mode 100644 index 0000000..38e6b92 --- /dev/null +++ b/Sources/GoogleAI/GenerateContentError.swift @@ -0,0 +1,27 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import Foundation + +/// Errors that occur when generating content from a model. +public enum GenerateContentError: Error { + /// An internal error occurred. See the underlying error for more context. + case internalError(underlying: Error) + + /// A prompt was blocked. See the response's `promptFeedback.blockReason` for more information. + case promptBlocked(response: GenerateContentResponse) + + /// A response didn't fully complete. See the `FinishReason` for more information. + case responseStoppedEarly(reason: FinishReason, response: GenerateContentResponse) +} diff --git a/Sources/GoogleAI/GenerateContentRequest.swift b/Sources/GoogleAI/GenerateContentRequest.swift new file mode 100644 index 0000000..074913a --- /dev/null +++ b/Sources/GoogleAI/GenerateContentRequest.swift @@ -0,0 +1,44 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import Foundation + +struct GenerateContentRequest { + /// Model name. + let model: String + let contents: [ModelContent] + let generationConfig: GenerationConfig? + let safetySettings: [SafetySetting]? + let isStreaming: Bool +} + +extension GenerateContentRequest: Encodable { + private enum CodingKeys: String, CodingKey { + case contents + case generationConfig + case safetySettings + } +} + +extension GenerateContentRequest: GenerativeAIRequest { + typealias Response = GenerateContentResponse + + var url: URL { + if isStreaming { + URL(string: "\(GenerativeAISwift.baseURL)/\(model):streamGenerateContent?alt=sse")! + } else { + URL(string: "\(GenerativeAISwift.baseURL)/\(model):generateContent")! 
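The `GenerateContentError` cases above map onto checks that `GenerativeModel` performs further down in this diff. A hedged sketch of how a caller might distinguish them follows; the prompt and the `String`-based convenience call are assumptions for illustration.

```swift
import GoogleGenerativeAI

// Illustrative error handling; not part of the diff itself.
func generate(model: GenerativeModel, prompt: String) async {
  do {
    let response = try await model.generateContent(prompt)
    print(response.text ?? "Empty response.")
  } catch GenerateContentError.promptBlocked(let response) {
    // The prompt was rejected outright; the block reason explains why.
    print("Prompt blocked: \(String(describing: response.promptFeedback?.blockReason))")
  } catch GenerateContentError.responseStoppedEarly(let reason, _) {
    // Generation began but finished for a reason other than a natural stop.
    print("Stopped early: \(reason)")
  } catch {
    // internalError wraps transport and decoding failures, including RPCError.
    print("Request failed: \(error)")
  }
}
```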
+ } + } +} diff --git a/Sources/GoogleAI/GenerateContentResponse.swift b/Sources/GoogleAI/GenerateContentResponse.swift new file mode 100644 index 0000000..3f5d29e --- /dev/null +++ b/Sources/GoogleAI/GenerateContentResponse.swift @@ -0,0 +1,208 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import Foundation + +public struct GenerateContentResponse: Codable { + public let candidates: [CandidateResponse] + + public let promptFeedback: PromptFeedback? + + public var text: String? { + guard let candidate = candidates.first else { + Logging.default.error("Could not get text a response that had no candidates.") + return nil + } + guard let text = candidate.content.parts.first?.text else { + Logging.default.error("Could not get a text part from the first candidate.") + return nil + } + return text + } + + /// Initializer for SwiftUI previews or tests. + public init(candidates: [CandidateResponse], promptFeedback: PromptFeedback?) { + self.candidates = candidates + self.promptFeedback = promptFeedback + } + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + + guard container.contains(CodingKeys.candidates) || container + .contains(CodingKeys.promptFeedback) else { + let context = DecodingError.Context( + codingPath: [], + debugDescription: "Failed to decode GenerateContentResponse;" + + " missing keys 'candidates' and 'promptFeedback'." + ) + throw DecodingError.dataCorrupted(context) + } + + if let candidates = try container.decodeIfPresent( + [CandidateResponse].self, + forKey: .candidates + ) { + self.candidates = candidates + } else { + candidates = [] + } + promptFeedback = try container.decodeIfPresent(PromptFeedback.self, forKey: .promptFeedback) + } +} + +public struct CandidateResponse: Codable { + public let content: ModelContent + public let safetyRatings: [SafetyRating] + + public let finishReason: FinishReason? + + public let citationMetadata: CitationMetadata? + + /// Initializer for SwiftUI previews or tests. + public init(content: ModelContent, safetyRatings: [SafetyRating], finishReason: FinishReason?, + citationMetadata: CitationMetadata?) { + self.content = content + self.safetyRatings = safetyRatings + self.finishReason = finishReason + self.citationMetadata = citationMetadata + } + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + + do { + if let content = try container.decodeIfPresent(ModelContent.self, forKey: .content) { + self.content = content + } else { + content = ModelContent(parts: []) + } + } catch { + // Check if `content` can be decoded as an empty dictionary to detect the `"content": {}` bug. + if let content = try? 
container.decode([String: String].self, forKey: .content), + content.isEmpty { + throw InvalidCandidateError.emptyContent(underlyingError: error) + } else { + throw InvalidCandidateError.malformedContent(underlyingError: error) + } + } + + if let safetyRatings = try container.decodeIfPresent( + [SafetyRating].self, + forKey: .safetyRatings + ) { + self.safetyRatings = safetyRatings + } else { + safetyRatings = [] + } + + finishReason = try container.decodeIfPresent(FinishReason.self, forKey: .finishReason) + + citationMetadata = try container.decodeIfPresent( + CitationMetadata.self, + forKey: .citationMetadata + ) + } +} + +/// A collection of source attributions for a piece of content. +public struct CitationMetadata: Codable { + public let citationSources: [Citation] +} + +public struct Citation: Codable { + public let startIndex: Int + public let endIndex: Int + public let uri: String + public let license: String +} + +public enum FinishReason: String, Codable { + case unknown = "FINISH_REASON_UNKNOWN" + + case unspecified = "FINISH_REASON_UNSPECIFIED" + + /// Natural stop point of the model or provided stop sequence. + case stop = "STOP" + + /// The maximum number of tokens as specified in the request was reached. + case maxTokens = "MAX_TOKENS" + + /// The token generation was stopped as the response was flagged for safety reasons. + /// NOTE: When streaming, the Candidate.content will be empty if content filters blocked the + /// output. + case safety = "SAFETY" + case recitation = "RECITATION" + case other = "OTHER" + + /// Do not explicitly use. Initializer required for Decodable conformance. + public init(from decoder: Decoder) throws { + let value = try decoder.singleValueContainer().decode(String.self) + guard let decodedFinishReason = FinishReason(rawValue: value) else { + Logging.default + .error("[GoogleGenerativeAI] Unrecognized FinishReason with value \"\(value)\".") + self = .unknown + return + } + + self = decodedFinishReason + } +} + +public struct PromptFeedback: Codable { + public enum BlockReason: String, Codable { + case unknown = "UNKNOWN" + case unspecified = "BLOCK_REASON_UNSPECIFIED" + case safety = "SAFETY" + case other = "OTHER" + + /// Do not explicitly use. Initializer required for Decodable conformance. + public init(from decoder: Decoder) throws { + let value = try decoder.singleValueContainer().decode(String.self) + guard let decodedBlockReason = BlockReason(rawValue: value) else { + Logging.default + .error("[GoogleGenerativeAI] Unrecognized BlockReason with value \"\(value)\".") + self = .unknown + return + } + + self = decodedBlockReason + } + } + + public let blockReason: BlockReason? + public let safetyRatings: [SafetyRating] + + /// Initializer for SwiftUI previews or tests. 
+ public init(blockReason: BlockReason?, safetyRatings: [SafetyRating]) { + self.blockReason = blockReason + self.safetyRatings = safetyRatings + } + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + blockReason = try container.decodeIfPresent( + PromptFeedback.BlockReason.self, + forKey: .blockReason + ) + if let safetyRatings = try container.decodeIfPresent( + [SafetyRating].self, + forKey: .safetyRatings + ) { + self.safetyRatings = safetyRatings + } else { + safetyRatings = [] + } + } +} diff --git a/Sources/GoogleAI/GenerationConfig.swift b/Sources/GoogleAI/GenerationConfig.swift new file mode 100644 index 0000000..ba8cc3c --- /dev/null +++ b/Sources/GoogleAI/GenerationConfig.swift @@ -0,0 +1,85 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import Foundation + +/// A struct defining model parameters to be used when sending generative AI +/// requests to the backend model. +public struct GenerationConfig: Codable { + /// A parameter controlling the degree of randomness in token selection. A + /// temperature of zero is deterministic, always choosing the + /// highest-probability response. Typical values are between 0 and 1 + /// inclusive. Defaults to 0 if unspecified. + public let temperature: Float? + + /// The `topP` parameter changes how the model selects tokens for output. + /// Tokens are selected from the most to least probable until the sum of + /// their probabilities equals the `topP` value. For example, if tokens A, B, + /// and C have probabilities of 0.3, 0.2, and 0.1 respectively and the topP + /// value is 0.5, then the model will select either A or B as the next token + /// by using the `temperature` and exclude C as a candidate. + /// Defaults to 0.95 if unset. + public let topP: Float? + + /// The `topK` parameter changes how the model selects tokens for output. A + /// `topK` of 1 means the selected token is the most probable among all the + /// tokens in the model's vocabulary, while a `topK` of 3 means that the next + /// token is selected from among the 3 most probable using the `temperature`. + /// For each token selection step, the `topK` tokens with the highest + /// probabilities are sampled. Tokens are then further filtered based on + /// `topP` with the final token selected using `temperature` sampling. + /// Defaults to 40 if unspecified. + public let topK: Int? + + /// The maximum number of generated response messages to return. This value + /// must be between [1, 8], inclusive. If unset, this will default to 1. + /// + /// - Note: Only unique candidates are returned. Higher temperatures are more + /// likely to produce unique candidates. Setting temperature to 0 will + /// always produce exactly one candidate regardless of the + /// `candidateCount`. + public let candidateCount: Int? + + /// Specifies the maximum number of tokens that can be generated in the + /// response. 
The number of tokens per word varies depending on the + /// language outputted. The maximum value is capped at 1024. Defaults to 0 + /// (unbounded). + public let maxOutputTokens: Int? + + /// A set of up to 5 ``String``s that will stop output generation. If + /// specified, the API will stop at the first appearance of a stop sequence. + /// The stop sequence will not be included as part of the response. + public let stopSequences: [String]? + + /// Creates a new `GenerationConfig` value. + /// + /// - Parameter temperature: See ``temperature`` + /// - Parameter topP: See ``topP`` + /// - Parameter topK: See ``topK`` + /// - Parameter candidateCount: See ``candidateCount`` + /// - Parameter maxOutputTokens: See ``maxOutputTokens`` + /// - Parameter stopSequences: See ``stopSequences`` + public init(temperature: Float? = nil, topP: Float? = nil, topK: Int? = nil, + candidateCount: Int? = nil, maxOutputTokens: Int? = nil, + stopSequences: [String]? = nil) { + // Explicit init because otherwise if we re-arrange the above variables it changes the API + // surface. + self.temperature = temperature + self.topP = topP + self.topK = topK + self.candidateCount = candidateCount + self.maxOutputTokens = maxOutputTokens + self.stopSequences = stopSequences + } +} diff --git a/Sources/GoogleGenerativeAI/GenerativeLanguageError.swift b/Sources/GoogleAI/GenerativeAIRequest.swift similarity index 81% rename from Sources/GoogleGenerativeAI/GenerativeLanguageError.swift rename to Sources/GoogleAI/GenerativeAIRequest.swift index c6d7329..9beebf6 100644 --- a/Sources/GoogleGenerativeAI/GenerativeLanguageError.swift +++ b/Sources/GoogleAI/GenerativeAIRequest.swift @@ -14,9 +14,8 @@ import Foundation -public enum GenerativeLanguageError: Error { - case missingAPIKey - case invalidResponse(String) - case notImplemented - case unknownError +protocol GenerativeAIRequest: Encodable { + associatedtype Response: Decodable + + var url: URL { get } } diff --git a/Sources/GoogleAI/GenerativeAIService.swift b/Sources/GoogleAI/GenerativeAIService.swift new file mode 100644 index 0000000..d682214 --- /dev/null +++ b/Sources/GoogleAI/GenerativeAIService.swift @@ -0,0 +1,254 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import Foundation + +struct GenerativeAIService { + /// Gives permission to talk to the backend. 
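`GenerationConfig` above is a plain value type; every parameter is optional, so backend defaults apply unless a field is set. A small sketch of constructing one and passing it to a model follows; the model name and the specific values are assumptions chosen for illustration.

```swift
import GoogleGenerativeAI

// Illustrative values; any field left nil falls back to the server-side default.
let config = GenerationConfig(
  temperature: 0.4,        // lower values are more deterministic
  topP: 0.95,
  topK: 40,
  candidateCount: 1,
  maxOutputTokens: 256,
  stopSequences: ["END"]
)

// "gemini-pro" and the key placeholder are assumptions, not values from this diff.
let model = GenerativeModel(name: "gemini-pro",
                            apiKey: "YOUR_API_KEY",
                            generationConfig: config)
```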
+ private let apiKey: String + + private let urlSession: URLSession + + init(apiKey: String, urlSession: URLSession) { + self.apiKey = apiKey + self.urlSession = urlSession + } + + func loadRequest(request: T) async throws -> T.Response { + let urlRequest = try urlRequest(request: request) + + #if DEBUG + printCURLCommand(from: urlRequest) + #endif + + let data: Data + let rawResponse: URLResponse + (data, rawResponse) = try await urlSession.data(for: urlRequest) + + let response = try httpResponse(urlResponse: rawResponse) + + // Verify the status code is 200 + guard response.statusCode == 200 else { + Logging.default.error("[GoogleGenerativeAI] The server responded with an error: \(response)") + if let responseString = String(data: data, encoding: .utf8) { + Logging.network.error("[GoogleGenerativeAI] Response payload: \(responseString)") + } + + throw try JSONDecoder().decode(RPCError.self, from: data) + } + + return try parseResponse(T.Response.self, from: data) + } + + func loadRequestStream(request: T) + -> AsyncThrowingStream { + return AsyncThrowingStream { continuation in + Task { + let urlRequest: URLRequest + do { + urlRequest = try self.urlRequest(request: request) + } catch { + continuation.finish(throwing: error) + return + } + + #if DEBUG + printCURLCommand(from: urlRequest) + #endif + + let stream: URLSession.AsyncBytes + let rawResponse: URLResponse + do { + (stream, rawResponse) = try await urlSession.bytes(for: urlRequest) + } catch { + continuation.finish(throwing: error) + return + } + + // Verify the status code is 200 + let response: HTTPURLResponse + do { + response = try httpResponse(urlResponse: rawResponse) + } catch { + continuation.finish(throwing: error) + return + } + + // Verify the status code is 200 + guard response.statusCode == 200 else { + Logging.default + .error("[GoogleGenerativeAI] The server responded with an error: \(response)") + var responseBody = "" + for try await line in stream.lines { + responseBody += line + "\n" + } + + Logging.network.error("[GoogleGenerativeAI] Response payload: \(responseBody)") + do { + try parseError(responseBody: responseBody) + } catch { + continuation.finish(throwing: error) + } + + return + } + + // Received lines that are not server-sent events (SSE); these are not prefixed with "data:" + var extraLines: String = "" + + let decoder = JSONDecoder() + decoder.keyDecodingStrategy = .convertFromSnakeCase + for try await line in stream.lines { + Logging.network.debug("[GoogleGenerativeAI] Stream response: \(line)") + + if line.hasPrefix("data:") { + // We can assume 5 characters since it's utf-8 encoded, removing `data:`. + let jsonText = String(line.dropFirst(5)) + let data: Data + do { + data = try jsonData(jsonText: jsonText) + } catch { + continuation.finish(throwing: error) + return + } + + // Handle the content. 
+ do { + let content = try parseResponse(T.Response.self, from: data) + continuation.yield(content) + } catch { + continuation.finish(throwing: error) + return + } + } else { + extraLines += line + } + } + + if extraLines.count > 0 { + do { + try parseError(responseBody: extraLines) + } catch { + continuation.finish(throwing: error) + } + + return + } + + continuation.finish(throwing: nil) + } + } + } + + // MARK: - Private Helpers + + private func urlRequest(request: T) throws -> URLRequest { + var urlRequest = URLRequest(url: request.url) + urlRequest.httpMethod = "POST" + urlRequest.setValue(apiKey, forHTTPHeaderField: "x-goog-api-key") + urlRequest.setValue("genai-swift/\(GenerativeAISwift.version)", + forHTTPHeaderField: "x-goog-api-client") + urlRequest.setValue("application/json", forHTTPHeaderField: "Content-Type") + let encoder = JSONEncoder() + encoder.keyEncodingStrategy = .convertToSnakeCase + urlRequest.httpBody = try encoder.encode(request) + + return urlRequest + } + + private func httpResponse(urlResponse: URLResponse) throws -> HTTPURLResponse { + // Verify the status code is 200 + guard let response = urlResponse as? HTTPURLResponse else { + Logging.default + .error( + "[GoogleGenerativeAI] Response wasn't an HTTP response, internal error \(urlResponse)" + ) + throw NSError( + domain: "com.google.generative-ai", + code: -1, + userInfo: [NSLocalizedDescriptionKey: "Response was not an HTTP response."] + ) + } + + return response + } + + private func jsonData(jsonText: String) throws -> Data { + guard let data = jsonText.data(using: .utf8) else { + let error = NSError( + domain: "com.google.generative-ai", + code: -1, + userInfo: [NSLocalizedDescriptionKey: "Could not parse response as UTF8."] + ) + throw error + } + + return data + } + + private func parseError(responseBody: String) throws { + let data = try jsonData(jsonText: responseBody) + + do { + let rpcError = try JSONDecoder().decode(RPCError.self, from: data) + throw rpcError + } catch { + // TODO: Throw an error about an unrecognized error payload with the response body + throw error + } + } + + private func parseResponse(_ type: T.Type, from data: Data) throws -> T { + do { + return try JSONDecoder().decode(type, from: data) + } catch { + if let json = String(data: data, encoding: .utf8) { + Logging.network.error("[GoogleGenerativeAI] JSON response: \(json)") + } + Logging.default.error("[GoogleGenerativeAI] Error decoding server JSON: \(error)") + throw error + } + } + + #if DEBUG + private func cURLCommand(from request: URLRequest) -> String { + var returnValue = "curl " + if let allHeaders = request.allHTTPHeaderFields { + for (key, value) in allHeaders { + returnValue += "-H '\(key): \(value)' " + } + } + + guard let url = request.url else { return "" } + returnValue += "'\(url.absoluteString)' " + + guard let body = request.httpBody, + let jsonStr = String(bytes: body, encoding: .utf8) else { return "" } + let escapedJSON = jsonStr.replacingOccurrences(of: "'", with: "'\\''") + returnValue += "-d '\(escapedJSON)'" + + return returnValue + } + + private func printCURLCommand(from request: URLRequest) { + let command = cURLCommand(from: request) + Logging.verbose.debug(""" + [GoogleGenerativeAI] Creating request with the equivalent cURL command: + ----- cURL command ----- + \(command, privacy: .private) + ------------------------ + """) + } + #endif // DEBUG +} diff --git a/Sources/GoogleAI/GenerativeAISwift.swift b/Sources/GoogleAI/GenerativeAISwift.swift new file mode 100644 index 0000000..4f374ac --- 
/dev/null +++ b/Sources/GoogleAI/GenerativeAISwift.swift @@ -0,0 +1,21 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +import Foundation + +/// Constants associated with the GenerativeAISwift SDK +public enum GenerativeAISwift { + /// String value of the SDK version + public static let version = "0.4.0" + static let baseURL = "https://generativelanguage.googleapis.com/v1" +} diff --git a/Sources/GoogleAI/GenerativeModel.swift b/Sources/GoogleAI/GenerativeModel.swift new file mode 100644 index 0000000..ed095ad --- /dev/null +++ b/Sources/GoogleAI/GenerativeModel.swift @@ -0,0 +1,231 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import Foundation + +/// A type that represents a remote multimodal model (like Gemini), with the ability to generate +/// content based on various input types. +public final class GenerativeModel { + /// Name of the model in the backend. + private let modelName: String + + /// The backing service responsible for sending and receiving model requests to the backend. + let generativeAIService: GenerativeAIService + + /// Configuration parameters used for the MultiModalModel. + let generationConfig: GenerationConfig? + + /// The safety settings to be used for prompts. + let safetySettings: [SafetySetting]? + + /// Initializes a new remote model with the given parameters. + /// + /// - Parameter name: The name of the model to be used. + /// - Parameter apiKey: The API key for your project. + /// - Parameter generationConfig: A value containing the content generation parameters your model + /// should use. + /// - Parameter safetySettings: A value describing what types of harmful content your model + /// should allow. + public convenience init(name: String, + apiKey: String, + generationConfig: GenerationConfig? = nil, + safetySettings: [SafetySetting]? = nil) { + self.init( + name: name, + apiKey: apiKey, + generationConfig: generationConfig, + safetySettings: safetySettings, + urlSession: .shared + ) + } + + /// The designated initializer for this class. + init(name: String, + apiKey: String, + generationConfig: GenerationConfig? = nil, + safetySettings: [SafetySetting]? 
= nil, + urlSession: URLSession) { + modelName = name + generativeAIService = GenerativeAIService(apiKey: apiKey, urlSession: urlSession) + self.generationConfig = generationConfig + self.safetySettings = safetySettings + + Logging.default.info(""" + [GoogleGenerativeAI] Model \( + name, + privacy: .public + ) initialized. To enable additional logging, add \ + `\(Logging.enableArgumentKey, privacy: .public)` as a launch argument in Xcode. + """) + Logging.verbose.debug("[GoogleGenerativeAI] Verbose logging enabled.") + } + + /// Generates content from String and/or image inputs, given to the model as a prompt, that are + /// representable as one or more ``ModelContent/Part``s. + /// + /// Since ``ModelContent/Part``s do not specify a role, this method is intended for generating + /// content from + /// [zero-shot](https://developers.google.com/machine-learning/glossary/generative#zero-shot-prompting) + /// or "direct" prompts. For + /// [few-shot](https://developers.google.com/machine-learning/glossary/generative#few-shot-prompting) + /// prompts, see ``generateContent(_:)-58rm0``. + /// + /// - Parameter content: The input(s) given to the model as a prompt (see ``PartsRepresentable`` + /// for conforming types). + /// - Returns: The content generated by the model. + /// - Throws: A ``GenerateContentError`` if the request failed. + public func generateContent(_ parts: PartsRepresentable...) + async throws -> GenerateContentResponse { + return try await generateContent([ModelContent(parts: parts)]) + } + + /// Generates new content from input content given to the model as a prompt. + /// + /// - Parameter content: The input(s) given to the model as a prompt. + /// - Returns: The generated content response from the model. + /// - Throws: A ``GenerateContentError`` if the request failed. + public func generateContent(_ content: [ModelContent]) async throws -> GenerateContentResponse { + let generateContentRequest = GenerateContentRequest(model: "models/\(modelName)", + contents: content, + generationConfig: generationConfig, + safetySettings: safetySettings, + isStreaming: false) + let response: GenerateContentResponse + do { + response = try await generativeAIService.loadRequest(request: generateContentRequest) + } catch { + throw GenerateContentError.internalError(underlying: error) + } + + // Check the prompt feedback to see if the prompt was blocked. + if response.promptFeedback?.blockReason != nil { + throw GenerateContentError.promptBlocked(response: response) + } + + // Check to see if an error should be thrown for stop reason. + if let reason = response.candidates.first?.finishReason, reason != .stop { + throw GenerateContentError.responseStoppedEarly(reason: reason, response: response) + } + + return response + } + + /// Generates content from String and/or image inputs, given to the model as a prompt, that are + /// representable as one or more ``ModelContent/Part``s. + /// + /// Since ``ModelContent/Part``s do not specify a role, this method is intended for generating + /// content from + /// [zero-shot](https://developers.google.com/machine-learning/glossary/generative#zero-shot-prompting) + /// or "direct" prompts. For + /// [few-shot](https://developers.google.com/machine-learning/glossary/generative#few-shot-prompting) + /// prompts, see ``generateContent(_:)-58rm0``. + /// + /// - Parameter content: The input(s) given to the model as a prompt (see ``PartsRepresentable`` + /// for conforming types). 
+  /// - Returns: A stream wrapping content generated by the model or a ``GenerateContentError``
+  ///     error if an error occurred.
+  public func generateContentStream(_ parts: PartsRepresentable...)
+    -> AsyncThrowingStream<GenerateContentResponse, Error> {
+    return generateContentStream([ModelContent(parts: parts)])
+  }
+
+  /// Generates new content from input content given to the model as a prompt.
+  ///
+  /// - Parameter content: The input(s) given to the model as a prompt.
+  /// - Returns: A stream wrapping content generated by the model or a ``GenerateContentError``
+  ///     error if an error occurred.
+  public func generateContentStream(_ content: [ModelContent])
+    -> AsyncThrowingStream<GenerateContentResponse, Error> {
+    let generateContentRequest = GenerateContentRequest(model: "models/\(modelName)",
+                                                        contents: content,
+                                                        generationConfig: generationConfig,
+                                                        safetySettings: safetySettings,
+                                                        isStreaming: true)
+
+    var responseIterator = generativeAIService.loadRequestStream(request: generateContentRequest)
+      .makeAsyncIterator()
+    return AsyncThrowingStream {
+      let response: GenerateContentResponse?
+      do {
+        response = try await responseIterator.next()
+      } catch {
+        throw GenerateContentError.internalError(underlying: error)
+      }
+
+      // The responseIterator will return `nil` when it's done.
+      guard let response = response else {
+        // This is the end of the stream! Signal it by sending `nil`.
+        return nil
+      }
+
+      // Check the prompt feedback to see if the prompt was blocked.
+      if response.promptFeedback?.blockReason != nil {
+        throw GenerateContentError.promptBlocked(response: response)
+      }
+
+      // If the stream ended early unexpectedly, throw an error.
+      if let finishReason = response.candidates.first?.finishReason, finishReason != .stop {
+        throw GenerateContentError.responseStoppedEarly(reason: finishReason, response: response)
+      } else {
+        // Response was valid content, pass it along and continue.
+        return response
+      }
+    }
+  }
+
+  /// Creates a new chat conversation using this model with the provided history.
+  public func startChat(history: [ModelContent] = []) -> Chat {
+    return Chat(model: self, history: history)
+  }
+
+  /// Runs the model's tokenizer on String and/or image inputs that are representable as one or more
+  /// ``ModelContent/Part``s.
+  ///
+  /// Since ``ModelContent/Part``s do not specify a role, this method is intended for tokenizing
+  /// [zero-shot](https://developers.google.com/machine-learning/glossary/generative#zero-shot-prompting)
+  /// or "direct" prompts. For
+  /// [few-shot](https://developers.google.com/machine-learning/glossary/generative#few-shot-prompting)
+  /// input, see ``countTokens(_:)-9spwl``.
+  ///
+  /// - Parameter content: The input(s) given to the model as a prompt (see ``PartsRepresentable``
+  ///   for conforming types).
+  /// - Returns: The results of running the model's tokenizer on the input; contains
+  ///   ``CountTokensResponse/totalTokens``.
+  /// - Throws: A ``CountTokensError`` if the tokenization request failed.
+  public func countTokens(_ parts: PartsRepresentable...) async throws -> CountTokensResponse {
+    return try await countTokens([ModelContent(parts: parts)])
+  }
+
+  /// Runs the model's tokenizer on the input content and returns the token count.
+  ///
+  /// - Parameter content: The input given to the model as a prompt.
+  /// - Returns: The results of running the model's tokenizer on the input; contains
+  ///   ``CountTokensResponse/totalTokens``.
+  /// - Throws: A ``CountTokensError`` if the tokenization request failed.
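// ---------------------------------------------------------------------------
// Editor's note: a minimal usage sketch (not part of the diff) for the request
// methods documented above. The model name "gemini-pro", the API-key
// placeholder, the `GoogleGenerativeAI` module name, and the `text`
// convenience accessor on the responses are illustrative assumptions; only
// generateContent, generateContentStream, and countTokens are taken directly
// from this file.
// ---------------------------------------------------------------------------
import GoogleGenerativeAI

func requestDemo() async throws {
  let model = GenerativeModel(name: "gemini-pro", apiKey: "YOUR_API_KEY")

  // One-shot generation; failures surface as GenerateContentError.
  let response = try await model.generateContent("Write a haiku about Swift.")
  print(response.text ?? "")

  // Streaming generation; partial responses are yielded as they arrive and the
  // stream finishes after the service sends its last chunk.
  for try await chunk in model.generateContentStream("Tell me a short story.") {
    print(chunk.text ?? "", terminator: "")
  }

  // Token counting for the same prompt, using the tokenizer endpoint.
  let count = try await model.countTokens("Write a haiku about Swift.")
  print("Total tokens: \(count.totalTokens)")
}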
+ public func countTokens(_ content: [ModelContent]) async throws + -> CountTokensResponse { + let countTokensRequest = CountTokensRequest(model: "models/\(modelName)", contents: content) + + do { + return try await generativeAIService.loadRequest(request: countTokensRequest) + } catch { + throw CountTokensError.internalError(underlying: error) + } + } +} + +/// See ``GenerativeModel/countTokens(_:)-9spwl``. +public enum CountTokensError: Error { + case internalError(underlying: Error) +} diff --git a/Sources/GoogleAI/Logging.swift b/Sources/GoogleAI/Logging.swift new file mode 100644 index 0000000..e8e0285 --- /dev/null +++ b/Sources/GoogleAI/Logging.swift @@ -0,0 +1,55 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import Foundation +import OSLog + +struct Logging { + /// Subsystem that should be used for all Loggers. + static let subsystem = "com.google.generative-ai" + + /// Default category used for most loggers, unless specialized. + static let defaultCategory = "" + + /// The argument required to enable additional logging. + static let enableArgumentKey = "-GoogleGenerativeAIDebugLogEnabled" + + // No initializer available. + @available(*, unavailable) + private init() {} + + /// The default logger that is visible for all users. Note: we shouldn't be using anything lower + /// than `.notice`. + static var `default` = Logger(subsystem: subsystem, category: defaultCategory) + + /// A non default + static var network: Logger = { + if ProcessInfo.processInfo.arguments.contains(enableArgumentKey) { + return Logger(subsystem: subsystem, category: "NetworkResponse") + } else { + // Return a valid logger that's using `OSLog.disabled` as the logger, hiding everything. + return Logger(.disabled) + } + }() + + /// + static var verbose: Logger = { + if ProcessInfo.processInfo.arguments.contains(enableArgumentKey) { + return Logger(subsystem: subsystem, category: defaultCategory) + } else { + // Return a valid logger that's using `OSLog.disabled` as the logger, hiding everything. + return Logger(.disabled) + } + }() +} diff --git a/Sources/GoogleAI/ModelContent.swift b/Sources/GoogleAI/ModelContent.swift new file mode 100644 index 0000000..1c09bf6 --- /dev/null +++ b/Sources/GoogleAI/ModelContent.swift @@ -0,0 +1,123 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import Foundation + +/// A type describing data in media formats interpretable by an AI model. 
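// ---------------------------------------------------------------------------
// Editor's note: a hedged sketch of multi-turn usage built from the
// ModelContent type declared just below and the startChat(history:) API above.
// The "user"/"model" role strings and the model name are illustrative
// assumptions. To see the SDK's own debug output, the
// -GoogleGenerativeAIDebugLogEnabled launch argument (defined in Logging.swift
// above) can be added to the Xcode scheme.
// ---------------------------------------------------------------------------
import GoogleGenerativeAI

func chatDemo() async throws {
  let model = GenerativeModel(name: "gemini-pro", apiKey: "YOUR_API_KEY")

  // Seed the conversation with prior turns; roles alternate between
  // "user" and "model".
  let history = [
    ModelContent(role: "user", parts: "Hello, I have two dogs at home."),
    ModelContent(role: "model", parts: "Great! What would you like to know about them?"),
  ]
  let chat = model.startChat(history: history)
  _ = chat // The Chat type's send APIs are defined outside this excerpt.
}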
+/// Each generative AI request or response contains an `Array` of ``ModelContent``s, and each
+/// ``ModelContent`` value may comprise multiple heterogeneous ``ModelContent/Part``s.
+public struct ModelContent: Codable, Equatable {
+  /// A discrete piece of data in a media format interpretable by an AI model. Within a single value
+  /// of ``Part``, different data types may not mix.
+  public enum Part: Codable, Equatable {
+    enum CodingKeys: String, CodingKey {
+      case text
+      case inlineData
+    }
+
+    enum InlineDataKeys: String, CodingKey {
+      case mimeType = "mime_type"
+      case bytes = "data"
+    }
+
+    /// Text value.
+    case text(String)
+
+    /// Data with a specified media type. Not all media types may be supported by the AI model.
+    case data(mimetype: String, Data)
+
+    // MARK: Convenience Initializers
+
+    /// Convenience function for populating a Part with JPEG data.
+    public static func jpeg(_ data: Data) -> Self {
+      return .data(mimetype: "image/jpeg", data)
+    }
+
+    /// Convenience function for populating a Part with PNG data.
+    public static func png(_ data: Data) -> Self {
+      return .data(mimetype: "image/png", data)
+    }
+
+    // MARK: Codable Conformance
+
+    public func encode(to encoder: Encoder) throws {
+      var container = encoder.container(keyedBy: ModelContent.Part.CodingKeys.self)
+      switch self {
+      case let .text(a0):
+        try container.encode(a0, forKey: .text)
+      case let .data(mimetype, bytes):
+        var inlineDataContainer = container.nestedContainer(
+          keyedBy: InlineDataKeys.self,
+          forKey: .inlineData
+        )
+        try inlineDataContainer.encode(mimetype, forKey: .mimeType)
+        try inlineDataContainer.encode(bytes, forKey: .bytes)
+      }
+    }
+
+    public init(from decoder: Decoder) throws {
+      let values = try decoder.container(keyedBy: CodingKeys.self)
+      if let textVal = try? values.decode(String.self, forKey: .text) {
+        self = .text(textVal)
+      } else if let dataContainer = try? values.nestedContainer(
+        keyedBy: InlineDataKeys.self,
+        forKey: .inlineData
+      ) {
+        // Get the data here.
+        let mimetype = try dataContainer.decode(String.self, forKey: .mimeType)
+        let bytes = try dataContainer.decode(Data.self, forKey: .bytes)
+        self = .data(mimetype: mimetype, bytes)
+      } else {
+        throw DecodingError.dataCorrupted(.init(
+          codingPath: [CodingKeys.text, CodingKeys.inlineData],
+          debugDescription: "Neither text nor inline data was found."
+        ))
+      }
+    }
+
+    /// Returns the text contents of this ``Part``, if it contains text.
+    public var text: String? {
+      switch self {
+      case let .text(contents): return contents
+      default: return nil
+      }
+    }
+  }
+
+  /// The role of the entity creating the ``ModelContent``. For user-generated client requests,
+  /// for example, the role is `user`.
+  public let role: String?
+
+  /// The data parts comprising this ``ModelContent`` value.
+  public let parts: [Part]
+
+  /// Creates a new value from any data or `Array` of data interpretable as a
+  /// ``Part``. See ``PartsRepresentable`` for types that can be interpreted as `Part`s.
+  public init(role: String? = "user", parts: some PartsRepresentable) {
+    self.role = role
+    self.parts = parts.partsValue
+  }
+
+  /// Creates a new value from a list of ``Part``s.
+  public init(role: String? = "user", parts: [Part]) {
+    self.role = role
+    self.parts = parts
+  }
+
+  /// Creates a new value from any data interpretable as a ``Part``. See ``PartsRepresentable``
+  /// for types that can be interpreted as `Part`s.
+  public init(role: String? = "user", _ parts: PartsRepresentable...)
{ + self.init(role: role, parts: parts) + } +} diff --git a/Sources/GoogleAI/PartsRepresentable.swift b/Sources/GoogleAI/PartsRepresentable.swift new file mode 100644 index 0000000..85d4171 --- /dev/null +++ b/Sources/GoogleAI/PartsRepresentable.swift @@ -0,0 +1,80 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import Foundation +#if canImport(AppKit) + import AppKit // For NSImage extensions. +#elseif canImport(UIKit) + import UIKit // For UIImage extensions. +#endif + +/// A protocol describing any data that could be interpreted as model input data. +public protocol PartsRepresentable { + var partsValue: [ModelContent.Part] { get } +} + +/// Enables a `String` to be passed in as ``PartsRepresentable``. +extension String: PartsRepresentable { + public var partsValue: [ModelContent.Part] { + return [.text(self)] + } +} + +/// Enables a ``ModelContent.Part`` to be passed in as ``PartsRepresentable``. +extension ModelContent.Part: PartsRepresentable { + public var partsValue: [ModelContent.Part] { + return [self] + } +} + +/// Enable an `Array` of ``PartsRepresentable`` values to be passed in as a single +/// ``PartsRepresentable``. +extension [any PartsRepresentable]: PartsRepresentable { + public var partsValue: [ModelContent.Part] { + return flatMap { $0.partsValue } + } +} + +#if canImport(AppKit) + /// Enables images to be representable as ``PartsRepresentable``. + extension NSImage: PartsRepresentable { + public var partsValue: [ModelContent.Part] { + guard let cgImage = cgImage(forProposedRect: nil, context: nil, hints: nil) else { + Logging.default.error("[GoogleGenerativeAI] Couldn't create CGImage from NSImage.") + return [] + } + let bmp = NSBitmapImageRep(cgImage: cgImage) + guard let data = bmp.representation(using: .jpeg, properties: [.compressionFactor: 0.8]) + else { + Logging.default.error("[GoogleGenerativeAI] Couldn't create BMP from CGImage.") + return [] + } + return [ModelContent.Part.data(mimetype: "image/jpeg", data)] + } + } + +#elseif canImport(UIKit) + /// Enables images to be representable as ``PartsRepresentable``. + extension UIImage: PartsRepresentable { + public var partsValue: [ModelContent.Part] { + guard let data = jpegData(compressionQuality: 0.8) else { + Logging.default.error("[GoogleGenerativeAI] Couldn't create JPEG from UIImage.") + return [] + } + + return [ModelContent.Part.data(mimetype: "image/jpeg", data)] + } + } + +#endif diff --git a/Sources/GoogleAI/Safety.swift b/Sources/GoogleAI/Safety.swift new file mode 100644 index 0000000..36ac8cf --- /dev/null +++ b/Sources/GoogleAI/Safety.swift @@ -0,0 +1,179 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
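// ---------------------------------------------------------------------------
// Editor's note: a hedged sketch of the PartsRepresentable conformances
// defined above — a String, an image, and an explicit Part can all be mixed in
// one prompt. UIImage is used here (iOS); on macOS the NSImage conformance
// above applies instead. The model name "gemini-pro-vision" and the API-key
// placeholder are illustrative assumptions, not part of this diff.
// ---------------------------------------------------------------------------
#if canImport(UIKit)
  import UIKit
  import GoogleGenerativeAI

  func multimodalDemo(image: UIImage) async throws {
    let model = GenerativeModel(name: "gemini-pro-vision", apiKey: "YOUR_API_KEY")

    // Variadic PartsRepresentable arguments: text, an image (encoded to JPEG
    // by the UIImage conformance above), and an explicit Part value.
    let response = try await model.generateContent(
      "What is in this picture?",
      image,
      ModelContent.Part.text("Answer in one sentence.")
    )
    _ = response
  }
#endif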
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import Foundation
+
+/// A type defining potentially harmful media categories and their model-assigned ratings. A value
+/// of this type may be assigned to a category for every model-generated response, not just
+/// responses that exceed a certain threshold.
+public struct SafetyRating: Codable, Equatable {
+  /// The category describing the potential harm a piece of content may pose. See
+  /// ``SafetySetting/HarmCategory`` for a list of possible values.
+  public let category: SafetySetting.HarmCategory
+
+  /// The model-generated probability that a given piece of content falls under the harm category
+  /// described in ``category``. This does not indicate the severity of harm for a piece of
+  /// content. See ``HarmProbability`` for a list of possible values.
+  public let probability: HarmProbability
+
+  /// Initializes a new `SafetyRating` instance with the given category and probability.
+  /// Use this initializer for SwiftUI previews or tests.
+  public init(category: SafetySetting.HarmCategory, probability: HarmProbability) {
+    self.category = category
+    self.probability = probability
+  }
+
+  /// The probability that a given model output falls under a harmful content category. This does
+  /// not indicate the severity of harm for a piece of content.
+  public enum HarmProbability: String, Codable {
+    /// Unknown. A new server value that isn't recognized by the SDK.
+    case unknown = "UNKNOWN"
+
+    /// The probability was not specified in the server response.
+    case unspecified = "HARM_PROBABILITY_UNSPECIFIED"
+
+    /// The probability is zero or close to zero. For benign content, the probability across all
+    /// categories will be this value.
+    case negligible = "NEGLIGIBLE"
+
+    /// The probability is small but non-zero.
+    case low = "LOW"
+
+    /// The probability is moderate.
+    case medium = "MEDIUM"
+
+    /// The probability is high. The content described is very likely harmful.
+    case high = "HIGH"
+
+    /// Initializes a new `HarmProbability` from a decoder.
+    /// Not for external use. Initializer required for Decodable conformance.
+    public init(from decoder: Decoder) throws {
+      let value = try decoder.singleValueContainer().decode(String.self)
+      guard let decodedProbability = HarmProbability(rawValue: value) else {
+        Logging.default
+          .error("[GoogleGenerativeAI] Unrecognized HarmProbability with value \"\(value)\".")
+        self = .unknown
+        return
+      }
+
+      self = decodedProbability
+    }
+  }
+}
+
+/// Safety feedback for an entire request.
+public struct SafetyFeedback: Codable {
+  /// Safety rating evaluated from content.
+  public let rating: SafetyRating
+
+  /// Safety settings applied to the request.
+  public let setting: SafetySetting
+
+  /// Internal initializer.
+  init(rating: SafetyRating, setting: SafetySetting) {
+    self.rating = rating
+    self.setting = setting
+  }
+}
+
+/// A type used to specify a threshold for harmful content, beyond which the model will return a
+/// fallback response instead of generated content.
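// ---------------------------------------------------------------------------
// Editor's note: a hedged sketch showing how the SafetySetting type declared
// immediately below (and the SafetyRating categories above) can be supplied
// when constructing a model. The category/threshold choices, model name, and
// API-key placeholder are illustrative assumptions.
// ---------------------------------------------------------------------------
import GoogleGenerativeAI

let safetySettings = [
  SafetySetting(harmCategory: .harassment, threshold: .blockMediumAndAbove),
  SafetySetting(harmCategory: .dangerousContent, threshold: .blockOnlyHigh),
]

let safeModel = GenerativeModel(
  name: "gemini-pro",
  apiKey: "YOUR_API_KEY",
  safetySettings: safetySettings
)
// Prompts or candidates that exceed a configured threshold surface as
// GenerateContentError.promptBlocked or .responseStoppedEarly (see
// GenerativeModel.generateContent above).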
+public struct SafetySetting: Codable { + /// A type describing safety attributes, which include harmful categories and topics that can + /// be considered sensitive. + public enum HarmCategory: String, Codable { + /// Unknown. A new server value that isn't recognized by the SDK. + case unknown = "HARM_CATEGORY_UNKNOWN" + + /// Unspecified by the server. + case unspecified = "HARM_CATEGORY_UNSPECIFIED" + + /// Harassment content. + case harassment = "HARM_CATEGORY_HARASSMENT" + + /// Negative or harmful comments targeting identity and/or protected attributes. + case hateSpeech = "HARM_CATEGORY_HATE_SPEECH" + + /// Contains references to sexual acts or other lewd content. + case sexuallyExplicit = "HARM_CATEGORY_SEXUALLY_EXPLICIT" + + /// Promotes or enables access to harmful goods, services, or activities. + case dangerousContent = "HARM_CATEGORY_DANGEROUS_CONTENT" + + /// Do not explicitly use. Initializer required for Decodable conformance. + public init(from decoder: Decoder) throws { + let value = try decoder.singleValueContainer().decode(String.self) + guard let decodedCategory = HarmCategory(rawValue: value) else { + Logging.default + .error("[GoogleGenerativeAI] Unrecognized HarmCategory with value \"\(value)\".") + self = .unknown + return + } + + self = decodedCategory + } + } + + /// Block at and beyond a specified ``SafetyRating/HarmProbability``. + public enum BlockThreshold: String, Codable { + /// Unknown. A new server value that isn't recognized by the SDK. + case unknown = "UNKNOWN" + + /// Threshold is unspecified. + case unspecified = "HARM_BLOCK_THRESHOLD_UNSPECIFIED" + + // Content with `.negligible` will be allowed. + case blockLowAndAbove = "BLOCK_LOW_AND_ABOVE" + + /// Content with `.negligible` and `.low` will be allowed. + case blockMediumAndAbove = "BLOCK_MEDIUM_AND_ABOVE" + + /// Content with `.negligible`, `.low`, and `.medium` will be allowed. + case blockOnlyHigh = "BLOCK_ONLY_HIGH" + + /// All content will be allowed. + case blockNone = "BLOCK_NONE" + + /// Do not explicitly use. Initializer required for Decodable conformance. + public init(from decoder: Decoder) throws { + let value = try decoder.singleValueContainer().decode(String.self) + guard let decodedThreshold = BlockThreshold(rawValue: value) else { + Logging.default + .error("[GoogleGenerativeAI] Unrecognized BlockThreshold with value \"\(value)\".") + self = .unknown + return + } + + self = decodedThreshold + } + } + + enum CodingKeys: String, CodingKey { + case harmCategory = "category" + case threshold + } + + /// The category this safety setting should be applied to. + public let harmCategory: HarmCategory + + /// The threshold describing what content should be blocked. + public let threshold: BlockThreshold + + /// Initializes a new safety setting with the given category and threshold. + public init(harmCategory: HarmCategory, threshold: BlockThreshold) { + self.harmCategory = harmCategory + self.threshold = threshold + } +} diff --git a/Sources/GoogleGenerativeAI/Endpoints.swift b/Sources/GoogleGenerativeAI/Endpoints.swift deleted file mode 100644 index 3aacc54..0000000 --- a/Sources/GoogleGenerativeAI/Endpoints.swift +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import Foundation -import Get -import URLQueryEncoder - -extension API { - static var v1beta3: V1beta3 { - V1beta3(path: "/v1beta3") - } - - struct V1beta3 { - /// Path: `/v1beta3` - let path: String - } -} - -extension API.V1beta3 { - func generateMessage(_ model: String) -> GenerateMessageResource { - GenerateMessageResource(path: "\(path)/\(model):generateMessage") - } - - struct GenerateMessageResource { - /// Path: `/v1beta3/{+model}:generateMessage` - let path: String - - /// Generates a response from the model given an input `MessagePrompt`. - func post(_ body: GenerateMessageRequest? = nil) -> Request { - Request(path: path, method: .post, body: body, id: "generativelanguage.models.generateMessage") - } - } -} - -extension API.V1beta3 { - func generateText(_ model: String) -> GenerateTextResource { - GenerateTextResource(path: "\(path)/\(model):generateText") - } - - struct GenerateTextResource { - /// Path: `/v1beta3/{+model}:generateText` - let path: String - - /// Generates a response from the model given an input `MessagePrompt`. - func post(_ body: GenerateTextRequest? = nil) -> Request { - Request(path: path, method: .post, body: body, id: "generativelanguage.models.generateText") - } - } -} - -extension API.V1beta3 { - func embedText(_ model: String) -> EmbedTextResource { - EmbedTextResource(path: "\(path)/\(model):embedText") - } - - struct EmbedTextResource { - /// Path: `/v1beta3/{+model}:generateText` - let path: String - - /// Generates a response from the model given an input `MessagePrompt`. - func post(_ body: EmbedTextRequest? = nil) -> Request { - Request(path: path, method: .post, body: body, id: "generativelanguage.models.embedText") - } - } -} - - -extension API.V1beta3 { - var models: ModelsResource { - ModelsResource(path: path + "/models") - } - - struct ModelsResource { - /// Path: `/v1beta3/models` - let path: String - - /// Lists models available through the API. - func get(parameters: Parameters? = nil) -> Request { - Request(path: path, method: .get, query: parameters?.asQuery, id: "generativelanguage.models.list") - } - - /// Gets information about a specific Model. - func get(name: String) -> Request { - let modelPath = path.appending("/\(name)") - return Request(path: modelPath, method: .get, id: "generativelanguage.models.get") - } - - struct Parameters { - var pageSize: Int? - var pageToken: String? - - init(pageSize: Int? = nil, pageToken: String? 
= nil) { - self.pageSize = pageSize - self.pageToken = pageToken - } - - var asQuery: [(String, String?)] { - let encoder = URLQueryEncoder() - encoder.encode(pageSize, forKey: "pageSize") - encoder.encode(pageToken, forKey: "pageToken") - return encoder.items - } - } - } -} diff --git a/Sources/GoogleGenerativeAI/GenerativeLanguage.swift b/Sources/GoogleGenerativeAI/GenerativeLanguage.swift deleted file mode 100644 index 2c6361e..0000000 --- a/Sources/GoogleGenerativeAI/GenerativeLanguage.swift +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import Foundation -import Get - -#if canImport(FoundationNetworking) - import FoundationNetworking -#endif - -/// The API client for the PaLM API. -public class GenerativeLanguage { - - private(set) var apiKey: String - - private lazy var apiClient: APIClient = { - let baseURL = URL(string: "https://generativelanguage.googleapis.com") - return APIClient(baseURL: baseURL) { configuration in - configuration.sessionConfiguration.httpAdditionalHeaders = ["x-goog-api-client": "genai-swift/0.3.0"] - configuration.sessionConfiguration.httpAdditionalHeaders = ["x-goog-api-key": apiKey] - } - }() - - private var session = URLSession.shared - - /// Initializes the PalM API client. - /// - /// - Parameter apiKey: The API key to use. - public init(apiKey: String) { - self.apiKey = apiKey - } -} - -extension GenerativeLanguage: GenerativeLanguageProtocol { - public func chat(message: String, context: String? = nil, examples: [Example]? = nil, model: String = "models/chat-bison-001", temperature: Float = 1, candidateCount: Int = 1) async throws -> GenerateMessageResponse { - try await chat(message: message, - history: [Message](), - context: context, - examples: examples, - model: model, - temperature: temperature, - candidateCount: candidateCount) - } - - public func chat(message: String, history: [Message], context: String? = nil, examples: [Example]? 
= nil, model: String = "models/chat-bison-001", temperature: Float = 1, candidateCount: Int = 1) async throws -> GenerateMessageResponse { - var messages = history - messages.append(Message(content: message, author: "0")) - - let messagePrompt = MessagePrompt(examples: examples, messages: messages, context: context) - let messageRequest = GenerateMessageRequest(prompt: messagePrompt, candidateCount: Int32(candidateCount), temperature: temperature) - - let request = API.v1beta3.generateMessage(model).post(messageRequest) - let response = try await apiClient.send(request) - return response.value - } - - public func generateText(with prompt: String, model: String = "models/text-bison-001", temperature: Float = 1, candidateCount: Int = 1) async throws -> GenerateTextResponse { - let textPrompt = TextPrompt(text: prompt) - let textRequest = GenerateTextRequest(candidateCount: Int32(candidateCount), prompt: textPrompt, temperature: temperature) - let request = API.v1beta3.generateText(model).post(textRequest) - let response = try await apiClient.send(request) - return response.value - } - - public func generateEmbeddings(from text: String, model: String = "models/embedding-gecko-001") async throws -> EmbedTextResponse { - let embedTextRequest = EmbedTextRequest(text: text) - let request = API.v1beta3.embedText(model).post(embedTextRequest) - let response = try await apiClient.send(request) - return response.value - } - - public func listModels() async throws -> ListModelsResponse { - let request = API.v1beta3.models.get() - let response = try await apiClient.send(request) - return response.value - } - - public func getModel(name: String) async throws -> Model { - let request = API.v1beta3.models.get(name: name) - let response = try await apiClient.send(request) - return response.value - } - -} diff --git a/Sources/GoogleGenerativeAI/GenerativeLanguageProtocol.swift b/Sources/GoogleGenerativeAI/GenerativeLanguageProtocol.swift deleted file mode 100644 index 055d449..0000000 --- a/Sources/GoogleGenerativeAI/GenerativeLanguageProtocol.swift +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import Foundation - -public protocol GenerativeLanguageProtocol { - - /// Generates a chat response from the model. - /// - /// - Parameters: - /// - message: The user's message. - /// - context: Text that should be provided to the model first to ground the response. If not empty, - /// this `context` will be given to the model first before the `examples` and `prompt`. When using - /// a `context` be sure to provide it with every request to maintain continuity. This parameter can - /// be a description of your prompt to the model to help provide context and guide the responses. 
- /// Examples: \"Translate the phrase from English to French.\" or \"Given a statement, classify the - /// sentiment as happy, sad or neutral.\" - /// Anything included in this field will take precedence over message history if the total input - /// size exceeds the model's `input_token_limit` and the input request is truncated. - /// - examples: Examples of what the model should generate. This includes both user input and the - /// response that the model should emulate. These `examples` are treated identically to conversation - /// messages except that they take precedence over the history in `messages`: If the total input - /// size exceeds the model's `input_token_limit` the input will be truncated. Items will be - /// dropped from `messages` before `examples`. - /// - model: The name of the model to use. - /// - temperature: Controls the randomness of the output. Values can range over `[0.0,1.0]`, inclusive. - /// A value closer to `1.0` will produce responses that are more varied, while a value closer to `0.0` - /// will typically result in less surprising responses from the model. - /// - candidateCount: The number of generated response messages to return. This value must be - /// between `[1, 10]`, inclusive. - /// - Returns: A response from the model. - func chat(message: String, context: String?, examples: [Example]?, model: String, temperature: Float, candidateCount: Int) async throws -> GenerateMessageResponse - - /// Generates a chat response from the model. - /// - /// - Parameters: - /// - message: The user's message. - /// - history: A snapshot of the recent conversation history sorted chronologically. Turns - /// alternate between two authors. If the total input size exceeds the model's `input_token_limit` - /// the input will be truncated: The oldest items will be dropped from `messages`. - /// - context: Text that should be provided to the model first to ground the response. If not empty, - /// this `context` will be given to the model first before the `examples` and `prompt`. When using - /// a `context` be sure to provide it with every request to maintain continuity. This parameter can - /// be a description of your prompt to the model to help provide context and guide the responses. - /// Examples: \"Translate the phrase from English to French.\" or \"Given a statement, classify the - /// sentiment as happy, sad or neutral.\" - /// Anything included in this field will take precedence over message history if the total input - /// size exceeds the model's `input_token_limit` and the input request is truncated. - /// - examples: Examples of what the model should generate. This includes both user input and the - /// response that the model should emulate. These `examples` are treated identically to conversation - /// messages except that they take precedence over the history in `messages`: If the total input - /// size exceeds the model's `input_token_limit` the input will be truncated. Items will be - /// dropped from `messages` before `examples`. - /// - model: The name of the model to use. - /// - temperature: Controls the randomness of the output. Values can range over `[0.0,1.0]`, inclusive. - /// A value closer to `1.0` will produce responses that are more varied, while a value closer to `0.0` - /// will typically result in less surprising responses from the model. - /// - candidateCount: The number of generated response messages to return. This value must be - /// between `[1, 10]`, inclusive. - /// - Returns: A response from the model. 
- func chat(message: String, history: [Message], context: String?, examples: [Example]?, model: String, temperature: Float, candidateCount: Int) async throws -> GenerateMessageResponse - - /// Generates a response from the model given an input message. - /// - Parameters: - /// - prompt: The free-form input text given to the model as a prompt. Given a prompt, the model will - /// generate a number of `TextCompletion` candidates it predicts as the completion of the input text. - /// - model: The name of the model to use. - /// - temperature: Controls the randomness of the output. Note: The default value varies by model, see - /// the `temperature` attribute of the `Model` returned the `getModel` method. Values can range - /// from [0.0,1.0], inclusive. A value closer to 1.0 will produce responses that are more varied - /// and creative, while a value closer to 0.0 will typically result in more straightforward responses - /// from the model. - /// - candidateCount: Number of generated responses to return. This value must be between [1, 8], - /// inclusive. If unset, this will default to 1. - /// - Returns: A response from the model. - func generateText(with prompt: String, model: String, temperature: Float, candidateCount: Int) async throws -> GenerateTextResponse - - /// Generates an embedding from the model given an input message. - /// - Parameters: - /// - text: The free-form input text that the model will turn into an embedding. - /// - model: The name of the model to use. - /// - Returns: A response from the model. - func generateEmbeddings(from text: String, model: String) async throws -> EmbedTextResponse - - /// Lists models available through the API. - /// - Returns: A list of models. - func listModels() async throws -> ListModelsResponse - - /// Gets information about a specific Model. - /// - Parameter name: The model to get information about - /// - Returns: Information about the model. - func getModel(name: String) async throws -> Model -} diff --git a/Sources/OpenAPI/Entities/BatchEmbedTextRequest.swift b/Sources/OpenAPI/Entities/BatchEmbedTextRequest.swift deleted file mode 100644 index cb43df6..0000000 --- a/Sources/OpenAPI/Entities/BatchEmbedTextRequest.swift +++ /dev/null @@ -1,38 +0,0 @@ -// Generated by Create API -// https://github.com/CreateAPI/CreateAPI -// -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import Foundation - -/// Batch request to get a text embedding from the model. -public struct BatchEmbedTextRequest: Codable { - /// Required. The free-form input texts that the model will turn into an embedding. The current limit is 100 texts, over which an error will be thrown. - public var texts: [String]? - - public init(texts: [String]? 
= nil) { - self.texts = texts - } - - public init(from decoder: Decoder) throws { - let values = try decoder.container(keyedBy: StringCodingKey.self) - self.texts = try values.decodeIfPresent([String].self, forKey: "texts") - } - - public func encode(to encoder: Encoder) throws { - var values = encoder.container(keyedBy: StringCodingKey.self) - try values.encodeIfPresent(texts, forKey: "texts") - } -} diff --git a/Sources/OpenAPI/Entities/BatchEmbedTextResponse.swift b/Sources/OpenAPI/Entities/BatchEmbedTextResponse.swift deleted file mode 100644 index b847bb0..0000000 --- a/Sources/OpenAPI/Entities/BatchEmbedTextResponse.swift +++ /dev/null @@ -1,38 +0,0 @@ -// Generated by Create API -// https://github.com/CreateAPI/CreateAPI -// -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import Foundation - -/// The response to a EmbedTextRequest. -public struct BatchEmbedTextResponse: Codable { - /// Output only. The embeddings generated from the input text. - public var embeddings: [Embedding]? - - public init(embeddings: [Embedding]? = nil) { - self.embeddings = embeddings - } - - public init(from decoder: Decoder) throws { - let values = try decoder.container(keyedBy: StringCodingKey.self) - self.embeddings = try values.decodeIfPresent([Embedding].self, forKey: "embeddings") - } - - public func encode(to encoder: Encoder) throws { - var values = encoder.container(keyedBy: StringCodingKey.self) - try values.encodeIfPresent(embeddings, forKey: "embeddings") - } -} diff --git a/Sources/OpenAPI/Entities/CitationMetadata.swift b/Sources/OpenAPI/Entities/CitationMetadata.swift deleted file mode 100644 index 12c5b13..0000000 --- a/Sources/OpenAPI/Entities/CitationMetadata.swift +++ /dev/null @@ -1,38 +0,0 @@ -// Generated by Create API -// https://github.com/CreateAPI/CreateAPI -// -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import Foundation - -/// A collection of source attributions for a piece of content. -public struct CitationMetadata: Codable { - /// Citations to sources for a specific response. - public var citationSources: [CitationSource]? - - public init(citationSources: [CitationSource]? 
= nil) { - self.citationSources = citationSources - } - - public init(from decoder: Decoder) throws { - let values = try decoder.container(keyedBy: StringCodingKey.self) - self.citationSources = try values.decodeIfPresent([CitationSource].self, forKey: "citationSources") - } - - public func encode(to encoder: Encoder) throws { - var values = encoder.container(keyedBy: StringCodingKey.self) - try values.encodeIfPresent(citationSources, forKey: "citationSources") - } -} diff --git a/Sources/OpenAPI/Entities/CitationSource.swift b/Sources/OpenAPI/Entities/CitationSource.swift deleted file mode 100644 index be60e0a..0000000 --- a/Sources/OpenAPI/Entities/CitationSource.swift +++ /dev/null @@ -1,53 +0,0 @@ -// Generated by Create API -// https://github.com/CreateAPI/CreateAPI -// -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import Foundation - -/// A citation to a source for a portion of a specific response. -public struct CitationSource: Codable { - /// Optional. URI that is attributed as a source for a portion of the text. - public var uri: String? - /// Optional. License for the GitHub project that is attributed as a source for segment. License info is required for code citations. - public var license: String? - /// Optional. End of the attributed segment, exclusive. - public var endIndex: Int32? - /// Optional. Start of segment of the response that is attributed to this source. Index indicates the start of the segment, measured in bytes. - public var startIndex: Int32? - - public init(uri: String? = nil, license: String? = nil, endIndex: Int32? = nil, startIndex: Int32? = nil) { - self.uri = uri - self.license = license - self.endIndex = endIndex - self.startIndex = startIndex - } - - public init(from decoder: Decoder) throws { - let values = try decoder.container(keyedBy: StringCodingKey.self) - self.uri = try values.decodeIfPresent(String.self, forKey: "uri") - self.license = try values.decodeIfPresent(String.self, forKey: "license") - self.endIndex = try values.decodeIfPresent(Int32.self, forKey: "endIndex") - self.startIndex = try values.decodeIfPresent(Int32.self, forKey: "startIndex") - } - - public func encode(to encoder: Encoder) throws { - var values = encoder.container(keyedBy: StringCodingKey.self) - try values.encodeIfPresent(uri, forKey: "uri") - try values.encodeIfPresent(license, forKey: "license") - try values.encodeIfPresent(endIndex, forKey: "endIndex") - try values.encodeIfPresent(startIndex, forKey: "startIndex") - } -} diff --git a/Sources/OpenAPI/Entities/ContentFilter.swift b/Sources/OpenAPI/Entities/ContentFilter.swift deleted file mode 100644 index bac639d..0000000 --- a/Sources/OpenAPI/Entities/ContentFilter.swift +++ /dev/null @@ -1,50 +0,0 @@ -// Generated by Create API -// https://github.com/CreateAPI/CreateAPI -// -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import Foundation - -/// Content filtering metadata associated with processing a single request. ContentFilter contains a reason and an optional supporting string. The reason may be unspecified. -public struct ContentFilter: Codable { - /// A string that describes the filtering behavior in more detail. - public var message: String? - /// The reason content was blocked during request processing. - public var reason: Reason? - - /// The reason content was blocked during request processing. - public enum Reason: String, Codable, CaseIterable { - case blockedReasonUnspecified = "BLOCKED_REASON_UNSPECIFIED" - case safety = "SAFETY" - case other = "OTHER" - } - - public init(message: String? = nil, reason: Reason? = nil) { - self.message = message - self.reason = reason - } - - public init(from decoder: Decoder) throws { - let values = try decoder.container(keyedBy: StringCodingKey.self) - self.message = try values.decodeIfPresent(String.self, forKey: "message") - self.reason = try values.decodeIfPresent(Reason.self, forKey: "reason") - } - - public func encode(to encoder: Encoder) throws { - var values = encoder.container(keyedBy: StringCodingKey.self) - try values.encodeIfPresent(message, forKey: "message") - try values.encodeIfPresent(reason, forKey: "reason") - } -} diff --git a/Sources/OpenAPI/Entities/CountMessageTokensRequest.swift b/Sources/OpenAPI/Entities/CountMessageTokensRequest.swift deleted file mode 100644 index cd2c411..0000000 --- a/Sources/OpenAPI/Entities/CountMessageTokensRequest.swift +++ /dev/null @@ -1,38 +0,0 @@ -// Generated by Create API -// https://github.com/CreateAPI/CreateAPI -// -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import Foundation - -/// Counts the number of tokens in the `prompt` sent to a model. Models may tokenize text differently, so each model may return a different `token_count`. -public struct CountMessageTokensRequest: Codable { - /// All of the structured input text passed to the model as a prompt. A `MessagePrompt` contains a structured set of fields that provide context for the conversation, examples of user input/model output message pairs that prime the model to respond in different ways, and the conversation history or list of messages representing the alternating turns of the conversation between the user and the model. - public var prompt: MessagePrompt? - - public init(prompt: MessagePrompt? 
= nil) { - self.prompt = prompt - } - - public init(from decoder: Decoder) throws { - let values = try decoder.container(keyedBy: StringCodingKey.self) - self.prompt = try values.decodeIfPresent(MessagePrompt.self, forKey: "prompt") - } - - public func encode(to encoder: Encoder) throws { - var values = encoder.container(keyedBy: StringCodingKey.self) - try values.encodeIfPresent(prompt, forKey: "prompt") - } -} diff --git a/Sources/OpenAPI/Entities/CountMessageTokensResponse.swift b/Sources/OpenAPI/Entities/CountMessageTokensResponse.swift deleted file mode 100644 index cd56be8..0000000 --- a/Sources/OpenAPI/Entities/CountMessageTokensResponse.swift +++ /dev/null @@ -1,38 +0,0 @@ -// Generated by Create API -// https://github.com/CreateAPI/CreateAPI -// -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import Foundation - -/// A response from `CountMessageTokens`. It returns the model's `token_count` for the `prompt`. -public struct CountMessageTokensResponse: Codable { - /// The number of tokens that the `model` tokenizes the `prompt` into. Always non-negative. - public var tokenCount: Int32? - - public init(tokenCount: Int32? = nil) { - self.tokenCount = tokenCount - } - - public init(from decoder: Decoder) throws { - let values = try decoder.container(keyedBy: StringCodingKey.self) - self.tokenCount = try values.decodeIfPresent(Int32.self, forKey: "tokenCount") - } - - public func encode(to encoder: Encoder) throws { - var values = encoder.container(keyedBy: StringCodingKey.self) - try values.encodeIfPresent(tokenCount, forKey: "tokenCount") - } -} diff --git a/Sources/OpenAPI/Entities/CountTextTokensRequest.swift b/Sources/OpenAPI/Entities/CountTextTokensRequest.swift deleted file mode 100644 index 82804bb..0000000 --- a/Sources/OpenAPI/Entities/CountTextTokensRequest.swift +++ /dev/null @@ -1,38 +0,0 @@ -// Generated by Create API -// https://github.com/CreateAPI/CreateAPI -// -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import Foundation - -/// Counts the number of tokens in the `prompt` sent to a model. Models may tokenize text differently, so each model may return a different `token_count`. -public struct CountTextTokensRequest: Codable { - /// Text given to the model as a prompt. The Model will use this TextPrompt to Generate a text completion. - public var prompt: TextPrompt? - - public init(prompt: TextPrompt? 
= nil) { - self.prompt = prompt - } - - public init(from decoder: Decoder) throws { - let values = try decoder.container(keyedBy: StringCodingKey.self) - self.prompt = try values.decodeIfPresent(TextPrompt.self, forKey: "prompt") - } - - public func encode(to encoder: Encoder) throws { - var values = encoder.container(keyedBy: StringCodingKey.self) - try values.encodeIfPresent(prompt, forKey: "prompt") - } -} diff --git a/Sources/OpenAPI/Entities/CountTextTokensResponse.swift b/Sources/OpenAPI/Entities/CountTextTokensResponse.swift deleted file mode 100644 index 0bcb7ae..0000000 --- a/Sources/OpenAPI/Entities/CountTextTokensResponse.swift +++ /dev/null @@ -1,38 +0,0 @@ -// Generated by Create API -// https://github.com/CreateAPI/CreateAPI -// -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import Foundation - -/// A response from `CountTextTokens`. It returns the model's `token_count` for the `prompt`. -public struct CountTextTokensResponse: Codable { - /// The number of tokens that the `model` tokenizes the `prompt` into. Always non-negative. - public var tokenCount: Int32? - - public init(tokenCount: Int32? = nil) { - self.tokenCount = tokenCount - } - - public init(from decoder: Decoder) throws { - let values = try decoder.container(keyedBy: StringCodingKey.self) - self.tokenCount = try values.decodeIfPresent(Int32.self, forKey: "tokenCount") - } - - public func encode(to encoder: Encoder) throws { - var values = encoder.container(keyedBy: StringCodingKey.self) - try values.encodeIfPresent(tokenCount, forKey: "tokenCount") - } -} diff --git a/Sources/OpenAPI/Entities/CreateTunedModelMetadata.swift b/Sources/OpenAPI/Entities/CreateTunedModelMetadata.swift deleted file mode 100644 index 6fd7096..0000000 --- a/Sources/OpenAPI/Entities/CreateTunedModelMetadata.swift +++ /dev/null @@ -1,58 +0,0 @@ -// Generated by Create API -// https://github.com/CreateAPI/CreateAPI -// -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import Foundation - -/// Metadata about the state and progress of creating a tuned model returned from the long-running operation -public struct CreateTunedModelMetadata: Codable { - /// Name of the tuned model associated with the tuning operation. - public var tunedModel: String? - /// The number of steps completed. - public var completedSteps: Int32? - /// The completed percentage for the tuning operation. - public var completedPercent: Float? - /// The total number of tuning steps. 
- public var totalSteps: Int32? - /// Metrics collected during tuning. - public var snapshots: [TuningSnapshot]? - - public init(tunedModel: String? = nil, completedSteps: Int32? = nil, completedPercent: Float? = nil, totalSteps: Int32? = nil, snapshots: [TuningSnapshot]? = nil) { - self.tunedModel = tunedModel - self.completedSteps = completedSteps - self.completedPercent = completedPercent - self.totalSteps = totalSteps - self.snapshots = snapshots - } - - public init(from decoder: Decoder) throws { - let values = try decoder.container(keyedBy: StringCodingKey.self) - self.tunedModel = try values.decodeIfPresent(String.self, forKey: "tunedModel") - self.completedSteps = try values.decodeIfPresent(Int32.self, forKey: "completedSteps") - self.completedPercent = try values.decodeIfPresent(Float.self, forKey: "completedPercent") - self.totalSteps = try values.decodeIfPresent(Int32.self, forKey: "totalSteps") - self.snapshots = try values.decodeIfPresent([TuningSnapshot].self, forKey: "snapshots") - } - - public func encode(to encoder: Encoder) throws { - var values = encoder.container(keyedBy: StringCodingKey.self) - try values.encodeIfPresent(tunedModel, forKey: "tunedModel") - try values.encodeIfPresent(completedSteps, forKey: "completedSteps") - try values.encodeIfPresent(completedPercent, forKey: "completedPercent") - try values.encodeIfPresent(totalSteps, forKey: "totalSteps") - try values.encodeIfPresent(snapshots, forKey: "snapshots") - } -} diff --git a/Sources/OpenAPI/Entities/Dataset.swift b/Sources/OpenAPI/Entities/Dataset.swift deleted file mode 100644 index 397fbc5..0000000 --- a/Sources/OpenAPI/Entities/Dataset.swift +++ /dev/null @@ -1,38 +0,0 @@ -// Generated by Create API -// https://github.com/CreateAPI/CreateAPI -// -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import Foundation - -/// Dataset for training or validation. -public struct Dataset: Codable { - /// A set of tuning examples. Can be training or validatation data. - public var examples: TuningExamples? - - public init(examples: TuningExamples? = nil) { - self.examples = examples - } - - public init(from decoder: Decoder) throws { - let values = try decoder.container(keyedBy: StringCodingKey.self) - self.examples = try values.decodeIfPresent(TuningExamples.self, forKey: "examples") - } - - public func encode(to encoder: Encoder) throws { - var values = encoder.container(keyedBy: StringCodingKey.self) - try values.encodeIfPresent(examples, forKey: "examples") - } -} diff --git a/Sources/OpenAPI/Entities/EmbedTextRequest.swift b/Sources/OpenAPI/Entities/EmbedTextRequest.swift deleted file mode 100644 index d1ac34a..0000000 --- a/Sources/OpenAPI/Entities/EmbedTextRequest.swift +++ /dev/null @@ -1,38 +0,0 @@ -// Generated by Create API -// https://github.com/CreateAPI/CreateAPI -// -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import Foundation - -/// Request to get a text embedding from the model. -public struct EmbedTextRequest: Codable { - /// Required. The free-form input text that the model will turn into an embedding. - public var text: String? - - public init(text: String? = nil) { - self.text = text - } - - public init(from decoder: Decoder) throws { - let values = try decoder.container(keyedBy: StringCodingKey.self) - self.text = try values.decodeIfPresent(String.self, forKey: "text") - } - - public func encode(to encoder: Encoder) throws { - var values = encoder.container(keyedBy: StringCodingKey.self) - try values.encodeIfPresent(text, forKey: "text") - } -} diff --git a/Sources/OpenAPI/Entities/EmbedTextResponse.swift b/Sources/OpenAPI/Entities/EmbedTextResponse.swift deleted file mode 100644 index 5e79ad7..0000000 --- a/Sources/OpenAPI/Entities/EmbedTextResponse.swift +++ /dev/null @@ -1,38 +0,0 @@ -// Generated by Create API -// https://github.com/CreateAPI/CreateAPI -// -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import Foundation - -/// The response to a EmbedTextRequest. -public struct EmbedTextResponse: Codable { - /// A list of floats representing the embedding. - public var embedding: Embedding? - - public init(embedding: Embedding? = nil) { - self.embedding = embedding - } - - public init(from decoder: Decoder) throws { - let values = try decoder.container(keyedBy: StringCodingKey.self) - self.embedding = try values.decodeIfPresent(Embedding.self, forKey: "embedding") - } - - public func encode(to encoder: Encoder) throws { - var values = encoder.container(keyedBy: StringCodingKey.self) - try values.encodeIfPresent(embedding, forKey: "embedding") - } -} diff --git a/Sources/OpenAPI/Entities/Embedding.swift b/Sources/OpenAPI/Entities/Embedding.swift deleted file mode 100644 index 116cdef..0000000 --- a/Sources/OpenAPI/Entities/Embedding.swift +++ /dev/null @@ -1,38 +0,0 @@ -// Generated by Create API -// https://github.com/CreateAPI/CreateAPI -// -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -import Foundation - -/// A list of floats representing the embedding. -public struct Embedding: Codable { - /// The embedding values. - public var value: [Float]? - - public init(value: [Float]? = nil) { - self.value = value - } - - public init(from decoder: Decoder) throws { - let values = try decoder.container(keyedBy: StringCodingKey.self) - self.value = try values.decodeIfPresent([Float].self, forKey: "value") - } - - public func encode(to encoder: Encoder) throws { - var values = encoder.container(keyedBy: StringCodingKey.self) - try values.encodeIfPresent(value, forKey: "value") - } -} diff --git a/Sources/OpenAPI/Entities/Example.swift b/Sources/OpenAPI/Entities/Example.swift deleted file mode 100644 index 7e7c067..0000000 --- a/Sources/OpenAPI/Entities/Example.swift +++ /dev/null @@ -1,43 +0,0 @@ -// Generated by Create API -// https://github.com/CreateAPI/CreateAPI -// -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import Foundation - -/// An input/output example used to instruct the Model. It demonstrates how the model should respond or format its response. -public struct Example: Codable { - /// The base unit of structured text. A `Message` includes an `author` and the `content` of the `Message`. The `author` is used to tag messages when they are fed to the model as text. - public var input: Message? - /// The base unit of structured text. A `Message` includes an `author` and the `content` of the `Message`. The `author` is used to tag messages when they are fed to the model as text. - public var output: Message? - - public init(input: Message? = nil, output: Message? = nil) { - self.input = input - self.output = output - } - - public init(from decoder: Decoder) throws { - let values = try decoder.container(keyedBy: StringCodingKey.self) - self.input = try values.decodeIfPresent(Message.self, forKey: "input") - self.output = try values.decodeIfPresent(Message.self, forKey: "output") - } - - public func encode(to encoder: Encoder) throws { - var values = encoder.container(keyedBy: StringCodingKey.self) - try values.encodeIfPresent(input, forKey: "input") - try values.encodeIfPresent(output, forKey: "output") - } -} diff --git a/Sources/OpenAPI/Entities/GenerateMessageRequest.swift b/Sources/OpenAPI/Entities/GenerateMessageRequest.swift deleted file mode 100644 index 644d5ee..0000000 --- a/Sources/OpenAPI/Entities/GenerateMessageRequest.swift +++ /dev/null @@ -1,58 +0,0 @@ -// Generated by Create API -// https://github.com/CreateAPI/CreateAPI -// -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
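Aside (illustrative, not part of the changeset): the embedding entities deleted above (`EmbedTextRequest`, `EmbedTextResponse`, `Embedding`) are plain `Codable` value types, so they round-trip through `JSONEncoder`/`JSONDecoder` directly. A minimal sketch with hypothetical text and embedding values:

```swift
import Foundation

// Hypothetical values; EmbedTextRequest/EmbedTextResponse/Embedding are the
// generated entities deleted above.
let embedRequest = EmbedTextRequest(text: "Hello, world")

do {
  let body = try JSONEncoder().encode(embedRequest)
  print(String(data: body, encoding: .utf8) ?? "")   // {"text":"Hello, world"}

  // A server reply would be decoded the same way (values are made up here):
  let reply = #"{"embedding":{"value":[0.12,-0.34,0.56]}}"#.data(using: .utf8)!
  let response = try JSONDecoder().decode(EmbedTextResponse.self, from: reply)
  print(response.embedding?.value ?? [])
} catch {
  print("Codable round trip failed: \(error)")
}
```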
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import Foundation - -/// Request to generate a message response from the model. -public struct GenerateMessageRequest: Codable { - /// All of the structured input text passed to the model as a prompt. A `MessagePrompt` contains a structured set of fields that provide context for the conversation, examples of user input/model output message pairs that prime the model to respond in different ways, and the conversation history or list of messages representing the alternating turns of the conversation between the user and the model. - public var prompt: MessagePrompt? - /// Optional. The maximum cumulative probability of tokens to consider when sampling. The model uses combined Top-k and nucleus sampling. Nucleus sampling considers the smallest set of tokens whose probability sum is at least `top_p`. - public var topP: Float? - /// Optional. The number of generated response messages to return. This value must be between `[1, 8]`, inclusive. If unset, this will default to `1`. - public var candidateCount: Int32? - /// Optional. Controls the randomness of the output. Values can range over `[0.0,1.0]`, inclusive. A value closer to `1.0` will produce responses that are more varied, while a value closer to `0.0` will typically result in less surprising responses from the model. - public var temperature: Float? - /// Optional. The maximum number of tokens to consider when sampling. The model uses combined Top-k and nucleus sampling. Top-k sampling considers the set of `top_k` most probable tokens. - public var topK: Int32? - - public init(prompt: MessagePrompt? = nil, topP: Float? = nil, candidateCount: Int32? = nil, temperature: Float? = nil, topK: Int32? 
= nil) { - self.prompt = prompt - self.topP = topP - self.candidateCount = candidateCount - self.temperature = temperature - self.topK = topK - } - - public init(from decoder: Decoder) throws { - let values = try decoder.container(keyedBy: StringCodingKey.self) - self.prompt = try values.decodeIfPresent(MessagePrompt.self, forKey: "prompt") - self.topP = try values.decodeIfPresent(Float.self, forKey: "topP") - self.candidateCount = try values.decodeIfPresent(Int32.self, forKey: "candidateCount") - self.temperature = try values.decodeIfPresent(Float.self, forKey: "temperature") - self.topK = try values.decodeIfPresent(Int32.self, forKey: "topK") - } - - public func encode(to encoder: Encoder) throws { - var values = encoder.container(keyedBy: StringCodingKey.self) - try values.encodeIfPresent(prompt, forKey: "prompt") - try values.encodeIfPresent(topP, forKey: "topP") - try values.encodeIfPresent(candidateCount, forKey: "candidateCount") - try values.encodeIfPresent(temperature, forKey: "temperature") - try values.encodeIfPresent(topK, forKey: "topK") - } -} diff --git a/Sources/OpenAPI/Entities/GenerateMessageResponse.swift b/Sources/OpenAPI/Entities/GenerateMessageResponse.swift deleted file mode 100644 index 231fc0d..0000000 --- a/Sources/OpenAPI/Entities/GenerateMessageResponse.swift +++ /dev/null @@ -1,48 +0,0 @@ -// Generated by Create API -// https://github.com/CreateAPI/CreateAPI -// -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import Foundation - -/// The response from the model. This includes candidate messages and conversation history in the form of chronologically-ordered messages. -public struct GenerateMessageResponse: Codable { - /// The conversation history used by the model. - public var messages: [Message]? - /// Candidate response messages from the model. - public var candidates: [Message]? - /// A set of content filtering metadata for the prompt and response text. This indicates which `SafetyCategory`(s) blocked a candidate from this response, the lowest `HarmProbability` that triggered a block, and the HarmThreshold setting for that category. - public var filters: [ContentFilter]? - - public init(messages: [Message]? = nil, candidates: [Message]? = nil, filters: [ContentFilter]? 
= nil) { - self.messages = messages - self.candidates = candidates - self.filters = filters - } - - public init(from decoder: Decoder) throws { - let values = try decoder.container(keyedBy: StringCodingKey.self) - self.messages = try values.decodeIfPresent([Message].self, forKey: "messages") - self.candidates = try values.decodeIfPresent([Message].self, forKey: "candidates") - self.filters = try values.decodeIfPresent([ContentFilter].self, forKey: "filters") - } - - public func encode(to encoder: Encoder) throws { - var values = encoder.container(keyedBy: StringCodingKey.self) - try values.encodeIfPresent(messages, forKey: "messages") - try values.encodeIfPresent(candidates, forKey: "candidates") - try values.encodeIfPresent(filters, forKey: "filters") - } -} diff --git a/Sources/OpenAPI/Entities/GenerateTextRequest.swift b/Sources/OpenAPI/Entities/GenerateTextRequest.swift deleted file mode 100644 index 17cff57..0000000 --- a/Sources/OpenAPI/Entities/GenerateTextRequest.swift +++ /dev/null @@ -1,73 +0,0 @@ -// Generated by Create API -// https://github.com/CreateAPI/CreateAPI -// -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import Foundation - -/// Request to generate a text completion response from the model. -public struct GenerateTextRequest: Codable { - /// A list of unique `SafetySetting` instances for blocking unsafe content. that will be enforced on the `GenerateTextRequest.prompt` and `GenerateTextResponse.candidates`. There should not be more than one setting for each `SafetyCategory` type. The API will block any prompts and responses that fail to meet the thresholds set by these settings. This list overrides the default settings for each `SafetyCategory` specified in the safety_settings. If there is no `SafetySetting` for a given `SafetyCategory` provided in the list, the API will use the default safety setting for that category. - public var safetySettings: [SafetySetting]? - /// Optional. Number of generated responses to return. This value must be between [1, 8], inclusive. If unset, this will default to 1. - public var candidateCount: Int32? - /// The set of character sequences (up to 5) that will stop output generation. If specified, the API will stop at the first appearance of a stop sequence. The stop sequence will not be included as part of the response. - public var stopSequences: [String]? - /// Optional. The maximum number of tokens to consider when sampling. The model uses combined Top-k and nucleus sampling. Top-k sampling considers the set of `top_k` most probable tokens. Defaults to 40. Note: The default value varies by model, see the `Model.top_k` attribute of the `Model` returned the `getModel` function. - public var topK: Int32? - /// Text given to the model as a prompt. The Model will use this TextPrompt to Generate a text completion. - public var prompt: TextPrompt? - /// Optional. The maximum number of tokens to include in a candidate. 
If unset, this will default to output_token_limit specified in the `Model` specification. - public var maxOutputTokens: Int32? - /// Optional. The maximum cumulative probability of tokens to consider when sampling. The model uses combined Top-k and nucleus sampling. Tokens are sorted based on their assigned probabilities so that only the most likely tokens are considered. Top-k sampling directly limits the maximum number of tokens to consider, while Nucleus sampling limits number of tokens based on the cumulative probability. Note: The default value varies by model, see the `Model.top_p` attribute of the `Model` returned the `getModel` function. - public var topP: Float? - /// Optional. Controls the randomness of the output. Note: The default value varies by model, see the `Model.temperature` attribute of the `Model` returned the `getModel` function. Values can range from [0.0,1.0], inclusive. A value closer to 1.0 will produce responses that are more varied and creative, while a value closer to 0.0 will typically result in more straightforward responses from the model. - public var temperature: Float? - - public init(safetySettings: [SafetySetting]? = nil, candidateCount: Int32? = nil, stopSequences: [String]? = nil, topK: Int32? = nil, prompt: TextPrompt? = nil, maxOutputTokens: Int32? = nil, topP: Float? = nil, temperature: Float? = nil) { - self.safetySettings = safetySettings - self.candidateCount = candidateCount - self.stopSequences = stopSequences - self.topK = topK - self.prompt = prompt - self.maxOutputTokens = maxOutputTokens - self.topP = topP - self.temperature = temperature - } - - public init(from decoder: Decoder) throws { - let values = try decoder.container(keyedBy: StringCodingKey.self) - self.safetySettings = try values.decodeIfPresent([SafetySetting].self, forKey: "safetySettings") - self.candidateCount = try values.decodeIfPresent(Int32.self, forKey: "candidateCount") - self.stopSequences = try values.decodeIfPresent([String].self, forKey: "stopSequences") - self.topK = try values.decodeIfPresent(Int32.self, forKey: "topK") - self.prompt = try values.decodeIfPresent(TextPrompt.self, forKey: "prompt") - self.maxOutputTokens = try values.decodeIfPresent(Int32.self, forKey: "maxOutputTokens") - self.topP = try values.decodeIfPresent(Float.self, forKey: "topP") - self.temperature = try values.decodeIfPresent(Float.self, forKey: "temperature") - } - - public func encode(to encoder: Encoder) throws { - var values = encoder.container(keyedBy: StringCodingKey.self) - try values.encodeIfPresent(safetySettings, forKey: "safetySettings") - try values.encodeIfPresent(candidateCount, forKey: "candidateCount") - try values.encodeIfPresent(stopSequences, forKey: "stopSequences") - try values.encodeIfPresent(topK, forKey: "topK") - try values.encodeIfPresent(prompt, forKey: "prompt") - try values.encodeIfPresent(maxOutputTokens, forKey: "maxOutputTokens") - try values.encodeIfPresent(topP, forKey: "topP") - try values.encodeIfPresent(temperature, forKey: "temperature") - } -} diff --git a/Sources/OpenAPI/Entities/GenerateTextResponse.swift b/Sources/OpenAPI/Entities/GenerateTextResponse.swift deleted file mode 100644 index 5c91f5f..0000000 --- a/Sources/OpenAPI/Entities/GenerateTextResponse.swift +++ /dev/null @@ -1,48 +0,0 @@ -// Generated by Create API -// https://github.com/CreateAPI/CreateAPI -// -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
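Aside (illustrative, not part of the changeset): `GenerateTextRequest`, shown complete just above, exposes a memberwise initializer in which every field defaults to `nil`, so callers only supply the sampling parameters they care about. A minimal sketch with hypothetical values (`TextPrompt` is another deleted entity that appears further down in this diff):

```swift
import Foundation

// Hypothetical values; GenerateTextRequest and TextPrompt are generated
// entities removed in this change.
let textRequest = GenerateTextRequest(
  candidateCount: 1,
  stopSequences: ["\n\n"],
  topK: 40,
  prompt: TextPrompt(text: "Write a haiku about Swift concurrency."),
  maxOutputTokens: 256,
  topP: 0.95,
  temperature: 0.7
)

do {
  let body = try JSONEncoder().encode(textRequest)
  print(String(data: body, encoding: .utf8) ?? "")
} catch {
  print("Encoding failed: \(error)")
}
```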
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import Foundation - -/// The response from the model, including candidate completions. -public struct GenerateTextResponse: Codable { - /// Candidate responses from the model. - public var candidates: [TextCompletion]? - /// Returns any safety feedback related to content filtering. - public var safetyFeedback: [SafetyFeedback]? - /// A set of content filtering metadata for the prompt and response text. This indicates which `SafetyCategory`(s) blocked a candidate from this response, the lowest `HarmProbability` that triggered a block, and the HarmThreshold setting for that category. This indicates the smallest change to the `SafetySettings` that would be necessary to unblock at least 1 response. The blocking is configured by the `SafetySettings` in the request (or the default `SafetySettings` of the API). - public var filters: [ContentFilter]? - - public init(candidates: [TextCompletion]? = nil, safetyFeedback: [SafetyFeedback]? = nil, filters: [ContentFilter]? = nil) { - self.candidates = candidates - self.safetyFeedback = safetyFeedback - self.filters = filters - } - - public init(from decoder: Decoder) throws { - let values = try decoder.container(keyedBy: StringCodingKey.self) - self.candidates = try values.decodeIfPresent([TextCompletion].self, forKey: "candidates") - self.safetyFeedback = try values.decodeIfPresent([SafetyFeedback].self, forKey: "safetyFeedback") - self.filters = try values.decodeIfPresent([ContentFilter].self, forKey: "filters") - } - - public func encode(to encoder: Encoder) throws { - var values = encoder.container(keyedBy: StringCodingKey.self) - try values.encodeIfPresent(candidates, forKey: "candidates") - try values.encodeIfPresent(safetyFeedback, forKey: "safetyFeedback") - try values.encodeIfPresent(filters, forKey: "filters") - } -} diff --git a/Sources/OpenAPI/Entities/Hyperparameters.swift b/Sources/OpenAPI/Entities/Hyperparameters.swift deleted file mode 100644 index bd4c300..0000000 --- a/Sources/OpenAPI/Entities/Hyperparameters.swift +++ /dev/null @@ -1,48 +0,0 @@ -// Generated by Create API -// https://github.com/CreateAPI/CreateAPI -// -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import Foundation - -/// Hyperparameters controlling the tuning process. -public struct Hyperparameters: Codable { - /// Immutable. The batch size hyperparameter for tuning. If not set, a default of 16 or 64 will be used based on the number of training examples. - public var batchSize: Int32? - /// Immutable. The learning rate hyperparameter for tuning. 
If not set, a default of 0.0002 or 0.002 will be calculated based on the number of training examples. - public var learningRate: Float? - /// Immutable. The number of training epochs. An epoch is one pass through the training data. If not set, a default of 10 will be used. - public var epochCount: Int32? - - public init(batchSize: Int32? = nil, learningRate: Float? = nil, epochCount: Int32? = nil) { - self.batchSize = batchSize - self.learningRate = learningRate - self.epochCount = epochCount - } - - public init(from decoder: Decoder) throws { - let values = try decoder.container(keyedBy: StringCodingKey.self) - self.batchSize = try values.decodeIfPresent(Int32.self, forKey: "batchSize") - self.learningRate = try values.decodeIfPresent(Float.self, forKey: "learningRate") - self.epochCount = try values.decodeIfPresent(Int32.self, forKey: "epochCount") - } - - public func encode(to encoder: Encoder) throws { - var values = encoder.container(keyedBy: StringCodingKey.self) - try values.encodeIfPresent(batchSize, forKey: "batchSize") - try values.encodeIfPresent(learningRate, forKey: "learningRate") - try values.encodeIfPresent(epochCount, forKey: "epochCount") - } -} diff --git a/Sources/OpenAPI/Entities/ListModelsResponse.swift b/Sources/OpenAPI/Entities/ListModelsResponse.swift deleted file mode 100644 index 084adeb..0000000 --- a/Sources/OpenAPI/Entities/ListModelsResponse.swift +++ /dev/null @@ -1,43 +0,0 @@ -// Generated by Create API -// https://github.com/CreateAPI/CreateAPI -// -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import Foundation - -/// Response from `ListModel` containing a paginated list of Models. -public struct ListModelsResponse: Codable { - /// A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no more pages. - public var nextPageToken: String? - /// The returned Models. - public var models: [Model]? - - public init(nextPageToken: String? = nil, models: [Model]? 
= nil) { - self.nextPageToken = nextPageToken - self.models = models - } - - public init(from decoder: Decoder) throws { - let values = try decoder.container(keyedBy: StringCodingKey.self) - self.nextPageToken = try values.decodeIfPresent(String.self, forKey: "nextPageToken") - self.models = try values.decodeIfPresent([Model].self, forKey: "models") - } - - public func encode(to encoder: Encoder) throws { - var values = encoder.container(keyedBy: StringCodingKey.self) - try values.encodeIfPresent(nextPageToken, forKey: "nextPageToken") - try values.encodeIfPresent(models, forKey: "models") - } -} diff --git a/Sources/OpenAPI/Entities/ListPermissionsResponse.swift b/Sources/OpenAPI/Entities/ListPermissionsResponse.swift deleted file mode 100644 index 92465ed..0000000 --- a/Sources/OpenAPI/Entities/ListPermissionsResponse.swift +++ /dev/null @@ -1,43 +0,0 @@ -// Generated by Create API -// https://github.com/CreateAPI/CreateAPI -// -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import Foundation - -/// Response from `ListPermissions` containing a paginated list of permissions. -public struct ListPermissionsResponse: Codable { - /// A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no more pages. - public var nextPageToken: String? - /// Returned permissions. - public var permissions: [Permission]? - - public init(nextPageToken: String? = nil, permissions: [Permission]? = nil) { - self.nextPageToken = nextPageToken - self.permissions = permissions - } - - public init(from decoder: Decoder) throws { - let values = try decoder.container(keyedBy: StringCodingKey.self) - self.nextPageToken = try values.decodeIfPresent(String.self, forKey: "nextPageToken") - self.permissions = try values.decodeIfPresent([Permission].self, forKey: "permissions") - } - - public func encode(to encoder: Encoder) throws { - var values = encoder.container(keyedBy: StringCodingKey.self) - try values.encodeIfPresent(nextPageToken, forKey: "nextPageToken") - try values.encodeIfPresent(permissions, forKey: "permissions") - } -} diff --git a/Sources/OpenAPI/Entities/ListTunedModelsResponse.swift b/Sources/OpenAPI/Entities/ListTunedModelsResponse.swift deleted file mode 100644 index 1fb0b5c..0000000 --- a/Sources/OpenAPI/Entities/ListTunedModelsResponse.swift +++ /dev/null @@ -1,43 +0,0 @@ -// Generated by Create API -// https://github.com/CreateAPI/CreateAPI -// -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
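Aside (illustrative, not part of the changeset): the `Hyperparameters` entity deleted above is likewise all-optional; per its doc comments, unset fields fall back to server-chosen defaults. A minimal sketch using the documented default values:

```swift
// Hypothetical usage; Hyperparameters is the generated tuning entity deleted above.
// The values mirror the defaults named in its doc comments.
let hyperparameters = Hyperparameters(
  batchSize: 16,
  learningRate: 0.0002,
  epochCount: 10
)
```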
-// See the License for the specific language governing permissions and -// limitations under the License. - -import Foundation - -/// Response from `ListTunedModels` containing a paginated list of Models. -public struct ListTunedModelsResponse: Codable { - /// A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no more pages. - public var nextPageToken: String? - /// The returned Models. - public var tunedModels: [TunedModel]? - - public init(nextPageToken: String? = nil, tunedModels: [TunedModel]? = nil) { - self.nextPageToken = nextPageToken - self.tunedModels = tunedModels - } - - public init(from decoder: Decoder) throws { - let values = try decoder.container(keyedBy: StringCodingKey.self) - self.nextPageToken = try values.decodeIfPresent(String.self, forKey: "nextPageToken") - self.tunedModels = try values.decodeIfPresent([TunedModel].self, forKey: "tunedModels") - } - - public func encode(to encoder: Encoder) throws { - var values = encoder.container(keyedBy: StringCodingKey.self) - try values.encodeIfPresent(nextPageToken, forKey: "nextPageToken") - try values.encodeIfPresent(tunedModels, forKey: "tunedModels") - } -} diff --git a/Sources/OpenAPI/Entities/Message.swift b/Sources/OpenAPI/Entities/Message.swift deleted file mode 100644 index 8848044..0000000 --- a/Sources/OpenAPI/Entities/Message.swift +++ /dev/null @@ -1,48 +0,0 @@ -// Generated by Create API -// https://github.com/CreateAPI/CreateAPI -// -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import Foundation - -/// The base unit of structured text. A `Message` includes an `author` and the `content` of the `Message`. The `author` is used to tag messages when they are fed to the model as text. -public struct Message: Codable { - /// A collection of source attributions for a piece of content. - public var citationMetadata: CitationMetadata? - /// Required. The text content of the structured `Message`. - public var content: String? - /// Optional. The author of this Message. This serves as a key for tagging the content of this Message when it is fed to the model as text. The author can be any alphanumeric string. - public var author: String? - - public init(citationMetadata: CitationMetadata? = nil, content: String? = nil, author: String? 
= nil) { - self.citationMetadata = citationMetadata - self.content = content - self.author = author - } - - public init(from decoder: Decoder) throws { - let values = try decoder.container(keyedBy: StringCodingKey.self) - self.citationMetadata = try values.decodeIfPresent(CitationMetadata.self, forKey: "citationMetadata") - self.content = try values.decodeIfPresent(String.self, forKey: "content") - self.author = try values.decodeIfPresent(String.self, forKey: "author") - } - - public func encode(to encoder: Encoder) throws { - var values = encoder.container(keyedBy: StringCodingKey.self) - try values.encodeIfPresent(citationMetadata, forKey: "citationMetadata") - try values.encodeIfPresent(content, forKey: "content") - try values.encodeIfPresent(author, forKey: "author") - } -} diff --git a/Sources/OpenAPI/Entities/MessagePrompt.swift b/Sources/OpenAPI/Entities/MessagePrompt.swift deleted file mode 100644 index 91c61b1..0000000 --- a/Sources/OpenAPI/Entities/MessagePrompt.swift +++ /dev/null @@ -1,48 +0,0 @@ -// Generated by Create API -// https://github.com/CreateAPI/CreateAPI -// -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import Foundation - -/// All of the structured input text passed to the model as a prompt. A `MessagePrompt` contains a structured set of fields that provide context for the conversation, examples of user input/model output message pairs that prime the model to respond in different ways, and the conversation history or list of messages representing the alternating turns of the conversation between the user and the model. -public struct MessagePrompt: Codable { - /// Optional. Examples of what the model should generate. This includes both user input and the response that the model should emulate. These `examples` are treated identically to conversation messages except that they take precedence over the history in `messages`: If the total input size exceeds the model's `input_token_limit` the input will be truncated. Items will be dropped from `messages` before `examples`. - public var examples: [Example]? - /// Required. A snapshot of the recent conversation history sorted chronologically. Turns alternate between two authors. If the total input size exceeds the model's `input_token_limit` the input will be truncated: The oldest items will be dropped from `messages`. - public var messages: [Message]? - /// Optional. Text that should be provided to the model first to ground the response. If not empty, this `context` will be given to the model first before the `examples` and `messages`. When using a `context` be sure to provide it with every request to maintain continuity. This field can be a description of your prompt to the model to help provide context and guide the responses. Examples: "Translate the phrase from English to French." or "Given a statement, classify the sentiment as happy, sad or neutral." 
Anything included in this field will take precedence over message history if the total input size exceeds the model's `input_token_limit` and the input request is truncated. - public var context: String? - - public init(examples: [Example]? = nil, messages: [Message]? = nil, context: String? = nil) { - self.examples = examples - self.messages = messages - self.context = context - } - - public init(from decoder: Decoder) throws { - let values = try decoder.container(keyedBy: StringCodingKey.self) - self.examples = try values.decodeIfPresent([Example].self, forKey: "examples") - self.messages = try values.decodeIfPresent([Message].self, forKey: "messages") - self.context = try values.decodeIfPresent(String.self, forKey: "context") - } - - public func encode(to encoder: Encoder) throws { - var values = encoder.container(keyedBy: StringCodingKey.self) - try values.encodeIfPresent(examples, forKey: "examples") - try values.encodeIfPresent(messages, forKey: "messages") - try values.encodeIfPresent(context, forKey: "context") - } -} diff --git a/Sources/OpenAPI/Entities/Model.swift b/Sources/OpenAPI/Entities/Model.swift deleted file mode 100644 index c74cbee..0000000 --- a/Sources/OpenAPI/Entities/Model.swift +++ /dev/null @@ -1,88 +0,0 @@ -// Generated by Create API -// https://github.com/CreateAPI/CreateAPI -// -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import Foundation - -/// Information about a Generative Language Model. -public struct Model: Codable { - /// Maximum number of input tokens allowed for this model. - public var inputTokenLimit: Int32? - /// For Top-k sampling. Top-k sampling considers the set of `top_k` most probable tokens. This value specifies default to be used by the backend while making the call to the model. - public var topK: Int32? - /// For Nucleus sampling. Nucleus sampling considers the smallest set of tokens whose probability sum is at least `top_p`. This value specifies default to be used by the backend while making the call to the model. - public var topP: Float? - /// Required. The resource name of the `Model`. Format: `models/{model}` with a `{model}` naming convention of: * "{base_model_id}-{version}" Examples: * `models/chat-bison-001` - public var name: String? - /// Maximum number of output tokens available for this model. - public var outputTokenLimit: Int32? - /// The human-readable name of the model. E.g. "Chat Bison". The name can be up to 128 characters long and can consist of any UTF-8 characters. - public var displayName: String? - /// Required. The name of the base model, pass this to the generation request. Examples: * `chat-bison` - public var baseModelID: String? - /// Required. The version number of the model. This represents the major version - public var version: String? - /// The model's supported generation methods. The method names are defined as Pascal case strings, such as `generateMessage` which correspond to API methods. - public var supportedGenerationMethods: [String]? 
- /// A short description of the model. - public var description: String? - /// Controls the randomness of the output. Values can range over `[0.0,1.0]`, inclusive. A value closer to `1.0` will produce responses that are more varied, while a value closer to `0.0` will typically result in less surprising responses from the model. This value specifies default to be used by the backend while making the call to the model. - public var temperature: Float? - - public init(inputTokenLimit: Int32? = nil, topK: Int32? = nil, topP: Float? = nil, name: String? = nil, outputTokenLimit: Int32? = nil, displayName: String? = nil, baseModelID: String? = nil, version: String? = nil, supportedGenerationMethods: [String]? = nil, description: String? = nil, temperature: Float? = nil) { - self.inputTokenLimit = inputTokenLimit - self.topK = topK - self.topP = topP - self.name = name - self.outputTokenLimit = outputTokenLimit - self.displayName = displayName - self.baseModelID = baseModelID - self.version = version - self.supportedGenerationMethods = supportedGenerationMethods - self.description = description - self.temperature = temperature - } - - public init(from decoder: Decoder) throws { - let values = try decoder.container(keyedBy: StringCodingKey.self) - self.inputTokenLimit = try values.decodeIfPresent(Int32.self, forKey: "inputTokenLimit") - self.topK = try values.decodeIfPresent(Int32.self, forKey: "topK") - self.topP = try values.decodeIfPresent(Float.self, forKey: "topP") - self.name = try values.decodeIfPresent(String.self, forKey: "name") - self.outputTokenLimit = try values.decodeIfPresent(Int32.self, forKey: "outputTokenLimit") - self.displayName = try values.decodeIfPresent(String.self, forKey: "displayName") - self.baseModelID = try values.decodeIfPresent(String.self, forKey: "baseModelId") - self.version = try values.decodeIfPresent(String.self, forKey: "version") - self.supportedGenerationMethods = try values.decodeIfPresent([String].self, forKey: "supportedGenerationMethods") - self.description = try values.decodeIfPresent(String.self, forKey: "description") - self.temperature = try values.decodeIfPresent(Float.self, forKey: "temperature") - } - - public func encode(to encoder: Encoder) throws { - var values = encoder.container(keyedBy: StringCodingKey.self) - try values.encodeIfPresent(inputTokenLimit, forKey: "inputTokenLimit") - try values.encodeIfPresent(topK, forKey: "topK") - try values.encodeIfPresent(topP, forKey: "topP") - try values.encodeIfPresent(name, forKey: "name") - try values.encodeIfPresent(outputTokenLimit, forKey: "outputTokenLimit") - try values.encodeIfPresent(displayName, forKey: "displayName") - try values.encodeIfPresent(baseModelID, forKey: "baseModelId") - try values.encodeIfPresent(version, forKey: "version") - try values.encodeIfPresent(supportedGenerationMethods, forKey: "supportedGenerationMethods") - try values.encodeIfPresent(description, forKey: "description") - try values.encodeIfPresent(temperature, forKey: "temperature") - } -} diff --git a/Sources/OpenAPI/Entities/Operation.swift b/Sources/OpenAPI/Entities/Operation.swift deleted file mode 100644 index f6a0806..0000000 --- a/Sources/OpenAPI/Entities/Operation.swift +++ /dev/null @@ -1,58 +0,0 @@ -// Generated by Create API -// https://github.com/CreateAPI/CreateAPI -// -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
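Aside (illustrative, not part of the changeset): `ListModelsResponse` and `Model`, both deleted above, decode a paginated models listing. Note the custom `Codable` implementation maps the wire key `baseModelId` to the Swift property `baseModelID`. A minimal sketch with a hypothetical payload (the model name and display name come from the doc comments above; the token limits are made up):

```swift
import Foundation

let json = #"""
{
  "models": [
    {
      "name": "models/chat-bison-001",
      "baseModelId": "chat-bison",
      "version": "001",
      "displayName": "Chat Bison",
      "inputTokenLimit": 4096,
      "outputTokenLimit": 1024,
      "supportedGenerationMethods": ["generateMessage"]
    }
  ],
  "nextPageToken": ""
}
"""#.data(using: .utf8)!

do {
  let page = try JSONDecoder().decode(ListModelsResponse.self, from: json)
  for model in page.models ?? [] {
    print(model.name ?? "?", model.inputTokenLimit ?? 0)
  }
} catch {
  print("Decoding failed: \(error)")
}
```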
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import Foundation - -/// This resource represents a long-running operation that is the result of a network API call. -public struct Operation: Codable { - /// If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available. - public var isDone: Bool? - /// The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`. - public var name: String? - /// The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`. - public var response: [String: AnyJSON]? - /// The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). - public var error: Status? - /// Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any. - public var metadata: [String: AnyJSON]? - - public init(isDone: Bool? = nil, name: String? = nil, response: [String: AnyJSON]? = nil, error: Status? = nil, metadata: [String: AnyJSON]? 
= nil) { - self.isDone = isDone - self.name = name - self.response = response - self.error = error - self.metadata = metadata - } - - public init(from decoder: Decoder) throws { - let values = try decoder.container(keyedBy: StringCodingKey.self) - self.isDone = try values.decodeIfPresent(Bool.self, forKey: "done") - self.name = try values.decodeIfPresent(String.self, forKey: "name") - self.response = try values.decodeIfPresent([String: AnyJSON].self, forKey: "response") - self.error = try values.decodeIfPresent(Status.self, forKey: "error") - self.metadata = try values.decodeIfPresent([String: AnyJSON].self, forKey: "metadata") - } - - public func encode(to encoder: Encoder) throws { - var values = encoder.container(keyedBy: StringCodingKey.self) - try values.encodeIfPresent(isDone, forKey: "done") - try values.encodeIfPresent(name, forKey: "name") - try values.encodeIfPresent(response, forKey: "response") - try values.encodeIfPresent(error, forKey: "error") - try values.encodeIfPresent(metadata, forKey: "metadata") - } -} diff --git a/Sources/OpenAPI/Entities/Permission.swift b/Sources/OpenAPI/Entities/Permission.swift deleted file mode 100644 index b84a2ea..0000000 --- a/Sources/OpenAPI/Entities/Permission.swift +++ /dev/null @@ -1,69 +0,0 @@ -// Generated by Create API -// https://github.com/CreateAPI/CreateAPI -// -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import Foundation - -/// Permission resource grants user, group or the rest of the world access to the PaLM API resource (e.g. a tuned model, file). A role is a collection of permitted operations that allows users to perform specific actions on PaLM API resources. To make them available to users, groups, or service accounts, you assign roles. When you assign a role, you grant permissions that the role contains. There are three concentric roles. Each role is a superset of the previous role's permitted operations: - reader can use the resource (e.g. tuned model) for inference - writer has reader's permissions and additionally can edit and share - owner has writer's permissions and additionally can delete -public struct Permission: Codable { - /// Output only. The permission name. A unique name will be generated on create. Example: tunedModels/{tuned_model}permssions/{permission} Output only. - public var name: String? - /// Optional. Immutable. The email address of the user of group which this permission refers. Field is not set when permission's grantee type is EVERYONE. - public var emailAddress: String? - /// Required. Immutable. The type of the grantee. - public var granteeType: GranteeType? - /// Required. The role granted by this permission. - public var role: Role? - - /// Required. Immutable. The type of the grantee. - public enum GranteeType: String, Codable, CaseIterable { - case granteeTypeUnspecified = "GRANTEE_TYPE_UNSPECIFIED" - case user = "USER" - case group = "GROUP" - case everyone = "EVERYONE" - } - - /// Required. The role granted by this permission. 
- public enum Role: String, Codable, CaseIterable { - case roleUnspecified = "ROLE_UNSPECIFIED" - case owner = "OWNER" - case writer = "WRITER" - case reader = "READER" - } - - public init(name: String? = nil, emailAddress: String? = nil, granteeType: GranteeType? = nil, role: Role? = nil) { - self.name = name - self.emailAddress = emailAddress - self.granteeType = granteeType - self.role = role - } - - public init(from decoder: Decoder) throws { - let values = try decoder.container(keyedBy: StringCodingKey.self) - self.name = try values.decodeIfPresent(String.self, forKey: "name") - self.emailAddress = try values.decodeIfPresent(String.self, forKey: "emailAddress") - self.granteeType = try values.decodeIfPresent(GranteeType.self, forKey: "granteeType") - self.role = try values.decodeIfPresent(Role.self, forKey: "role") - } - - public func encode(to encoder: Encoder) throws { - var values = encoder.container(keyedBy: StringCodingKey.self) - try values.encodeIfPresent(name, forKey: "name") - try values.encodeIfPresent(emailAddress, forKey: "emailAddress") - try values.encodeIfPresent(granteeType, forKey: "granteeType") - try values.encodeIfPresent(role, forKey: "role") - } -} diff --git a/Sources/OpenAPI/Entities/SafetyFeedback.swift b/Sources/OpenAPI/Entities/SafetyFeedback.swift deleted file mode 100644 index 9a890f1..0000000 --- a/Sources/OpenAPI/Entities/SafetyFeedback.swift +++ /dev/null @@ -1,43 +0,0 @@ -// Generated by Create API -// https://github.com/CreateAPI/CreateAPI -// -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import Foundation - -/// Safety feedback for an entire request. This field is populated if content in the input and/or response is blocked due to safety settings. SafetyFeedback may not exist for every HarmCategory. Each SafetyFeedback will return the safety settings used by the request as well as the lowest HarmProbability that should be allowed in order to return a result. -public struct SafetyFeedback: Codable { - /// Safety rating for a piece of content. The safety rating contains the category of harm and the harm probability level in that category for a piece of content. Content is classified for safety across a number of harm categories and the probability of the harm classification is included here. - public var rating: SafetyRating? - /// Safety setting, affecting the safety-blocking behavior. Passing a safety setting for a category changes the allowed proability that content is blocked. - public var setting: SafetySetting? - - public init(rating: SafetyRating? = nil, setting: SafetySetting? 
= nil) { - self.rating = rating - self.setting = setting - } - - public init(from decoder: Decoder) throws { - let values = try decoder.container(keyedBy: StringCodingKey.self) - self.rating = try values.decodeIfPresent(SafetyRating.self, forKey: "rating") - self.setting = try values.decodeIfPresent(SafetySetting.self, forKey: "setting") - } - - public func encode(to encoder: Encoder) throws { - var values = encoder.container(keyedBy: StringCodingKey.self) - try values.encodeIfPresent(rating, forKey: "rating") - try values.encodeIfPresent(setting, forKey: "setting") - } -} diff --git a/Sources/OpenAPI/Entities/SafetyRating.swift b/Sources/OpenAPI/Entities/SafetyRating.swift deleted file mode 100644 index e856bac..0000000 --- a/Sources/OpenAPI/Entities/SafetyRating.swift +++ /dev/null @@ -1,63 +0,0 @@ -// Generated by Create API -// https://github.com/CreateAPI/CreateAPI -// -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import Foundation - -/// Safety rating for a piece of content. The safety rating contains the category of harm and the harm probability level in that category for a piece of content. Content is classified for safety across a number of harm categories and the probability of the harm classification is included here. -public struct SafetyRating: Codable { - /// Required. The category for this rating. - public var category: Category? - /// Required. The probability of harm for this content. - public var probability: Probability? - - /// Required. The category for this rating. - public enum Category: String, Codable, CaseIterable { - case harmCategoryUnspecified = "HARM_CATEGORY_UNSPECIFIED" - case harmCategoryDerogatory = "HARM_CATEGORY_DEROGATORY" - case harmCategoryToxicity = "HARM_CATEGORY_TOXICITY" - case harmCategoryViolence = "HARM_CATEGORY_VIOLENCE" - case harmCategorySexual = "HARM_CATEGORY_SEXUAL" - case harmCategoryMedical = "HARM_CATEGORY_MEDICAL" - case harmCategoryDangerous = "HARM_CATEGORY_DANGEROUS" - } - - /// Required. The probability of harm for this content. - public enum Probability: String, Codable, CaseIterable { - case harmProbabilityUnspecified = "HARM_PROBABILITY_UNSPECIFIED" - case negligible = "NEGLIGIBLE" - case low = "LOW" - case medium = "MEDIUM" - case high = "HIGH" - } - - public init(category: Category? = nil, probability: Probability? 
= nil) { - self.category = category - self.probability = probability - } - - public init(from decoder: Decoder) throws { - let values = try decoder.container(keyedBy: StringCodingKey.self) - self.category = try values.decodeIfPresent(Category.self, forKey: "category") - self.probability = try values.decodeIfPresent(Probability.self, forKey: "probability") - } - - public func encode(to encoder: Encoder) throws { - var values = encoder.container(keyedBy: StringCodingKey.self) - try values.encodeIfPresent(category, forKey: "category") - try values.encodeIfPresent(probability, forKey: "probability") - } -} diff --git a/Sources/OpenAPI/Entities/SafetySetting.swift b/Sources/OpenAPI/Entities/SafetySetting.swift deleted file mode 100644 index 7a97736..0000000 --- a/Sources/OpenAPI/Entities/SafetySetting.swift +++ /dev/null @@ -1,63 +0,0 @@ -// Generated by Create API -// https://github.com/CreateAPI/CreateAPI -// -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import Foundation - -/// Safety setting, affecting the safety-blocking behavior. Passing a safety setting for a category changes the allowed proability that content is blocked. -public struct SafetySetting: Codable { - /// Required. The category for this setting. - public var category: Category? - /// Required. Controls the probability threshold at which harm is blocked. - public var threshold: Threshold? - - /// Required. The category for this setting. - public enum Category: String, Codable, CaseIterable { - case harmCategoryUnspecified = "HARM_CATEGORY_UNSPECIFIED" - case harmCategoryDerogatory = "HARM_CATEGORY_DEROGATORY" - case harmCategoryToxicity = "HARM_CATEGORY_TOXICITY" - case harmCategoryViolence = "HARM_CATEGORY_VIOLENCE" - case harmCategorySexual = "HARM_CATEGORY_SEXUAL" - case harmCategoryMedical = "HARM_CATEGORY_MEDICAL" - case harmCategoryDangerous = "HARM_CATEGORY_DANGEROUS" - } - - /// Required. Controls the probability threshold at which harm is blocked. - public enum Threshold: String, Codable, CaseIterable { - case harmBlockThresholdUnspecified = "HARM_BLOCK_THRESHOLD_UNSPECIFIED" - case blockLowAndAbove = "BLOCK_LOW_AND_ABOVE" - case blockMediumAndAbove = "BLOCK_MEDIUM_AND_ABOVE" - case blockOnlyHigh = "BLOCK_ONLY_HIGH" - case blockNone = "BLOCK_NONE" - } - - public init(category: Category? = nil, threshold: Threshold? 
= nil) { - self.category = category - self.threshold = threshold - } - - public init(from decoder: Decoder) throws { - let values = try decoder.container(keyedBy: StringCodingKey.self) - self.category = try values.decodeIfPresent(Category.self, forKey: "category") - self.threshold = try values.decodeIfPresent(Threshold.self, forKey: "threshold") - } - - public func encode(to encoder: Encoder) throws { - var values = encoder.container(keyedBy: StringCodingKey.self) - try values.encodeIfPresent(category, forKey: "category") - try values.encodeIfPresent(threshold, forKey: "threshold") - } -} diff --git a/Sources/OpenAPI/Entities/Status.swift b/Sources/OpenAPI/Entities/Status.swift deleted file mode 100644 index 7a70384..0000000 --- a/Sources/OpenAPI/Entities/Status.swift +++ /dev/null @@ -1,48 +0,0 @@ -// Generated by Create API -// https://github.com/CreateAPI/CreateAPI -// -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import Foundation - -/// The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). -public struct Status: Codable { - /// The status code, which should be an enum value of google.rpc.Code. - public var code: Int32? - /// A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. - public var message: String? - /// A list of messages that carry the error details. There is a common set of message types for APIs to use. - public var details: [[String: AnyJSON]]? - - public init(code: Int32? = nil, message: String? = nil, details: [[String: AnyJSON]]? 
= nil) { - self.code = code - self.message = message - self.details = details - } - - public init(from decoder: Decoder) throws { - let values = try decoder.container(keyedBy: StringCodingKey.self) - self.code = try values.decodeIfPresent(Int32.self, forKey: "code") - self.message = try values.decodeIfPresent(String.self, forKey: "message") - self.details = try values.decodeIfPresent([[String: AnyJSON]].self, forKey: "details") - } - - public func encode(to encoder: Encoder) throws { - var values = encoder.container(keyedBy: StringCodingKey.self) - try values.encodeIfPresent(code, forKey: "code") - try values.encodeIfPresent(message, forKey: "message") - try values.encodeIfPresent(details, forKey: "details") - } -} diff --git a/Sources/OpenAPI/Entities/TextCompletion.swift b/Sources/OpenAPI/Entities/TextCompletion.swift deleted file mode 100644 index 9f01aac..0000000 --- a/Sources/OpenAPI/Entities/TextCompletion.swift +++ /dev/null @@ -1,48 +0,0 @@ -// Generated by Create API -// https://github.com/CreateAPI/CreateAPI -// -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import Foundation - -/// Output text returned from a model. -public struct TextCompletion: Codable { - /// A collection of source attributions for a piece of content. - public var citationMetadata: CitationMetadata? - /// Output only. The generated text returned from the model. - public var output: String? - /// Ratings for the safety of a response. There is at most one rating per category. - public var safetyRatings: [SafetyRating]? - - public init(citationMetadata: CitationMetadata? = nil, output: String? = nil, safetyRatings: [SafetyRating]? = nil) { - self.citationMetadata = citationMetadata - self.output = output - self.safetyRatings = safetyRatings - } - - public init(from decoder: Decoder) throws { - let values = try decoder.container(keyedBy: StringCodingKey.self) - self.citationMetadata = try values.decodeIfPresent(CitationMetadata.self, forKey: "citationMetadata") - self.output = try values.decodeIfPresent(String.self, forKey: "output") - self.safetyRatings = try values.decodeIfPresent([SafetyRating].self, forKey: "safetyRatings") - } - - public func encode(to encoder: Encoder) throws { - var values = encoder.container(keyedBy: StringCodingKey.self) - try values.encodeIfPresent(citationMetadata, forKey: "citationMetadata") - try values.encodeIfPresent(output, forKey: "output") - try values.encodeIfPresent(safetyRatings, forKey: "safetyRatings") - } -} diff --git a/Sources/OpenAPI/Entities/TextPrompt.swift b/Sources/OpenAPI/Entities/TextPrompt.swift deleted file mode 100644 index dc21ce2..0000000 --- a/Sources/OpenAPI/Entities/TextPrompt.swift +++ /dev/null @@ -1,38 +0,0 @@ -// Generated by Create API -// https://github.com/CreateAPI/CreateAPI -// -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
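Aside (illustrative, not part of the changeset): the long-running-operation entities deleted above (`Operation` and `Status`) decode the wire key `done` into `isDone` and surface failures as a `Status`. A minimal sketch against a hypothetical payload (the operation name and error details are made up; error code 8 corresponds to RESOURCE_EXHAUSTED in google.rpc.Code):

```swift
import Foundation

let payload = #"""
{
  "name": "tunedModels/sentence-translator-u3b7m/operations/abc123",
  "done": true,
  "error": { "code": 8, "message": "Resource has been exhausted" }
}
"""#.data(using: .utf8)!

do {
  let operation = try JSONDecoder().decode(Operation.self, from: payload)
  if operation.isDone == true {
    if let status = operation.error {
      print("Operation failed: \(status.code ?? 0) \(status.message ?? "")")
    } else {
      print("Operation finished; response:", operation.response ?? [:])
    }
  } else {
    print("Still running...")
  }
} catch {
  print("Decoding failed: \(error)")
}
```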
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import Foundation - -/// Text given to the model as a prompt. The model will use this TextPrompt to generate a text completion. -public struct TextPrompt: Codable { - /// Required. The prompt text. - public var text: String? - - public init(text: String? = nil) { - self.text = text - } - - public init(from decoder: Decoder) throws { - let values = try decoder.container(keyedBy: StringCodingKey.self) - self.text = try values.decodeIfPresent(String.self, forKey: "text") - } - - public func encode(to encoder: Encoder) throws { - var values = encoder.container(keyedBy: StringCodingKey.self) - try values.encodeIfPresent(text, forKey: "text") - } -} diff --git a/Sources/OpenAPI/Entities/TransferOwnershipRequest.swift b/Sources/OpenAPI/Entities/TransferOwnershipRequest.swift deleted file mode 100644 index ce8da7f..0000000 --- a/Sources/OpenAPI/Entities/TransferOwnershipRequest.swift +++ /dev/null @@ -1,38 +0,0 @@ -// Generated by Create API -// https://github.com/CreateAPI/CreateAPI -// -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import Foundation - -/// Request to transfer the ownership of the tuned model. -public struct TransferOwnershipRequest: Codable { - /// Required. The email address of the user to whom the tuned model is being transferred. - public var emailAddress: String? - - public init(emailAddress: String? = nil) { - self.emailAddress = emailAddress - } - - public init(from decoder: Decoder) throws { - let values = try decoder.container(keyedBy: StringCodingKey.self) - self.emailAddress = try values.decodeIfPresent(String.self, forKey: "emailAddress") - } - - public func encode(to encoder: Encoder) throws { - var values = encoder.container(keyedBy: StringCodingKey.self) - try values.encodeIfPresent(emailAddress, forKey: "emailAddress") - } -} diff --git a/Sources/OpenAPI/Entities/TunedModel.swift b/Sources/OpenAPI/Entities/TunedModel.swift deleted file mode 100644 index a919fcd..0000000 --- a/Sources/OpenAPI/Entities/TunedModel.swift +++ /dev/null @@ -1,101 +0,0 @@ -// Generated by Create API -// https://github.com/CreateAPI/CreateAPI -// -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import Foundation - -/// A fine-tuned model created using ModelService.CreateTunedModel. -public struct TunedModel: Codable { - /// Optional. A short description of this model. - public var description: String? - /// Optional. For Nucleus sampling. Nucleus sampling considers the smallest set of tokens whose probability sum is at least `top_p`. This value specifies default to be the one used by the base model while creating the model. - public var topP: Float? - /// Output only. The timestamp when this model was updated. - public var updateTime: String? - /// Tuning tasks that create tuned models. - public var tuningTask: TuningTask? - /// Tuned model as a source for training a new model. - public var tunedModelSource: TunedModelSource? - /// Optional. Controls the randomness of the output. Values can range over `[0.0,1.0]`, inclusive. A value closer to `1.0` will produce responses that are more varied, while a value closer to `0.0` will typically result in less surprising responses from the model. This value specifies default to be the one used by the base model while creating the model. - public var temperature: Float? - /// Output only. The timestamp when this model was created. - public var createTime: String? - /// Immutable. The name of the `Model` to tune. Example: `models/text-bison-001` - public var baseModel: String? - /// Output only. The tuned model name. A unique name will be generated on create. Example: `tunedModels/az2mb0bpw6i` If display_name is set on create, the id portion of the name will be set by concatenating the words of the display_name with hyphens and adding a random portion for uniqueness. Example: display_name = "Sentence Translator" name = "tunedModels/sentence-translator-u3b7m" - public var name: String? - /// Optional. For Top-k sampling. Top-k sampling considers the set of `top_k` most probable tokens. This value specifies default to be used by the backend while making the call to the model. This value specifies default to be the one used by the base model while creating the model. - public var topK: Int32? - /// Optional. The name to display for this model in user interfaces. The display name must be up to 40 characters including spaces. - public var displayName: String? - /// Output only. The state of the tuned model. - public var state: State? - - /// Output only. The state of the tuned model. - public enum State: String, Codable, CaseIterable { - case stateUnspecified = "STATE_UNSPECIFIED" - case creating = "CREATING" - case active = "ACTIVE" - case failed = "FAILED" - } - - public init(description: String? = nil, topP: Float? = nil, updateTime: String? = nil, tuningTask: TuningTask? = nil, tunedModelSource: TunedModelSource? = nil, temperature: Float? = nil, createTime: String? = nil, baseModel: String? = nil, name: String? = nil, topK: Int32? = nil, displayName: String? = nil, state: State? 
= nil) { - self.description = description - self.topP = topP - self.updateTime = updateTime - self.tuningTask = tuningTask - self.tunedModelSource = tunedModelSource - self.temperature = temperature - self.createTime = createTime - self.baseModel = baseModel - self.name = name - self.topK = topK - self.displayName = displayName - self.state = state - } - - public init(from decoder: Decoder) throws { - let values = try decoder.container(keyedBy: StringCodingKey.self) - self.description = try values.decodeIfPresent(String.self, forKey: "description") - self.topP = try values.decodeIfPresent(Float.self, forKey: "topP") - self.updateTime = try values.decodeIfPresent(String.self, forKey: "updateTime") - self.tuningTask = try values.decodeIfPresent(TuningTask.self, forKey: "tuningTask") - self.tunedModelSource = try values.decodeIfPresent(TunedModelSource.self, forKey: "tunedModelSource") - self.temperature = try values.decodeIfPresent(Float.self, forKey: "temperature") - self.createTime = try values.decodeIfPresent(String.self, forKey: "createTime") - self.baseModel = try values.decodeIfPresent(String.self, forKey: "baseModel") - self.name = try values.decodeIfPresent(String.self, forKey: "name") - self.topK = try values.decodeIfPresent(Int32.self, forKey: "topK") - self.displayName = try values.decodeIfPresent(String.self, forKey: "displayName") - self.state = try values.decodeIfPresent(State.self, forKey: "state") - } - - public func encode(to encoder: Encoder) throws { - var values = encoder.container(keyedBy: StringCodingKey.self) - try values.encodeIfPresent(description, forKey: "description") - try values.encodeIfPresent(topP, forKey: "topP") - try values.encodeIfPresent(updateTime, forKey: "updateTime") - try values.encodeIfPresent(tuningTask, forKey: "tuningTask") - try values.encodeIfPresent(tunedModelSource, forKey: "tunedModelSource") - try values.encodeIfPresent(temperature, forKey: "temperature") - try values.encodeIfPresent(createTime, forKey: "createTime") - try values.encodeIfPresent(baseModel, forKey: "baseModel") - try values.encodeIfPresent(name, forKey: "name") - try values.encodeIfPresent(topK, forKey: "topK") - try values.encodeIfPresent(displayName, forKey: "displayName") - try values.encodeIfPresent(state, forKey: "state") - } -} diff --git a/Sources/OpenAPI/Entities/TunedModelSource.swift b/Sources/OpenAPI/Entities/TunedModelSource.swift deleted file mode 100644 index 96f9f6b..0000000 --- a/Sources/OpenAPI/Entities/TunedModelSource.swift +++ /dev/null @@ -1,43 +0,0 @@ -// Generated by Create API -// https://github.com/CreateAPI/CreateAPI -// -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import Foundation - -/// Tuned model as a source for training a new model. -public struct TunedModelSource: Codable { - /// Output only. The name of the base `Model` this `TunedModel` was tuned from. Example: `models/text-bison-001` - public var baseModel: String? - /// Immutable. 
The name of the `TunedModel` to use as the starting point for training the new model. Example: `tunedModels/my-tuned-model` - public var tunedModel: String? - - public init(baseModel: String? = nil, tunedModel: String? = nil) { - self.baseModel = baseModel - self.tunedModel = tunedModel - } - - public init(from decoder: Decoder) throws { - let values = try decoder.container(keyedBy: StringCodingKey.self) - self.baseModel = try values.decodeIfPresent(String.self, forKey: "baseModel") - self.tunedModel = try values.decodeIfPresent(String.self, forKey: "tunedModel") - } - - public func encode(to encoder: Encoder) throws { - var values = encoder.container(keyedBy: StringCodingKey.self) - try values.encodeIfPresent(baseModel, forKey: "baseModel") - try values.encodeIfPresent(tunedModel, forKey: "tunedModel") - } -} diff --git a/Sources/OpenAPI/Entities/TuningExample.swift b/Sources/OpenAPI/Entities/TuningExample.swift deleted file mode 100644 index 6e7be9e..0000000 --- a/Sources/OpenAPI/Entities/TuningExample.swift +++ /dev/null @@ -1,43 +0,0 @@ -// Generated by Create API -// https://github.com/CreateAPI/CreateAPI -// -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import Foundation - -/// A single example for tuning. -public struct TuningExample: Codable { - /// Optional. Text model input. - public var textInput: String? - /// Required. The expected model output. - public var output: String? - - public init(textInput: String? = nil, output: String? = nil) { - self.textInput = textInput - self.output = output - } - - public init(from decoder: Decoder) throws { - let values = try decoder.container(keyedBy: StringCodingKey.self) - self.textInput = try values.decodeIfPresent(String.self, forKey: "textInput") - self.output = try values.decodeIfPresent(String.self, forKey: "output") - } - - public func encode(to encoder: Encoder) throws { - var values = encoder.container(keyedBy: StringCodingKey.self) - try values.encodeIfPresent(textInput, forKey: "textInput") - try values.encodeIfPresent(output, forKey: "output") - } -} diff --git a/Sources/OpenAPI/Entities/TuningExamples.swift b/Sources/OpenAPI/Entities/TuningExamples.swift deleted file mode 100644 index 6d71d2d..0000000 --- a/Sources/OpenAPI/Entities/TuningExamples.swift +++ /dev/null @@ -1,38 +0,0 @@ -// Generated by Create API -// https://github.com/CreateAPI/CreateAPI -// -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -import Foundation - -/// A set of tuning examples. Can be training or validation data. -public struct TuningExamples: Codable { - /// Required. The examples. Example input can be for text or discuss, but all examples in a set must be of the same type. - public var examples: [TuningExample]? - - public init(examples: [TuningExample]? = nil) { - self.examples = examples - } - - public init(from decoder: Decoder) throws { - let values = try decoder.container(keyedBy: StringCodingKey.self) - self.examples = try values.decodeIfPresent([TuningExample].self, forKey: "examples") - } - - public func encode(to encoder: Encoder) throws { - var values = encoder.container(keyedBy: StringCodingKey.self) - try values.encodeIfPresent(examples, forKey: "examples") - } -} diff --git a/Sources/OpenAPI/Entities/TuningSnapshot.swift b/Sources/OpenAPI/Entities/TuningSnapshot.swift deleted file mode 100644 index cf1b8a2..0000000 --- a/Sources/OpenAPI/Entities/TuningSnapshot.swift +++ /dev/null @@ -1,53 +0,0 @@ -// Generated by Create API -// https://github.com/CreateAPI/CreateAPI -// -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import Foundation - -/// Record for a single tuning step. -public struct TuningSnapshot: Codable { - /// Output only. The timestamp when this metric was computed. - public var computeTime: String? - /// Output only. The mean loss of the training examples for this step. - public var meanLoss: Float? - /// Output only. The tuning step. - public var step: Int32? - /// Output only. The epoch this step was part of. - public var epoch: Int32? - - public init(computeTime: String? = nil, meanLoss: Float? = nil, step: Int32? = nil, epoch: Int32? 
= nil) { - self.computeTime = computeTime - self.meanLoss = meanLoss - self.step = step - self.epoch = epoch - } - - public init(from decoder: Decoder) throws { - let values = try decoder.container(keyedBy: StringCodingKey.self) - self.computeTime = try values.decodeIfPresent(String.self, forKey: "computeTime") - self.meanLoss = try values.decodeIfPresent(Float.self, forKey: "meanLoss") - self.step = try values.decodeIfPresent(Int32.self, forKey: "step") - self.epoch = try values.decodeIfPresent(Int32.self, forKey: "epoch") - } - - public func encode(to encoder: Encoder) throws { - var values = encoder.container(keyedBy: StringCodingKey.self) - try values.encodeIfPresent(computeTime, forKey: "computeTime") - try values.encodeIfPresent(meanLoss, forKey: "meanLoss") - try values.encodeIfPresent(step, forKey: "step") - try values.encodeIfPresent(epoch, forKey: "epoch") - } -} diff --git a/Sources/OpenAPI/Entities/TuningTask.swift b/Sources/OpenAPI/Entities/TuningTask.swift deleted file mode 100644 index ce6df12..0000000 --- a/Sources/OpenAPI/Entities/TuningTask.swift +++ /dev/null @@ -1,58 +0,0 @@ -// Generated by Create API -// https://github.com/CreateAPI/CreateAPI -// -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import Foundation - -/// Tuning tasks that create tuned models. -public struct TuningTask: Codable { - /// Dataset for training or validation. - public var trainingData: Dataset? - /// Output only. The timestamp when tuning this model completed. - public var completeTime: String? - /// Output only. The timestamp when tuning this model started. - public var startTime: String? - /// Output only. Metrics collected during tuning. - public var snapshots: [TuningSnapshot]? - /// Hyperparameters controlling the tuning process. - public var hyperparameters: Hyperparameters? - - public init(trainingData: Dataset? = nil, completeTime: String? = nil, startTime: String? = nil, snapshots: [TuningSnapshot]? = nil, hyperparameters: Hyperparameters? 
= nil) { - self.trainingData = trainingData - self.completeTime = completeTime - self.startTime = startTime - self.snapshots = snapshots - self.hyperparameters = hyperparameters - } - - public init(from decoder: Decoder) throws { - let values = try decoder.container(keyedBy: StringCodingKey.self) - self.trainingData = try values.decodeIfPresent(Dataset.self, forKey: "trainingData") - self.completeTime = try values.decodeIfPresent(String.self, forKey: "completeTime") - self.startTime = try values.decodeIfPresent(String.self, forKey: "startTime") - self.snapshots = try values.decodeIfPresent([TuningSnapshot].self, forKey: "snapshots") - self.hyperparameters = try values.decodeIfPresent(Hyperparameters.self, forKey: "hyperparameters") - } - - public func encode(to encoder: Encoder) throws { - var values = encoder.container(keyedBy: StringCodingKey.self) - try values.encodeIfPresent(trainingData, forKey: "trainingData") - try values.encodeIfPresent(completeTime, forKey: "completeTime") - try values.encodeIfPresent(startTime, forKey: "startTime") - try values.encodeIfPresent(snapshots, forKey: "snapshots") - try values.encodeIfPresent(hyperparameters, forKey: "hyperparameters") - } -} diff --git a/Sources/OpenAPI/Extensions/API.swift b/Sources/OpenAPI/Extensions/API.swift deleted file mode 100644 index 7283c44..0000000 --- a/Sources/OpenAPI/Extensions/API.swift +++ /dev/null @@ -1,22 +0,0 @@ -// Generated by Create API -// https://github.com/CreateAPI/CreateAPI -// -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import Foundation -import Get -import URLQueryEncoder - -public enum API {} diff --git a/Sources/OpenAPI/Extensions/AnyJSON.swift b/Sources/OpenAPI/Extensions/AnyJSON.swift deleted file mode 100644 index 4ea7224..0000000 --- a/Sources/OpenAPI/Extensions/AnyJSON.swift +++ /dev/null @@ -1,66 +0,0 @@ -// Generated by Create API -// https://github.com/CreateAPI/CreateAPI -// -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -import Foundation - -public enum AnyJSON: Equatable, Codable { - case string(String) - case number(Double) - case object([String: AnyJSON]) - case array([AnyJSON]) - case bool(Bool) - - var value: Any { - switch self { - case .string(let string): return string - case .number(let double): return double - case .object(let dictionary): return dictionary - case .array(let array): return array - case .bool(let bool): return bool - } - } - - public func encode(to encoder: Encoder) throws { - var container = encoder.singleValueContainer() - switch self { - case let .array(array): try container.encode(array) - case let .object(object): try container.encode(object) - case let .string(string): try container.encode(string) - case let .number(number): try container.encode(number) - case let .bool(bool): try container.encode(bool) - } - } - - public init(from decoder: Decoder) throws { - let container = try decoder.singleValueContainer() - if let object = try? container.decode([String: AnyJSON].self) { - self = .object(object) - } else if let array = try? container.decode([AnyJSON].self) { - self = .array(array) - } else if let string = try? container.decode(String.self) { - self = .string(string) - } else if let bool = try? container.decode(Bool.self) { - self = .bool(bool) - } else if let number = try? container.decode(Double.self) { - self = .number(number) - } else { - throw DecodingError.dataCorrupted( - .init(codingPath: decoder.codingPath, debugDescription: "Invalid JSON value.") - ) - } - } -} diff --git a/Sources/OpenAPI/Extensions/StringCodingKey.swift b/Sources/OpenAPI/Extensions/StringCodingKey.swift deleted file mode 100644 index d89bcd4..0000000 --- a/Sources/OpenAPI/Extensions/StringCodingKey.swift +++ /dev/null @@ -1,44 +0,0 @@ -// Generated by Create API -// https://github.com/CreateAPI/CreateAPI -// -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import Foundation - -struct StringCodingKey: CodingKey, ExpressibleByStringLiteral { - private let string: String - private var int: Int? - - var stringValue: String { return string } - - init(string: String) { - self.string = string - } - - init?(stringValue: String) { - self.string = stringValue - } - - var intValue: Int? { return int } - - init?(intValue: Int) { - self.string = String(describing: intValue) - self.int = intValue - } - - init(stringLiteral value: String) { - self.string = value - } -} diff --git a/Sources/generativelanguage-v1beta2.json b/Sources/generativelanguage-v1beta2.json deleted file mode 100644 index decadf3..0000000 --- a/Sources/generativelanguage-v1beta2.json +++ /dev/null @@ -1,564 +0,0 @@ -{ - "openapi": "3.0.0", - "servers": [ - { - "url": "https://generativelanguage.googleapis.com/" - } - ], - "info": { - "title": "Generative Language API", - "description": "The Generative Language API allows developers to use state-of-the-art Large Language Models (LLMs), also known as Generative Language Models, to build language applications. 
LLMs are a powerful, versatile type of machine learning model that enables computers to comprehend and generate natural language better than they ever have before.", - "contact": { - "name": "Google", - "url": "https://google.com" - }, - "version": "v1beta1" - }, - "paths": { - "/v1beta1/{+name}": { - "get": { - "description": "Gets information about a specific Model.", - "operationId": "generativelanguage.models.get", - "responses": { - "200": { - "description": "Successful response", - "content": { - "*/*": { - "schema": { - "$ref": "#/components/schemas/Model" - } - } - } - } - }, - "parameters": [ - { - "name": "name", - "in": "path", - "description": "Required. The resource name of the model. This name should match a model name returned by the `ListModels` method. Format: `models/{model}`", - "required": true, - "schema": { - "type": "string" - } - } - ], - "tags": [ - "models" - ] - }, - "parameters": [ - { - "$ref": "#/components/parameters/access_token" - }, - { - "$ref": "#/components/parameters/prettyPrint" - }, - { - "$ref": "#/components/parameters/alt" - }, - { - "$ref": "#/components/parameters/_.xgafv" - }, - { - "$ref": "#/components/parameters/upload_protocol" - }, - { - "$ref": "#/components/parameters/oauth_token" - }, - { - "$ref": "#/components/parameters/callback" - }, - { - "$ref": "#/components/parameters/quotaUser" - }, - { - "$ref": "#/components/parameters/key" - }, - { - "$ref": "#/components/parameters/fields" - }, - { - "$ref": "#/components/parameters/uploadType" - } - ] - }, - "/v1beta1/{+model}:generateMessage": { - "post": { - "description": "Generates a response from the model given an input `MessagePrompt`.", - "operationId": "generativelanguage.models.generateMessage", - "responses": { - "200": { - "description": "Successful response", - "content": { - "*/*": { - "schema": { - "$ref": "#/components/schemas/GenerateMessageResponse" - } - } - } - } - }, - "parameters": [ - { - "name": "model", - "in": "path", - "description": "Required. The name of the model to use. Format: `name=models/{model}`.", - "required": true, - "schema": { - "type": "string" - } - } - ], - "tags": [ - "models" - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/GenerateMessageRequest" - } - } - } - } - }, - "parameters": [ - { - "$ref": "#/components/parameters/access_token" - }, - { - "$ref": "#/components/parameters/prettyPrint" - }, - { - "$ref": "#/components/parameters/alt" - }, - { - "$ref": "#/components/parameters/_.xgafv" - }, - { - "$ref": "#/components/parameters/upload_protocol" - }, - { - "$ref": "#/components/parameters/oauth_token" - }, - { - "$ref": "#/components/parameters/callback" - }, - { - "$ref": "#/components/parameters/quotaUser" - }, - { - "$ref": "#/components/parameters/key" - }, - { - "$ref": "#/components/parameters/fields" - }, - { - "$ref": "#/components/parameters/uploadType" - } - ] - }, - "/v1beta1/models": { - "get": { - "description": "Lists models available through the API.", - "operationId": "generativelanguage.models.list", - "responses": { - "200": { - "description": "Successful response", - "content": { - "*/*": { - "schema": { - "$ref": "#/components/schemas/ListModelsResponse" - } - } - } - } - }, - "parameters": [ - { - "name": "pageSize", - "in": "query", - "description": "The maximum number of `Models` to return (per page). The service may return fewer models. If unspecified, at most 50 models will be returned per page. 
This method returns at most 1000 models per page, even if you pass a larger page_size.", - "schema": { - "type": "integer" - } - }, - { - "name": "pageToken", - "in": "query", - "description": "A page token, received from a previous `ListModels` call. Provide the `page_token` returned by one request as an argument to the next request to retrieve the next page. When paginating, all other parameters provided to `ListModels` must match the call that provided the page token.", - "schema": { - "type": "string" - } - } - ], - "tags": [ - "models" - ] - }, - "parameters": [ - { - "$ref": "#/components/parameters/access_token" - }, - { - "$ref": "#/components/parameters/prettyPrint" - }, - { - "$ref": "#/components/parameters/alt" - }, - { - "$ref": "#/components/parameters/_.xgafv" - }, - { - "$ref": "#/components/parameters/upload_protocol" - }, - { - "$ref": "#/components/parameters/oauth_token" - }, - { - "$ref": "#/components/parameters/callback" - }, - { - "$ref": "#/components/parameters/quotaUser" - }, - { - "$ref": "#/components/parameters/key" - }, - { - "$ref": "#/components/parameters/fields" - }, - { - "$ref": "#/components/parameters/uploadType" - } - ] - } - }, - "tags": [ - { - "name": "models" - } - ], - "externalDocs": { - "url": "https://cloud.google.com/" - }, - "components": { - "schemas": { - "Model": { - "properties": { - "temperature": { - "description": "Controls the randomness of the output. Values can range over `[0.0,1.0]`, inclusive. A value closer to `1.0` will produce responses that are more varied, while a value closer to `0.0` will typically result in less surprising responses from the model. This value specifies default to be used by the backend while making the call to the model.", - "type": "number", - "format": "float" - }, - "displayName": { - "description": "The human-readable name of the model. E.g. \"Chat Panda\". The name can be up to 128 characters long and can consist of any UTF-8 characters.", - "type": "string" - }, - "topP": { - "format": "float", - "type": "number", - "description": "Nucleus sampling considers the smallest set of tokens whose probability sum is at least `top_p`. This value specifies default to be used by the backend while making the call to the model." - }, - "version": { - "description": "Required. The version number of the model. This represents the major version", - "type": "string" - }, - "name": { - "description": "Required. The resource name of the `Model`. Format: `models/{model}` with a `{model}` naming convention of: * \"{base_model_id}-{version}\" Examples: * `models/chat-panda-001`", - "type": "string" - }, - "description": { - "type": "string", - "description": "A short description of the model." - }, - "inputTokenLimit": { - "description": "Maximum number of input tokens allowed for this model.", - "type": "integer", - "format": "int32" - }, - "baseModelId": { - "type": "string", - "description": "Required. The name of the base model, pass this to the generation request. Examples: * `chat-panda`" - }, - "supportedGenerationMethods": { - "items": { - "type": "string" - }, - "description": "The model's supported generation methods. 
The method names are defined as Pascal case strings, such as `generateMessage` which correspond to API methods.", - "type": "array" - }, - "outputTokenLimit": { - "description": "Maximum number of output tokens available for this model.", - "type": "integer", - "format": "int32" - }, - "topK": { - "type": "integer", - "format": "int32", - "description": "Top-k sampling considers the set of `top_k` most probable tokens. This value specifies default to be used by the backend while making the call to the model." - } - }, - "description": "Information about a Generative Language Model.", - "type": "object" - }, - "CitationMetadata": { - "properties": { - "citationSources": { - "description": "Citations to sources for a specific response.", - "type": "array", - "items": { - "$ref": "#/components/schemas/CitationSource" - } - } - }, - "description": "Metadata about whether content in the response is attributed to a source and citations for those sources.", - "type": "object" - }, - "CitationSource": { - "description": "A citation to a source for a portion of a specific response.", - "properties": { - "uri": { - "description": "Optional. URI that is attributed as a source for a portion of the text.", - "type": "string" - }, - "endIndex": { - "format": "int32", - "description": "Optional. End of the attributed segment, exclusive.", - "type": "integer" - }, - "startIndex": { - "type": "integer", - "format": "int32", - "description": "Optional. Start of segment of the response that is attributed to this source. Index indicates the start of the segment, measured in bytes." - } - }, - "type": "object" - }, - "GenerateMessageResponse": { - "description": "The response from the model. This includes candidate messages and conversation history in the form of chronologically-ordered messages.", - "properties": { - "candidates": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Message" - }, - "description": "Candidate response messages from the model." - }, - "messages": { - "items": { - "$ref": "#/components/schemas/Message" - }, - "type": "array", - "description": "The conversation history used by the model." - } - }, - "type": "object" - }, - "GenerateMessageRequest": { - "properties": { - "topP": { - "format": "float", - "type": "number", - "description": "Optional. If specified, nucleus sampling will be used as the decoding strategy. Nucleus sampling considers the smallest set of tokens whose probability sum is at least `top_p`." - }, - "candidateCount": { - "format": "int32", - "type": "integer", - "description": "Optional. The number of generated response messages to return. This value must be between `[1, 10]`, inclusive. If unset, this will default to `1`." - }, - "prompt": { - "description": "Required. The structured textual input given to the model as a prompt. Given a prompt, the model will return what it predicts is the next message in the discussion.", - "$ref": "#/components/schemas/MessagePrompt" - }, - "temperature": { - "type": "number", - "format": "float", - "description": "Optional. Controls the randomness of the output. Values can range over `[0.0,1.0]`, inclusive. A value closer to `1.0` will produce responses that are more varied, while a value closer to `0.0` will typically result in less surprising responses from the model." - }, - "topK": { - "description": "Optional. If specified, top-k sampling will be used as the decoding strategy. 
Top-k sampling considers the set of `top_k` most probable tokens.", - "type": "integer", - "format": "int32" - } - }, - "type": "object", - "description": "Request to generate a message response from the model." - }, - "MessagePrompt": { - "type": "object", - "description": "All of the structured input text passed to the model as a prompt. A `MessagePrompt` contains a structured set of fields that provide context for the conversation, examples of user input/model output message pairs that prime the model to respond in different ways, and the conversation history or list of messages representing the alternating turns of the conversation between the user and the model.", - "properties": { - "messages": { - "description": "Required. A snapshot of the recent conversation history sorted chronologically. Turns alternate between two authors. If the total input size exceeds the model's `input_token_limit` the input will be truncated: The oldest items will be dropped from `messages`.", - "type": "array", - "items": { - "$ref": "#/components/schemas/Message" - } - }, - "context": { - "type": "string", - "description": "Optional. Text that should be provided to the model first to ground the response. If not empty, this `context` will be given to the model first before the `examples` and `messages`. When using a `context` be sure to provide it with every request to maintain continuity. This field can be a description of your prompt to the model to help provide context and guide the responses. Examples: \"Translate the phrase from English to French.\" or \"Given a statement, classify the sentiment as happy, sad or neutral.\" Anything included in this field will take precedence over message history if the total input size exceeds the model's `input_token_limit` and the input request is truncated." - }, - "examples": { - "description": "Optional. Examples of what the model should generate. This includes both user input and the response that the model should emulate. These `examples` are treated identically to conversation messages except that they take precedence over the history in `messages`: If the total input size exceeds the model's `input_token_limit` the input will be truncated. Items will be dropped from `messages` before `examples`.", - "items": { - "$ref": "#/components/schemas/Example" - }, - "type": "array" - } - } - }, - "ListModelsResponse": { - "description": "Response from `ListModel` containing a paginated list of Models.", - "properties": { - "models": { - "description": "The returned Models.", - "type": "array", - "items": { - "$ref": "#/components/schemas/Model" - } - }, - "nextPageToken": { - "type": "string", - "description": "A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no more pages." - } - }, - "type": "object" - }, - "Example": { - "description": "An input/output example used to instruct the Model. It demonstrates how the model should respond or format its response.", - "properties": { - "input": { - "$ref": "#/components/schemas/Message", - "description": "An example of an input `Message` from the user." - }, - "output": { - "$ref": "#/components/schemas/Message", - "description": "An example of what the model should output given the input." - } - }, - "type": "object" - }, - "Message": { - "properties": { - "content": { - "description": "Required. The text content of the structured `Message`.", - "type": "string" - }, - "author": { - "type": "string", - "description": "Optional. 
This serves as a key for tagging the content of this Message when it is fed to the model as text. The author can be any alphanumeric string." - }, - "citationMetadata": { - "$ref": "#/components/schemas/CitationMetadata", - "description": "Output only. Citation information for model-generated `content` in this `Message`. If this `Message` was generated as output from the model, this field may be populated with attribution information for any text included in the `content`. This field is used only on output.", - "readOnly": true - } - }, - "description": "The base unit of structured text. A `Message` includes an `author` and the `content` of the `Message`. The `author` is used to tag messages when they are fed to the model as text.", - "type": "object" - } - }, - "parameters": { - "access_token": { - "name": "access_token", - "in": "query", - "description": "OAuth access token.", - "schema": { - "type": "string" - } - }, - "prettyPrint": { - "name": "prettyPrint", - "in": "query", - "description": "Returns response with indentations and line breaks.", - "schema": { - "type": "boolean", - "default": true - } - }, - "alt": { - "name": "alt", - "in": "query", - "description": "Data format for response.", - "schema": { - "type": "string", - "enum": [ - "json", - "media", - "proto" - ], - "default": "json" - } - }, - "upload_protocol": { - "name": "upload_protocol", - "in": "query", - "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", - "schema": { - "type": "string" - } - }, - "oauth_token": { - "name": "oauth_token", - "in": "query", - "description": "OAuth 2.0 token for the current user.", - "schema": { - "type": "string" - } - }, - "callback": { - "name": "callback", - "in": "query", - "description": "JSONP", - "schema": { - "type": "string" - } - }, - "quotaUser": { - "name": "quotaUser", - "in": "query", - "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", - "schema": { - "type": "string" - } - }, - "key": { - "name": "key", - "in": "query", - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", - "schema": { - "type": "string" - } - }, - "fields": { - "name": "fields", - "in": "query", - "description": "Selector specifying which fields to include in a partial response.", - "schema": { - "type": "string" - } - }, - "uploadType": { - "name": "uploadType", - "in": "query", - "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", - "schema": { - "type": "string" - } - }, - "_.xgafv": { - "name": "$.xgafv", - "in": "query", - "description": "V1 error format.", - "schema": { - "type": "string", - "enum": [ - "1", - "2" - ] - } - } - } - } -} \ No newline at end of file diff --git a/Sources/generativelanguage-v1beta3.json b/Sources/generativelanguage-v1beta3.json deleted file mode 100644 index 6c709c2..0000000 --- a/Sources/generativelanguage-v1beta3.json +++ /dev/null @@ -1,2054 +0,0 @@ -{ - "openapi": "3.0.0", - "servers": [ - { - "url": "https://generativelanguage.googleapis.com/" - } - ], - "info": { - "title": "Generative Language API", - "description": "The PaLM API allows developers to build generative AI applications using the PaLM model. 
Large Language Models (LLMs) are a powerful, versatile type of machine learning model that enables computers to comprehend and generate natural language through a series of prompts. The PaLM API is based on Google's next generation LLM, PaLM. It excels at a variety of different tasks like code generation, reasoning, and writing. You can use the PaLM API to build generative AI applications for use cases like content generation, dialogue agents, summarization and classification systems, and more.", - "contact": { - "name": "Google", - "url": "https://google.com" - }, - "version": "v1beta3" - }, - "paths": { - "/v1beta3/{+model}:generateText": { - "post": { - "description": "Generates a response from the model given an input message.", - "operationId": "generativelanguage.models.generateText", - "responses": { - "200": { - "description": "Successful response", - "content": { - "*/*": { - "schema": { - "$ref": "#/components/schemas/GenerateTextResponse" - } - } - } - } - }, - "parameters": [ - { - "name": "model", - "in": "path", - "description": "Required. The name of the `Model` or `TunedModel` to use for generating the completion. Examples: models/text-bison-001 tunedModels/sentence-translator-u3b7m", - "required": true, - "schema": { - "type": "string" - } - } - ], - "tags": [ - "models" - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/GenerateTextRequest" - } - } - } - } - }, - "parameters": [ - { - "$ref": "#/components/parameters/_.xgafv" - }, - { - "$ref": "#/components/parameters/callback" - }, - { - "$ref": "#/components/parameters/quotaUser" - }, - { - "$ref": "#/components/parameters/fields" - }, - { - "$ref": "#/components/parameters/upload_protocol" - }, - { - "$ref": "#/components/parameters/uploadType" - }, - { - "$ref": "#/components/parameters/key" - }, - { - "$ref": "#/components/parameters/oauth_token" - }, - { - "$ref": "#/components/parameters/alt" - }, - { - "$ref": "#/components/parameters/prettyPrint" - }, - { - "$ref": "#/components/parameters/access_token" - } - ] - }, - "/v1beta3/tunedModels": { - "post": { - "description": "Creates a tuned model. Intermediate tuning progress (if any) is accessed through the [google.longrunning.Operations] service. Status and results can be accessed through the Operations service. Example: GET /v1/tunedModels/az2mb0bpw6i/operations/000-111-222", - "operationId": "generativelanguage.tunedModels.create", - "responses": { - "200": { - "description": "Successful response", - "content": { - "*/*": { - "schema": { - "$ref": "#/components/schemas/Operation" - } - } - } - } - }, - "parameters": [ - { - "name": "tunedModelId", - "in": "query", - "description": "Optional. The unique id for the tuned model if specified. This value should be up to 40 characters, the first character must be a letter, the last could be a letter or a number. 
The id must match the regular expression: [a-z]([a-z0-9-]{0,38}[a-z0-9])?.", - "schema": { - "type": "string" - } - } - ], - "tags": [ - "tunedModels" - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/TunedModel" - } - } - } - } - }, - "get": { - "description": "Lists tuned models owned by the user.", - "operationId": "generativelanguage.tunedModels.list", - "responses": { - "200": { - "description": "Successful response", - "content": { - "*/*": { - "schema": { - "$ref": "#/components/schemas/ListTunedModelsResponse" - } - } - } - } - }, - "parameters": [ - { - "name": "pageSize", - "in": "query", - "description": "Optional. The maximum number of `TunedModels` to return (per page). The service may return fewer tuned models. If unspecified, at most 10 tuned models will be returned. This method returns at most 1000 models per page, even if you pass a larger page_size.", - "schema": { - "type": "integer" - } - }, - { - "name": "filter", - "in": "query", - "description": "Optional. A filter is a full text search over the tuned model's description and display name. By default, results will not include tuned models shared with everyone. Additional operators: - owner:me - writers:me - readers:me - readers:everyone Examples: \"owner:me\" returns all tuned models to which caller has owner role \"readers:me\" returns all tuned models to which caller has reader role \"readers:everyone\" returns all tuned models that are shared with everyone", - "schema": { - "type": "string" - } - }, - { - "name": "pageToken", - "in": "query", - "description": "Optional. A page token, received from a previous `ListTunedModels` call. Provide the `page_token` returned by one request as an argument to the next request to retrieve the next page. When paginating, all other parameters provided to `ListTunedModels` must match the call that provided the page token.", - "schema": { - "type": "string" - } - } - ], - "tags": [ - "tunedModels" - ] - }, - "parameters": [ - { - "$ref": "#/components/parameters/_.xgafv" - }, - { - "$ref": "#/components/parameters/callback" - }, - { - "$ref": "#/components/parameters/quotaUser" - }, - { - "$ref": "#/components/parameters/fields" - }, - { - "$ref": "#/components/parameters/upload_protocol" - }, - { - "$ref": "#/components/parameters/uploadType" - }, - { - "$ref": "#/components/parameters/key" - }, - { - "$ref": "#/components/parameters/oauth_token" - }, - { - "$ref": "#/components/parameters/alt" - }, - { - "$ref": "#/components/parameters/prettyPrint" - }, - { - "$ref": "#/components/parameters/access_token" - } - ] - }, - "/v1beta3/{+name}": { - "patch": { - "description": "Updates the permission.", - "operationId": "generativelanguage.tunedModels.permissions.patch", - "responses": { - "200": { - "description": "Successful response", - "content": { - "*/*": { - "schema": { - "$ref": "#/components/schemas/Permission" - } - } - } - } - }, - "parameters": [ - { - "name": "name", - "in": "path", - "description": "Output only. The permission name. A unique name will be generated on create. Example: tunedModels/{tuned_model}permssions/{permission} Output only.", - "required": true, - "schema": { - "type": "string" - } - }, - { - "name": "updateMask", - "in": "query", - "description": "Required. The list of fields to update. 
Accepted ones: - role (`Permission.role` field)", - "schema": { - "type": "string" - } - } - ], - "tags": [ - "tunedModels" - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Permission" - } - } - } - } - }, - "delete": { - "description": "Deletes the permission.", - "operationId": "generativelanguage.tunedModels.permissions.delete", - "responses": { - "200": { - "description": "Successful response", - "content": { - "*/*": { - "schema": { - "$ref": "#/components/schemas/Empty" - } - } - } - } - }, - "parameters": [ - { - "name": "name", - "in": "path", - "description": "Required. The resource name of the permission. Format: `tunedModels/{tuned_model}/permissions/{permission}`", - "required": true, - "schema": { - "type": "string" - } - } - ], - "tags": [ - "tunedModels" - ] - }, - "get": { - "description": "Gets information about a specific Model.", - "operationId": "generativelanguage.models.get", - "responses": { - "200": { - "description": "Successful response", - "content": { - "*/*": { - "schema": { - "$ref": "#/components/schemas/Model" - } - } - } - } - }, - "parameters": [ - { - "name": "name", - "in": "path", - "description": "Required. The resource name of the model. This name should match a model name returned by the `ListModels` method. Format: `models/{model}`", - "required": true, - "schema": { - "type": "string" - } - } - ], - "tags": [ - "models" - ] - }, - "parameters": [ - { - "$ref": "#/components/parameters/_.xgafv" - }, - { - "$ref": "#/components/parameters/callback" - }, - { - "$ref": "#/components/parameters/quotaUser" - }, - { - "$ref": "#/components/parameters/fields" - }, - { - "$ref": "#/components/parameters/upload_protocol" - }, - { - "$ref": "#/components/parameters/uploadType" - }, - { - "$ref": "#/components/parameters/key" - }, - { - "$ref": "#/components/parameters/oauth_token" - }, - { - "$ref": "#/components/parameters/alt" - }, - { - "$ref": "#/components/parameters/prettyPrint" - }, - { - "$ref": "#/components/parameters/access_token" - } - ] - }, - "/v1beta3/{+name}:transferOwnership": { - "post": { - "description": "Transfers ownership of the tuned model. This is the only way to change ownership of the tuned model. The current owner will be downgraded to writer role.", - "operationId": "generativelanguage.tunedModels.transferOwnership", - "responses": { - "200": { - "description": "Successful response", - "content": { - "*/*": { - "schema": { - "$ref": "#/components/schemas/TransferOwnershipResponse" - } - } - } - } - }, - "parameters": [ - { - "name": "name", - "in": "path", - "description": "Required. The resource name of the tuned model to transfer ownership . 
Format: `tunedModels/my-model-id`", - "required": true, - "schema": { - "type": "string" - } - } - ], - "tags": [ - "tunedModels" - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/TransferOwnershipRequest" - } - } - } - } - }, - "parameters": [ - { - "$ref": "#/components/parameters/_.xgafv" - }, - { - "$ref": "#/components/parameters/callback" - }, - { - "$ref": "#/components/parameters/quotaUser" - }, - { - "$ref": "#/components/parameters/fields" - }, - { - "$ref": "#/components/parameters/upload_protocol" - }, - { - "$ref": "#/components/parameters/uploadType" - }, - { - "$ref": "#/components/parameters/key" - }, - { - "$ref": "#/components/parameters/oauth_token" - }, - { - "$ref": "#/components/parameters/alt" - }, - { - "$ref": "#/components/parameters/prettyPrint" - }, - { - "$ref": "#/components/parameters/access_token" - } - ] - }, - "/v1beta3/{+parent}/permissions": { - "post": { - "description": "Create a permission to a specific resource.", - "operationId": "generativelanguage.tunedModels.permissions.create", - "responses": { - "200": { - "description": "Successful response", - "content": { - "*/*": { - "schema": { - "$ref": "#/components/schemas/Permission" - } - } - } - } - }, - "parameters": [ - { - "name": "parent", - "in": "path", - "description": "Required. The parent resource of the `Permission`. Format: tunedModels/{tuned_model}", - "required": true, - "schema": { - "type": "string" - } - } - ], - "tags": [ - "tunedModels" - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Permission" - } - } - } - } - }, - "get": { - "description": "Lists permissions for the specific resource.", - "operationId": "generativelanguage.tunedModels.permissions.list", - "responses": { - "200": { - "description": "Successful response", - "content": { - "*/*": { - "schema": { - "$ref": "#/components/schemas/ListPermissionsResponse" - } - } - } - } - }, - "parameters": [ - { - "name": "parent", - "in": "path", - "description": "Required. The parent resource of the permissions. Format: tunedModels/{tuned_model}", - "required": true, - "schema": { - "type": "string" - } - }, - { - "name": "pageToken", - "in": "query", - "description": "Optional. A page token, received from a previous `ListPermissions` call. Provide the `page_token` returned by one request as an argument to the next request to retrieve the next page. When paginating, all other parameters provided to `ListPermissions` must match the call that provided the page token.", - "schema": { - "type": "string" - } - }, - { - "name": "pageSize", - "in": "query", - "description": "Optional. The maximum number of `Permission`s to return (per page). The service may return fewer permissions. If unspecified, at most 10 permissions will be returned. 
This method returns at most 1000 permissions per page, even if you pass larger page_size.", - "schema": { - "type": "integer" - } - } - ], - "tags": [ - "tunedModels" - ] - }, - "parameters": [ - { - "$ref": "#/components/parameters/_.xgafv" - }, - { - "$ref": "#/components/parameters/callback" - }, - { - "$ref": "#/components/parameters/quotaUser" - }, - { - "$ref": "#/components/parameters/fields" - }, - { - "$ref": "#/components/parameters/upload_protocol" - }, - { - "$ref": "#/components/parameters/uploadType" - }, - { - "$ref": "#/components/parameters/key" - }, - { - "$ref": "#/components/parameters/oauth_token" - }, - { - "$ref": "#/components/parameters/alt" - }, - { - "$ref": "#/components/parameters/prettyPrint" - }, - { - "$ref": "#/components/parameters/access_token" - } - ] - }, - "/v1beta3/models": { - "get": { - "description": "Lists models available through the API.", - "operationId": "generativelanguage.models.list", - "responses": { - "200": { - "description": "Successful response", - "content": { - "*/*": { - "schema": { - "$ref": "#/components/schemas/ListModelsResponse" - } - } - } - } - }, - "parameters": [ - { - "name": "pageSize", - "in": "query", - "description": "The maximum number of `Models` to return (per page). The service may return fewer models. If unspecified, at most 50 models will be returned per page. This method returns at most 1000 models per page, even if you pass a larger page_size.", - "schema": { - "type": "integer" - } - }, - { - "name": "pageToken", - "in": "query", - "description": "A page token, received from a previous `ListModels` call. Provide the `page_token` returned by one request as an argument to the next request to retrieve the next page. When paginating, all other parameters provided to `ListModels` must match the call that provided the page token.", - "schema": { - "type": "string" - } - } - ], - "tags": [ - "models" - ] - }, - "parameters": [ - { - "$ref": "#/components/parameters/_.xgafv" - }, - { - "$ref": "#/components/parameters/callback" - }, - { - "$ref": "#/components/parameters/quotaUser" - }, - { - "$ref": "#/components/parameters/fields" - }, - { - "$ref": "#/components/parameters/upload_protocol" - }, - { - "$ref": "#/components/parameters/uploadType" - }, - { - "$ref": "#/components/parameters/key" - }, - { - "$ref": "#/components/parameters/oauth_token" - }, - { - "$ref": "#/components/parameters/alt" - }, - { - "$ref": "#/components/parameters/prettyPrint" - }, - { - "$ref": "#/components/parameters/access_token" - } - ] - }, - "/v1beta3/{+model}:countMessageTokens": { - "post": { - "description": "Runs a model's tokenizer on a string and returns the token count.", - "operationId": "generativelanguage.models.countMessageTokens", - "responses": { - "200": { - "description": "Successful response", - "content": { - "*/*": { - "schema": { - "$ref": "#/components/schemas/CountMessageTokensResponse" - } - } - } - } - }, - "parameters": [ - { - "name": "model", - "in": "path", - "description": "Required. The model's resource name. This serves as an ID for the Model to use. This name should match a model name returned by the `ListModels` method. 
Format: `models/{model}`", - "required": true, - "schema": { - "type": "string" - } - } - ], - "tags": [ - "models" - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/CountMessageTokensRequest" - } - } - } - } - }, - "parameters": [ - { - "$ref": "#/components/parameters/_.xgafv" - }, - { - "$ref": "#/components/parameters/callback" - }, - { - "$ref": "#/components/parameters/quotaUser" - }, - { - "$ref": "#/components/parameters/fields" - }, - { - "$ref": "#/components/parameters/upload_protocol" - }, - { - "$ref": "#/components/parameters/uploadType" - }, - { - "$ref": "#/components/parameters/key" - }, - { - "$ref": "#/components/parameters/oauth_token" - }, - { - "$ref": "#/components/parameters/alt" - }, - { - "$ref": "#/components/parameters/prettyPrint" - }, - { - "$ref": "#/components/parameters/access_token" - } - ] - }, - "/v1beta3/{+model}:embedText": { - "post": { - "description": "Generates an embedding from the model given an input message.", - "operationId": "generativelanguage.models.embedText", - "responses": { - "200": { - "description": "Successful response", - "content": { - "*/*": { - "schema": { - "$ref": "#/components/schemas/EmbedTextResponse" - } - } - } - } - }, - "parameters": [ - { - "name": "model", - "in": "path", - "description": "Required. The model name to use with the format model=models/{model}.", - "required": true, - "schema": { - "type": "string" - } - } - ], - "tags": [ - "models" - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/EmbedTextRequest" - } - } - } - } - }, - "parameters": [ - { - "$ref": "#/components/parameters/_.xgafv" - }, - { - "$ref": "#/components/parameters/callback" - }, - { - "$ref": "#/components/parameters/quotaUser" - }, - { - "$ref": "#/components/parameters/fields" - }, - { - "$ref": "#/components/parameters/upload_protocol" - }, - { - "$ref": "#/components/parameters/uploadType" - }, - { - "$ref": "#/components/parameters/key" - }, - { - "$ref": "#/components/parameters/oauth_token" - }, - { - "$ref": "#/components/parameters/alt" - }, - { - "$ref": "#/components/parameters/prettyPrint" - }, - { - "$ref": "#/components/parameters/access_token" - } - ] - }, - "/v1beta3/{+model}:batchEmbedText": { - "post": { - "description": "Generates multiple embeddings from the model given input text in a synchronous call.", - "operationId": "generativelanguage.models.batchEmbedText", - "responses": { - "200": { - "description": "Successful response", - "content": { - "*/*": { - "schema": { - "$ref": "#/components/schemas/BatchEmbedTextResponse" - } - } - } - } - }, - "parameters": [ - { - "name": "model", - "in": "path", - "description": "Required. The name of the `Model` to use for generating the embedding. 
Examples: models/embedding-gecko-001", - "required": true, - "schema": { - "type": "string" - } - } - ], - "tags": [ - "models" - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/BatchEmbedTextRequest" - } - } - } - } - }, - "parameters": [ - { - "$ref": "#/components/parameters/_.xgafv" - }, - { - "$ref": "#/components/parameters/callback" - }, - { - "$ref": "#/components/parameters/quotaUser" - }, - { - "$ref": "#/components/parameters/fields" - }, - { - "$ref": "#/components/parameters/upload_protocol" - }, - { - "$ref": "#/components/parameters/uploadType" - }, - { - "$ref": "#/components/parameters/key" - }, - { - "$ref": "#/components/parameters/oauth_token" - }, - { - "$ref": "#/components/parameters/alt" - }, - { - "$ref": "#/components/parameters/prettyPrint" - }, - { - "$ref": "#/components/parameters/access_token" - } - ] - }, - "/v1beta3/{+model}:countTextTokens": { - "post": { - "description": "Runs a model's tokenizer on a text and returns the token count.", - "operationId": "generativelanguage.models.countTextTokens", - "responses": { - "200": { - "description": "Successful response", - "content": { - "*/*": { - "schema": { - "$ref": "#/components/schemas/CountTextTokensResponse" - } - } - } - } - }, - "parameters": [ - { - "name": "model", - "in": "path", - "description": "Required. The model's resource name. This serves as an ID for the Model to use. This name should match a model name returned by the `ListModels` method. Format: `models/{model}`", - "required": true, - "schema": { - "type": "string" - } - } - ], - "tags": [ - "models" - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/CountTextTokensRequest" - } - } - } - } - }, - "parameters": [ - { - "$ref": "#/components/parameters/_.xgafv" - }, - { - "$ref": "#/components/parameters/callback" - }, - { - "$ref": "#/components/parameters/quotaUser" - }, - { - "$ref": "#/components/parameters/fields" - }, - { - "$ref": "#/components/parameters/upload_protocol" - }, - { - "$ref": "#/components/parameters/uploadType" - }, - { - "$ref": "#/components/parameters/key" - }, - { - "$ref": "#/components/parameters/oauth_token" - }, - { - "$ref": "#/components/parameters/alt" - }, - { - "$ref": "#/components/parameters/prettyPrint" - }, - { - "$ref": "#/components/parameters/access_token" - } - ] - }, - "/v1beta3/{+model}:generateMessage": { - "post": { - "description": "Generates a response from the model given an input `MessagePrompt`.", - "operationId": "generativelanguage.models.generateMessage", - "responses": { - "200": { - "description": "Successful response", - "content": { - "*/*": { - "schema": { - "$ref": "#/components/schemas/GenerateMessageResponse" - } - } - } - } - }, - "parameters": [ - { - "name": "model", - "in": "path", - "description": "Required. The name of the model to use. 
Format: `name=models/{model}`.", - "required": true, - "schema": { - "type": "string" - } - } - ], - "tags": [ - "models" - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/GenerateMessageRequest" - } - } - } - } - }, - "parameters": [ - { - "$ref": "#/components/parameters/_.xgafv" - }, - { - "$ref": "#/components/parameters/callback" - }, - { - "$ref": "#/components/parameters/quotaUser" - }, - { - "$ref": "#/components/parameters/fields" - }, - { - "$ref": "#/components/parameters/upload_protocol" - }, - { - "$ref": "#/components/parameters/uploadType" - }, - { - "$ref": "#/components/parameters/key" - }, - { - "$ref": "#/components/parameters/oauth_token" - }, - { - "$ref": "#/components/parameters/alt" - }, - { - "$ref": "#/components/parameters/prettyPrint" - }, - { - "$ref": "#/components/parameters/access_token" - } - ] - } - }, - "tags": [ - { - "name": "tunedModels" - }, - { - "name": "models" - } - ], - "externalDocs": { - "url": "https://developers.generativeai.google/api" - }, - "components": { - "schemas": { - "TunedModel": { - "description": "A fine-tuned model created using ModelService.CreateTunedModel.", - "type": "object", - "properties": { - "description": { - "type": "string", - "description": "Optional. A short description of this model." - }, - "topP": { - "description": "Optional. For Nucleus sampling. Nucleus sampling considers the smallest set of tokens whose probability sum is at least `top_p`. This value specifies default to be the one used by the base model while creating the model.", - "format": "float", - "type": "number" - }, - "updateTime": { - "type": "string", - "description": "Output only. The timestamp when this model was updated.", - "readOnly": true, - "format": "google-datetime" - }, - "tuningTask": { - "$ref": "#/components/schemas/TuningTask", - "description": "Required. The tuning task that creates the tuned model." - }, - "tunedModelSource": { - "$ref": "#/components/schemas/TunedModelSource", - "description": "Optional. TunedModel to use as the starting point for training the new model." - }, - "temperature": { - "type": "number", - "format": "float", - "description": "Optional. Controls the randomness of the output. Values can range over `[0.0,1.0]`, inclusive. A value closer to `1.0` will produce responses that are more varied, while a value closer to `0.0` will typically result in less surprising responses from the model. This value specifies default to be the one used by the base model while creating the model." - }, - "createTime": { - "format": "google-datetime", - "description": "Output only. The timestamp when this model was created.", - "type": "string", - "readOnly": true - }, - "baseModel": { - "type": "string", - "description": "Immutable. The name of the `Model` to tune. Example: `models/text-bison-001`" - }, - "name": { - "type": "string", - "description": "Output only. The tuned model name. A unique name will be generated on create. Example: `tunedModels/az2mb0bpw6i` If display_name is set on create, the id portion of the name will be set by concatenating the words of the display_name with hyphens and adding a random portion for uniqueness. Example: display_name = \"Sentence Translator\" name = \"tunedModels/sentence-translator-u3b7m\"", - "readOnly": true - }, - "topK": { - "description": "Optional. For Top-k sampling. Top-k sampling considers the set of `top_k` most probable tokens. This value specifies default to be used by the backend while making the call to the model. 
This value specifies default to be the one used by the base model while creating the model.", - "format": "int32", - "type": "integer" - }, - "displayName": { - "description": "Optional. The name to display for this model in user interfaces. The display name must be up to 40 characters including spaces.", - "type": "string" - }, - "state": { - "description": "Output only. The state of the tuned model.", - "type": "string", - "enum": [ - "STATE_UNSPECIFIED", - "CREATING", - "ACTIVE", - "FAILED" - ], - "readOnly": true - } - } - }, - "Permission": { - "description": "Permission resource grants user, group or the rest of the world access to the PaLM API resource (e.g. a tuned model, file). A role is a collection of permitted operations that allows users to perform specific actions on PaLM API resources. To make them available to users, groups, or service accounts, you assign roles. When you assign a role, you grant permissions that the role contains. There are three concentric roles. Each role is a superset of the previous role's permitted operations: - reader can use the resource (e.g. tuned model) for inference - writer has reader's permissions and additionally can edit and share - owner has writer's permissions and additionally can delete", - "properties": { - "name": { - "readOnly": true, - "description": "Output only. The permission name. A unique name will be generated on create. Example: tunedModels/{tuned_model}permssions/{permission} Output only.", - "type": "string" - }, - "emailAddress": { - "description": "Optional. Immutable. The email address of the user of group which this permission refers. Field is not set when permission's grantee type is EVERYONE.", - "type": "string" - }, - "granteeType": { - "description": "Required. Immutable. The type of the grantee.", - "type": "string", - "enum": [ - "GRANTEE_TYPE_UNSPECIFIED", - "USER", - "GROUP", - "EVERYONE" - ] - }, - "role": { - "enum": [ - "ROLE_UNSPECIFIED", - "OWNER", - "WRITER", - "READER" - ], - "description": "Required. The role granted by this permission.", - "type": "string" - } - }, - "type": "object" - }, - "EmbedTextResponse": { - "description": "The response to a EmbedTextRequest.", - "properties": { - "embedding": { - "$ref": "#/components/schemas/Embedding", - "description": "Output only. The embedding generated from the input text.", - "readOnly": true - } - }, - "type": "object" - }, - "ListPermissionsResponse": { - "type": "object", - "description": "Response from `ListPermissions` containing a paginated list of permissions.", - "properties": { - "nextPageToken": { - "type": "string", - "description": "A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no more pages." - }, - "permissions": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Permission" - }, - "description": "Returned permissions." - } - } - }, - "Status": { - "properties": { - "code": { - "description": "The status code, which should be an enum value of google.rpc.Code.", - "format": "int32", - "type": "integer" - }, - "message": { - "description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.", - "type": "string" - }, - "details": { - "description": "A list of messages that carry the error details. 
There is a common set of message types for APIs to use.", - "items": { - "type": "object", - "additionalProperties": { - "description": "Properties of the object. Contains field @type with type URL." - } - }, - "type": "array" - } - }, - "type": "object", - "description": "The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors)." - }, - "SafetyFeedback": { - "description": "Safety feedback for an entire request. This field is populated if content in the input and/or response is blocked due to safety settings. SafetyFeedback may not exist for every HarmCategory. Each SafetyFeedback will return the safety settings used by the request as well as the lowest HarmProbability that should be allowed in order to return a result.", - "type": "object", - "properties": { - "rating": { - "description": "Safety rating evaluated from content.", - "$ref": "#/components/schemas/SafetyRating" - }, - "setting": { - "$ref": "#/components/schemas/SafetySetting", - "description": "Safety settings applied to the request." - } - } - }, - "TunedModelSource": { - "description": "Tuned model as a source for training a new model.", - "properties": { - "baseModel": { - "type": "string", - "readOnly": true, - "description": "Output only. The name of the base `Model` this `TunedModel` was tuned from. Example: `models/text-bison-001`" - }, - "tunedModel": { - "type": "string", - "description": "Immutable. The name of the `TunedModel` to use as the starting point for training the new model. Example: `tunedModels/my-tuned-model`" - } - }, - "type": "object" - }, - "Example": { - "description": "An input/output example used to instruct the Model. It demonstrates how the model should respond or format its response.", - "properties": { - "input": { - "description": "Required. An example of an input `Message` from the user.", - "$ref": "#/components/schemas/Message" - }, - "output": { - "$ref": "#/components/schemas/Message", - "description": "Required. An example of what the model should output given the input." - } - }, - "type": "object" - }, - "GenerateTextRequest": { - "description": "Request to generate a text completion response from the model.", - "properties": { - "safetySettings": { - "type": "array", - "items": { - "$ref": "#/components/schemas/SafetySetting" - }, - "description": "A list of unique `SafetySetting` instances for blocking unsafe content. that will be enforced on the `GenerateTextRequest.prompt` and `GenerateTextResponse.candidates`. There should not be more than one setting for each `SafetyCategory` type. The API will block any prompts and responses that fail to meet the thresholds set by these settings. This list overrides the default settings for each `SafetyCategory` specified in the safety_settings. If there is no `SafetySetting` for a given `SafetyCategory` provided in the list, the API will use the default safety setting for that category." - }, - "candidateCount": { - "description": "Optional. Number of generated responses to return. This value must be between [1, 8], inclusive. 
If unset, this will default to 1.", - "format": "int32", - "type": "integer" - }, - "stopSequences": { - "items": { - "type": "string" - }, - "type": "array", - "description": "The set of character sequences (up to 5) that will stop output generation. If specified, the API will stop at the first appearance of a stop sequence. The stop sequence will not be included as part of the response." - }, - "topK": { - "type": "integer", - "format": "int32", - "description": "Optional. The maximum number of tokens to consider when sampling. The model uses combined Top-k and nucleus sampling. Top-k sampling considers the set of `top_k` most probable tokens. Defaults to 40. Note: The default value varies by model, see the `Model.top_k` attribute of the `Model` returned the `getModel` function." - }, - "prompt": { - "description": "Required. The free-form input text given to the model as a prompt. Given a prompt, the model will generate a TextCompletion response it predicts as the completion of the input text.", - "$ref": "#/components/schemas/TextPrompt" - }, - "maxOutputTokens": { - "type": "integer", - "description": "Optional. The maximum number of tokens to include in a candidate. If unset, this will default to output_token_limit specified in the `Model` specification.", - "format": "int32" - }, - "topP": { - "type": "number", - "description": "Optional. The maximum cumulative probability of tokens to consider when sampling. The model uses combined Top-k and nucleus sampling. Tokens are sorted based on their assigned probabilities so that only the most likely tokens are considered. Top-k sampling directly limits the maximum number of tokens to consider, while Nucleus sampling limits number of tokens based on the cumulative probability. Note: The default value varies by model, see the `Model.top_p` attribute of the `Model` returned the `getModel` function.", - "format": "float" - }, - "temperature": { - "format": "float", - "type": "number", - "description": "Optional. Controls the randomness of the output. Note: The default value varies by model, see the `Model.temperature` attribute of the `Model` returned the `getModel` function. Values can range from [0.0,1.0], inclusive. A value closer to 1.0 will produce responses that are more varied and creative, while a value closer to 0.0 will typically result in more straightforward responses from the model." - } - }, - "type": "object" - }, - "ListModelsResponse": { - "properties": { - "nextPageToken": { - "type": "string", - "description": "A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no more pages." - }, - "models": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Model" - }, - "description": "The returned Models." - } - }, - "type": "object", - "description": "Response from `ListModel` containing a paginated list of Models." - }, - "CitationMetadata": { - "type": "object", - "properties": { - "citationSources": { - "type": "array", - "items": { - "$ref": "#/components/schemas/CitationSource" - }, - "description": "Citations to sources for a specific response." - } - }, - "description": "A collection of source attributions for a piece of content." - }, - "SafetySetting": { - "properties": { - "category": { - "type": "string", - "enum": [ - "HARM_CATEGORY_UNSPECIFIED", - "HARM_CATEGORY_DEROGATORY", - "HARM_CATEGORY_TOXICITY", - "HARM_CATEGORY_VIOLENCE", - "HARM_CATEGORY_SEXUAL", - "HARM_CATEGORY_MEDICAL", - "HARM_CATEGORY_DANGEROUS" - ], - "description": "Required. 
The category for this setting." - }, - "threshold": { - "description": "Required. Controls the probability threshold at which harm is blocked.", - "type": "string", - "enum": [ - "HARM_BLOCK_THRESHOLD_UNSPECIFIED", - "BLOCK_LOW_AND_ABOVE", - "BLOCK_MEDIUM_AND_ABOVE", - "BLOCK_ONLY_HIGH", - "BLOCK_NONE" - ] - } - }, - "type": "object", - "description": "Safety setting, affecting the safety-blocking behavior. Passing a safety setting for a category changes the allowed proability that content is blocked." - }, - "SafetyRating": { - "type": "object", - "description": "Safety rating for a piece of content. The safety rating contains the category of harm and the harm probability level in that category for a piece of content. Content is classified for safety across a number of harm categories and the probability of the harm classification is included here.", - "properties": { - "category": { - "type": "string", - "description": "Required. The category for this rating.", - "enum": [ - "HARM_CATEGORY_UNSPECIFIED", - "HARM_CATEGORY_DEROGATORY", - "HARM_CATEGORY_TOXICITY", - "HARM_CATEGORY_VIOLENCE", - "HARM_CATEGORY_SEXUAL", - "HARM_CATEGORY_MEDICAL", - "HARM_CATEGORY_DANGEROUS" - ] - }, - "probability": { - "enum": [ - "HARM_PROBABILITY_UNSPECIFIED", - "NEGLIGIBLE", - "LOW", - "MEDIUM", - "HIGH" - ], - "description": "Required. The probability of harm for this content.", - "type": "string" - } - } - }, - "GenerateTextResponse": { - "properties": { - "candidates": { - "description": "Candidate responses from the model.", - "items": { - "$ref": "#/components/schemas/TextCompletion" - }, - "type": "array" - }, - "safetyFeedback": { - "items": { - "$ref": "#/components/schemas/SafetyFeedback" - }, - "type": "array", - "description": "Returns any safety feedback related to content filtering." - }, - "filters": { - "description": "A set of content filtering metadata for the prompt and response text. This indicates which `SafetyCategory`(s) blocked a candidate from this response, the lowest `HarmProbability` that triggered a block, and the HarmThreshold setting for that category. This indicates the smallest change to the `SafetySettings` that would be necessary to unblock at least 1 response. The blocking is configured by the `SafetySettings` in the request (or the default `SafetySettings` of the API).", - "type": "array", - "items": { - "$ref": "#/components/schemas/ContentFilter" - } - } - }, - "description": "The response from the model, including candidate completions.", - "type": "object" - }, - "TextCompletion": { - "description": "Output text returned from a model.", - "properties": { - "citationMetadata": { - "description": "Output only. Citation information for model-generated `output` in this `TextCompletion`. This field may be populated with attribution information for any text included in the `output`.", - "readOnly": true, - "$ref": "#/components/schemas/CitationMetadata" - }, - "output": { - "readOnly": true, - "type": "string", - "description": "Output only. The generated text returned from the model." - }, - "safetyRatings": { - "items": { - "$ref": "#/components/schemas/SafetyRating" - }, - "type": "array", - "description": "Ratings for the safety of a response. There is at most one rating per category." - } - }, - "type": "object" - }, - "CountTextTokensRequest": { - "properties": { - "prompt": { - "description": "Required. 
The free-form input text given to the model as a prompt.", - "$ref": "#/components/schemas/TextPrompt" - } - }, - "type": "object", - "description": "Counts the number of tokens in the `prompt` sent to a model. Models may tokenize text differently, so each model may return a different `token_count`." - }, - "CountTextTokensResponse": { - "description": "A response from `CountTextTokens`. It returns the model's `token_count` for the `prompt`.", - "type": "object", - "properties": { - "tokenCount": { - "type": "integer", - "description": "The number of tokens that the `model` tokenizes the `prompt` into. Always non-negative.", - "format": "int32" - } - } - }, - "EmbedTextRequest": { - "type": "object", - "description": "Request to get a text embedding from the model.", - "properties": { - "text": { - "type": "string", - "description": "Required. The free-form input text that the model will turn into an embedding." - } - } - }, - "BatchEmbedTextRequest": { - "description": "Batch request to get a text embedding from the model.", - "type": "object", - "properties": { - "texts": { - "description": "Required. The free-form input texts that the model will turn into an embedding. The current limit is 100 texts, over which an error will be thrown.", - "items": { - "type": "string" - }, - "type": "array" - } - } - }, - "TuningSnapshot": { - "description": "Record for a single tuning step.", - "type": "object", - "properties": { - "computeTime": { - "description": "Output only. The timestamp when this metric was computed.", - "type": "string", - "format": "google-datetime", - "readOnly": true - }, - "meanLoss": { - "readOnly": true, - "format": "float", - "description": "Output only. The mean loss of the training examples for this step.", - "type": "number" - }, - "step": { - "type": "integer", - "readOnly": true, - "description": "Output only. The tuning step.", - "format": "int32" - }, - "epoch": { - "description": "Output only. The epoch this step was part of.", - "format": "int32", - "type": "integer", - "readOnly": true - } - } - }, - "Hyperparameters": { - "type": "object", - "description": "Hyperparameters controlling the tuning process.", - "properties": { - "batchSize": { - "description": "Immutable. The batch size hyperparameter for tuning. If not set, a default of 16 or 64 will be used based on the number of training examples.", - "type": "integer", - "format": "int32" - }, - "learningRate": { - "description": "Immutable. The learning rate hyperparameter for tuning. If not set, a default of 0.0002 or 0.002 will be calculated based on the number of training examples.", - "type": "number", - "format": "float" - }, - "epochCount": { - "format": "int32", - "type": "integer", - "description": "Immutable. The number of training epochs. An epoch is one pass through the training data. If not set, a default of 10 will be used." - } - } - }, - "CountMessageTokensResponse": { - "description": "A response from `CountMessageTokens`. It returns the model's `token_count` for the `prompt`.", - "properties": { - "tokenCount": { - "format": "int32", - "type": "integer", - "description": "The number of tokens that the `model` tokenizes the `prompt` into. Always non-negative." 
- } - }, - "type": "object" - }, - "Embedding": { - "description": "A list of floats representing the embedding.", - "type": "object", - "properties": { - "value": { - "description": "The embedding values.", - "type": "array", - "items": { - "type": "number", - "format": "float" - } - } - } - }, - "TransferOwnershipRequest": { - "properties": { - "emailAddress": { - "description": "Required. The email address of the user to whom the tuned model is being transferred to.", - "type": "string" - } - }, - "type": "object", - "description": "Request to transfer the ownership of the tuned model." - }, - "ContentFilter": { - "description": "Content filtering metadata associated with processing a single request. ContentFilter contains a reason and an optional supporting string. The reason may be unspecified.", - "properties": { - "message": { - "type": "string", - "description": "A string that describes the filtering behavior in more detail." - }, - "reason": { - "enum": [ - "BLOCKED_REASON_UNSPECIFIED", - "SAFETY", - "OTHER" - ], - "description": "The reason content was blocked during request processing.", - "type": "string" - } - }, - "type": "object" - }, - "TextPrompt": { - "properties": { - "text": { - "type": "string", - "description": "Required. The prompt text." - } - }, - "type": "object", - "description": "Text given to the model as a prompt. The Model will use this TextPrompt to Generate a text completion." - }, - "TuningExample": { - "description": "A single example for tuning.", - "type": "object", - "properties": { - "textInput": { - "description": "Optional. Text model input.", - "type": "string" - }, - "output": { - "type": "string", - "description": "Required. The expected model output." - } - } - }, - "TransferOwnershipResponse": { - "properties": {}, - "description": "Response from `TransferOwnership`.", - "type": "object" - }, - "Model": { - "properties": { - "inputTokenLimit": { - "description": "Maximum number of input tokens allowed for this model.", - "format": "int32", - "type": "integer" - }, - "topK": { - "format": "int32", - "description": "For Top-k sampling. Top-k sampling considers the set of `top_k` most probable tokens. This value specifies default to be used by the backend while making the call to the model.", - "type": "integer" - }, - "topP": { - "description": "For Nucleus sampling. Nucleus sampling considers the smallest set of tokens whose probability sum is at least `top_p`. This value specifies default to be used by the backend while making the call to the model.", - "format": "float", - "type": "number" - }, - "name": { - "description": "Required. The resource name of the `Model`. Format: `models/{model}` with a `{model}` naming convention of: * \"{base_model_id}-{version}\" Examples: * `models/chat-bison-001`", - "type": "string" - }, - "outputTokenLimit": { - "description": "Maximum number of output tokens available for this model.", - "type": "integer", - "format": "int32" - }, - "displayName": { - "type": "string", - "description": "The human-readable name of the model. E.g. \"Chat Bison\". The name can be up to 128 characters long and can consist of any UTF-8 characters." - }, - "baseModelId": { - "description": "Required. The name of the base model, pass this to the generation request. Examples: * `chat-bison`", - "type": "string" - }, - "version": { - "type": "string", - "description": "Required. The version number of the model. 
This represents the major version" - }, - "supportedGenerationMethods": { - "items": { - "type": "string" - }, - "description": "The model's supported generation methods. The method names are defined as Pascal case strings, such as `generateMessage` which correspond to API methods.", - "type": "array" - }, - "description": { - "type": "string", - "description": "A short description of the model." - }, - "temperature": { - "type": "number", - "format": "float", - "description": "Controls the randomness of the output. Values can range over `[0.0,1.0]`, inclusive. A value closer to `1.0` will produce responses that are more varied, while a value closer to `0.0` will typically result in less surprising responses from the model. This value specifies default to be used by the backend while making the call to the model." - } - }, - "description": "Information about a Generative Language Model.", - "type": "object" - }, - "BatchEmbedTextResponse": { - "description": "The response to a EmbedTextRequest.", - "properties": { - "embeddings": { - "description": "Output only. The embeddings generated from the input text.", - "readOnly": true, - "items": { - "$ref": "#/components/schemas/Embedding" - }, - "type": "array" - } - }, - "type": "object" - }, - "CreateTunedModelMetadata": { - "description": "Metadata about the state and progress of creating a tuned model returned from the long-running operation", - "type": "object", - "properties": { - "tunedModel": { - "type": "string", - "description": "Name of the tuned model associated with the tuning operation." - }, - "completedSteps": { - "type": "integer", - "description": "The number of steps completed.", - "format": "int32" - }, - "completedPercent": { - "type": "number", - "format": "float", - "description": "The completed percentage for the tuning operation." - }, - "totalSteps": { - "description": "The total number of tuning steps.", - "format": "int32", - "type": "integer" - }, - "snapshots": { - "type": "array", - "items": { - "$ref": "#/components/schemas/TuningSnapshot" - }, - "description": "Metrics collected during tuning." - } - } - }, - "GenerateMessageRequest": { - "properties": { - "prompt": { - "description": "Required. The structured textual input given to the model as a prompt. Given a prompt, the model will return what it predicts is the next message in the discussion.", - "$ref": "#/components/schemas/MessagePrompt" - }, - "topP": { - "format": "float", - "type": "number", - "description": "Optional. The maximum cumulative probability of tokens to consider when sampling. The model uses combined Top-k and nucleus sampling. Nucleus sampling considers the smallest set of tokens whose probability sum is at least `top_p`." - }, - "candidateCount": { - "format": "int32", - "description": "Optional. The number of generated response messages to return. This value must be between `[1, 8]`, inclusive. If unset, this will default to `1`.", - "type": "integer" - }, - "temperature": { - "format": "float", - "description": "Optional. Controls the randomness of the output. Values can range over `[0.0,1.0]`, inclusive. A value closer to `1.0` will produce responses that are more varied, while a value closer to `0.0` will typically result in less surprising responses from the model.", - "type": "number" - }, - "topK": { - "format": "int32", - "type": "integer", - "description": "Optional. The maximum number of tokens to consider when sampling. The model uses combined Top-k and nucleus sampling. 
Top-k sampling considers the set of `top_k` most probable tokens." - } - }, - "type": "object", - "description": "Request to generate a message response from the model." - }, - "ListTunedModelsResponse": { - "description": "Response from `ListTunedModels` containing a paginated list of Models.", - "type": "object", - "properties": { - "nextPageToken": { - "description": "A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no more pages.", - "type": "string" - }, - "tunedModels": { - "description": "The returned Models.", - "items": { - "$ref": "#/components/schemas/TunedModel" - }, - "type": "array" - } - } - }, - "CitationSource": { - "type": "object", - "properties": { - "uri": { - "description": "Optional. URI that is attributed as a source for a portion of the text.", - "type": "string" - }, - "license": { - "description": "Optional. License for the GitHub project that is attributed as a source for segment. License info is required for code citations.", - "type": "string" - }, - "endIndex": { - "description": "Optional. End of the attributed segment, exclusive.", - "format": "int32", - "type": "integer" - }, - "startIndex": { - "type": "integer", - "description": "Optional. Start of segment of the response that is attributed to this source. Index indicates the start of the segment, measured in bytes.", - "format": "int32" - } - }, - "description": "A citation to a source for a portion of a specific response." - }, - "Message": { - "description": "The base unit of structured text. A `Message` includes an `author` and the `content` of the `Message`. The `author` is used to tag messages when they are fed to the model as text.", - "properties": { - "citationMetadata": { - "description": "Output only. Citation information for model-generated `content` in this `Message`. If this `Message` was generated as output from the model, this field may be populated with attribution information for any text included in the `content`. This field is used only on output.", - "$ref": "#/components/schemas/CitationMetadata", - "readOnly": true - }, - "content": { - "description": "Required. The text content of the structured `Message`.", - "type": "string" - }, - "author": { - "type": "string", - "description": "Optional. The author of this Message. This serves as a key for tagging the content of this Message when it is fed to the model as text. The author can be any alphanumeric string." - } - }, - "type": "object" - }, - "CountMessageTokensRequest": { - "type": "object", - "description": "Counts the number of tokens in the `prompt` sent to a model. Models may tokenize text differently, so each model may return a different `token_count`.", - "properties": { - "prompt": { - "$ref": "#/components/schemas/MessagePrompt", - "description": "Required. The prompt, whose token count is to be returned." - } - } - }, - "TuningTask": { - "properties": { - "trainingData": { - "description": "Required. Input only. Immutable. The model training data.", - "$ref": "#/components/schemas/Dataset" - }, - "completeTime": { - "type": "string", - "format": "google-datetime", - "readOnly": true, - "description": "Output only. The timestamp when tuning this model completed." - }, - "startTime": { - "format": "google-datetime", - "description": "Output only. 
The timestamp when tuning this model started.", - "type": "string", - "readOnly": true - }, - "snapshots": { - "type": "array", - "items": { - "$ref": "#/components/schemas/TuningSnapshot" - }, - "description": "Output only. Metrics collected during tuning.", - "readOnly": true - }, - "hyperparameters": { - "description": "Immutable. Hyperparameters controlling the tuning process. If not provided, default values will be used.", - "$ref": "#/components/schemas/Hyperparameters" - } - }, - "description": "Tuning tasks that create tuned models.", - "type": "object" - }, - "Empty": { - "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); }", - "properties": {}, - "type": "object" - }, - "Dataset": { - "properties": { - "examples": { - "$ref": "#/components/schemas/TuningExamples", - "description": "Optional. Inline examples." - } - }, - "type": "object", - "description": "Dataset for training or validation." - }, - "Operation": { - "description": "This resource represents a long-running operation that is the result of a network API call.", - "type": "object", - "properties": { - "done": { - "type": "boolean", - "description": "If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available." - }, - "name": { - "type": "string", - "description": "The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`." - }, - "response": { - "description": "The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.", - "additionalProperties": { - "description": "Properties of the object. Contains field @type with type URL." - }, - "type": "object" - }, - "error": { - "description": "The error result of the operation in case of failure or cancellation.", - "$ref": "#/components/schemas/Status" - }, - "metadata": { - "additionalProperties": { - "description": "Properties of the object. Contains field @type with type URL." - }, - "type": "object", - "description": "Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any." - } - } - }, - "TuningExamples": { - "description": "A set of tuning examples. Can be training or validatation data.", - "type": "object", - "properties": { - "examples": { - "type": "array", - "items": { - "$ref": "#/components/schemas/TuningExample" - }, - "description": "Required. The examples. Example input can be for text or discuss, but all examples in a set must be of the same type." - } - } - }, - "GenerateMessageResponse": { - "description": "The response from the model. 
This includes candidate messages and conversation history in the form of chronologically-ordered messages.", - "type": "object", - "properties": { - "messages": { - "items": { - "$ref": "#/components/schemas/Message" - }, - "type": "array", - "description": "The conversation history used by the model." - }, - "candidates": { - "items": { - "$ref": "#/components/schemas/Message" - }, - "type": "array", - "description": "Candidate response messages from the model." - }, - "filters": { - "description": "A set of content filtering metadata for the prompt and response text. This indicates which `SafetyCategory`(s) blocked a candidate from this response, the lowest `HarmProbability` that triggered a block, and the HarmThreshold setting for that category.", - "type": "array", - "items": { - "$ref": "#/components/schemas/ContentFilter" - } - } - } - }, - "MessagePrompt": { - "description": "All of the structured input text passed to the model as a prompt. A `MessagePrompt` contains a structured set of fields that provide context for the conversation, examples of user input/model output message pairs that prime the model to respond in different ways, and the conversation history or list of messages representing the alternating turns of the conversation between the user and the model.", - "properties": { - "examples": { - "description": "Optional. Examples of what the model should generate. This includes both user input and the response that the model should emulate. These `examples` are treated identically to conversation messages except that they take precedence over the history in `messages`: If the total input size exceeds the model's `input_token_limit` the input will be truncated. Items will be dropped from `messages` before `examples`.", - "type": "array", - "items": { - "$ref": "#/components/schemas/Example" - } - }, - "messages": { - "items": { - "$ref": "#/components/schemas/Message" - }, - "type": "array", - "description": "Required. A snapshot of the recent conversation history sorted chronologically. Turns alternate between two authors. If the total input size exceeds the model's `input_token_limit` the input will be truncated: The oldest items will be dropped from `messages`." - }, - "context": { - "description": "Optional. Text that should be provided to the model first to ground the response. If not empty, this `context` will be given to the model first before the `examples` and `messages`. When using a `context` be sure to provide it with every request to maintain continuity. This field can be a description of your prompt to the model to help provide context and guide the responses. Examples: \"Translate the phrase from English to French.\" or \"Given a statement, classify the sentiment as happy, sad or neutral.\" Anything included in this field will take precedence over message history if the total input size exceeds the model's `input_token_limit` and the input request is truncated.", - "type": "string" - } - }, - "type": "object" - } - }, - "parameters": { - "callback": { - "name": "callback", - "in": "query", - "description": "JSONP", - "schema": { - "type": "string" - } - }, - "quotaUser": { - "name": "quotaUser", - "in": "query", - "description": "Available to use for quota purposes for server-side applications. 
Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", - "schema": { - "type": "string" - } - }, - "fields": { - "name": "fields", - "in": "query", - "description": "Selector specifying which fields to include in a partial response.", - "schema": { - "type": "string" - } - }, - "upload_protocol": { - "name": "upload_protocol", - "in": "query", - "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", - "schema": { - "type": "string" - } - }, - "uploadType": { - "name": "uploadType", - "in": "query", - "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", - "schema": { - "type": "string" - } - }, - "key": { - "name": "key", - "in": "query", - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", - "schema": { - "type": "string" - } - }, - "oauth_token": { - "name": "oauth_token", - "in": "query", - "description": "OAuth 2.0 token for the current user.", - "schema": { - "type": "string" - } - }, - "alt": { - "name": "alt", - "in": "query", - "description": "Data format for response.", - "schema": { - "type": "string", - "enum": [ - "json", - "media", - "proto" - ], - "default": "json" - } - }, - "prettyPrint": { - "name": "prettyPrint", - "in": "query", - "description": "Returns response with indentations and line breaks.", - "schema": { - "type": "boolean", - "default": true - } - }, - "access_token": { - "name": "access_token", - "in": "query", - "description": "OAuth access token.", - "schema": { - "type": "string" - } - }, - "_.xgafv": { - "name": "$.xgafv", - "in": "query", - "description": "V1 error format.", - "schema": { - "type": "string", - "enum": [ - "1", - "2" - ] - } - } - }, - "requestBodies": { - "Permission": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Permission" - } - } - } - } - } - } -} \ No newline at end of file diff --git a/Sources/palm-v1beta2.json b/Sources/palm-v1beta2.json deleted file mode 100644 index e48a598..0000000 --- a/Sources/palm-v1beta2.json +++ /dev/null @@ -1,928 +0,0 @@ -{ - "openapi": "3.0.0", - "servers": [ - { - "url": "https://generativelanguage.googleapis.com/" - } - ], - "info": { - "title": "Generative Language API", - "description": "The PaLM API allows developers to build generative AI applications using the PaLM model. Large Language Models (LLMs) are a powerful, versatile type of machine learning model that enables computers to comprehend and generate natural language through a series of prompts. The PaLM API is based on Google's next generation LLM, PaLM. It excels at a variety of different tasks like code generation, reasoning, and writing. You can use the PaLM API to build generative AI applications for use cases like content generation, dialogue agents, summarization and classification systems, and more.", - "contact": { - "name": "Google", - "url": "https://google.com" - }, - "version": "v1beta2" - }, - "paths": { - "/v1beta2/{+model}:generateMessage": { - "post": { - "description": "Generates a response from the model given an input `MessagePrompt`.", - "operationId": "generativelanguage.models.generateMessage", - "responses": { - "200": { - "description": "Successful response", - "content": { - "*/*": { - "schema": { - "$ref": "#/components/schemas/GenerateMessageResponse" - } - } - } - } - }, - "parameters": [ - { - "name": "model", - "in": "path", - "description": "Required. 
The name of the model to use. Format: `name=models/{model}`.", - "required": true, - "schema": { - "type": "string" - } - } - ], - "tags": [ - "models" - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/GenerateMessageRequest" - } - } - } - } - }, - "parameters": [ - { - "$ref": "#/components/parameters/oauth_token" - }, - { - "$ref": "#/components/parameters/upload_protocol" - }, - { - "$ref": "#/components/parameters/uploadType" - }, - { - "$ref": "#/components/parameters/access_token" - }, - { - "$ref": "#/components/parameters/alt" - }, - { - "$ref": "#/components/parameters/callback" - }, - { - "$ref": "#/components/parameters/key" - }, - { - "$ref": "#/components/parameters/fields" - }, - { - "$ref": "#/components/parameters/prettyPrint" - }, - { - "$ref": "#/components/parameters/_.xgafv" - }, - { - "$ref": "#/components/parameters/quotaUser" - } - ] - }, - "/v1beta2/{+model}:generateText": { - "post": { - "description": "Generates a response from the model given an input message.", - "operationId": "generativelanguage.models.generateText", - "responses": { - "200": { - "description": "Successful response", - "content": { - "*/*": { - "schema": { - "$ref": "#/components/schemas/GenerateTextResponse" - } - } - } - } - }, - "parameters": [ - { - "name": "model", - "in": "path", - "description": "Required. The model name to use with the format name=models/{model}.", - "required": true, - "schema": { - "type": "string" - } - } - ], - "tags": [ - "models" - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/GenerateTextRequest" - } - } - } - } - }, - "parameters": [ - { - "$ref": "#/components/parameters/oauth_token" - }, - { - "$ref": "#/components/parameters/upload_protocol" - }, - { - "$ref": "#/components/parameters/uploadType" - }, - { - "$ref": "#/components/parameters/access_token" - }, - { - "$ref": "#/components/parameters/alt" - }, - { - "$ref": "#/components/parameters/callback" - }, - { - "$ref": "#/components/parameters/key" - }, - { - "$ref": "#/components/parameters/fields" - }, - { - "$ref": "#/components/parameters/prettyPrint" - }, - { - "$ref": "#/components/parameters/_.xgafv" - }, - { - "$ref": "#/components/parameters/quotaUser" - } - ] - }, - "/v1beta2/{+model}:embedText": { - "post": { - "description": "Generates an embedding from the model given an input message.", - "operationId": "generativelanguage.models.embedText", - "responses": { - "200": { - "description": "Successful response", - "content": { - "*/*": { - "schema": { - "$ref": "#/components/schemas/EmbedTextResponse" - } - } - } - } - }, - "parameters": [ - { - "name": "model", - "in": "path", - "description": "Required. 
The model name to use with the format model=models/{model}.", - "required": true, - "schema": { - "type": "string" - } - } - ], - "tags": [ - "models" - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/EmbedTextRequest" - } - } - } - } - }, - "parameters": [ - { - "$ref": "#/components/parameters/oauth_token" - }, - { - "$ref": "#/components/parameters/upload_protocol" - }, - { - "$ref": "#/components/parameters/uploadType" - }, - { - "$ref": "#/components/parameters/access_token" - }, - { - "$ref": "#/components/parameters/alt" - }, - { - "$ref": "#/components/parameters/callback" - }, - { - "$ref": "#/components/parameters/key" - }, - { - "$ref": "#/components/parameters/fields" - }, - { - "$ref": "#/components/parameters/prettyPrint" - }, - { - "$ref": "#/components/parameters/_.xgafv" - }, - { - "$ref": "#/components/parameters/quotaUser" - } - ] - }, - "/v1beta2/models": { - "get": { - "description": "Lists models available through the API.", - "operationId": "generativelanguage.models.list", - "responses": { - "200": { - "description": "Successful response", - "content": { - "*/*": { - "schema": { - "$ref": "#/components/schemas/ListModelsResponse" - } - } - } - } - }, - "parameters": [ - { - "name": "pageSize", - "in": "query", - "description": "The maximum number of `Models` to return (per page). The service may return fewer models. If unspecified, at most 50 models will be returned per page. This method returns at most 1000 models per page, even if you pass a larger page_size.", - "schema": { - "type": "integer" - } - }, - { - "name": "pageToken", - "in": "query", - "description": "A page token, received from a previous `ListModels` call. Provide the `page_token` returned by one request as an argument to the next request to retrieve the next page. When paginating, all other parameters provided to `ListModels` must match the call that provided the page token.", - "schema": { - "type": "string" - } - } - ], - "tags": [ - "models" - ] - }, - "parameters": [ - { - "$ref": "#/components/parameters/oauth_token" - }, - { - "$ref": "#/components/parameters/upload_protocol" - }, - { - "$ref": "#/components/parameters/uploadType" - }, - { - "$ref": "#/components/parameters/access_token" - }, - { - "$ref": "#/components/parameters/alt" - }, - { - "$ref": "#/components/parameters/callback" - }, - { - "$ref": "#/components/parameters/key" - }, - { - "$ref": "#/components/parameters/fields" - }, - { - "$ref": "#/components/parameters/prettyPrint" - }, - { - "$ref": "#/components/parameters/_.xgafv" - }, - { - "$ref": "#/components/parameters/quotaUser" - } - ] - }, - "/v1beta2/{+name}": { - "get": { - "description": "Gets information about a specific Model.", - "operationId": "generativelanguage.models.get", - "responses": { - "200": { - "description": "Successful response", - "content": { - "*/*": { - "schema": { - "$ref": "#/components/schemas/Model" - } - } - } - } - }, - "parameters": [ - { - "name": "name", - "in": "path", - "description": "Required. The resource name of the model. This name should match a model name returned by the `ListModels` method. 
Format: `models/{model}`", - "required": true, - "schema": { - "type": "string" - } - } - ], - "tags": [ - "models" - ] - }, - "parameters": [ - { - "$ref": "#/components/parameters/oauth_token" - }, - { - "$ref": "#/components/parameters/upload_protocol" - }, - { - "$ref": "#/components/parameters/uploadType" - }, - { - "$ref": "#/components/parameters/access_token" - }, - { - "$ref": "#/components/parameters/alt" - }, - { - "$ref": "#/components/parameters/callback" - }, - { - "$ref": "#/components/parameters/key" - }, - { - "$ref": "#/components/parameters/fields" - }, - { - "$ref": "#/components/parameters/prettyPrint" - }, - { - "$ref": "#/components/parameters/_.xgafv" - }, - { - "$ref": "#/components/parameters/quotaUser" - } - ] - }, - "/v1beta2/{+model}:countMessageTokens": { - "post": { - "description": "Runs a model's tokenizer on a string and returns the token count.", - "operationId": "generativelanguage.models.countMessageTokens", - "responses": { - "200": { - "description": "Successful response", - "content": { - "*/*": { - "schema": { - "$ref": "#/components/schemas/CountMessageTokensResponse" - } - } - } - } - }, - "parameters": [ - { - "name": "model", - "in": "path", - "description": "Required. The model's resource name. This serves as an ID for the Model to use. This name should match a model name returned by the `ListModels` method. Format: `models/{model}`", - "required": true, - "schema": { - "type": "string" - } - } - ], - "tags": [ - "models" - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/CountMessageTokensRequest" - } - } - } - } - }, - "parameters": [ - { - "$ref": "#/components/parameters/oauth_token" - }, - { - "$ref": "#/components/parameters/upload_protocol" - }, - { - "$ref": "#/components/parameters/uploadType" - }, - { - "$ref": "#/components/parameters/access_token" - }, - { - "$ref": "#/components/parameters/alt" - }, - { - "$ref": "#/components/parameters/callback" - }, - { - "$ref": "#/components/parameters/key" - }, - { - "$ref": "#/components/parameters/fields" - }, - { - "$ref": "#/components/parameters/prettyPrint" - }, - { - "$ref": "#/components/parameters/_.xgafv" - }, - { - "$ref": "#/components/parameters/quotaUser" - } - ] - } - }, - "tags": [ - { - "name": "models" - } - ], - "externalDocs": { - "url": "https://developers.generativeai.google/api" - }, - "components": { - "schemas": { - "EmbedTextRequest": { - "type": "object", - "description": "Request to get a text embedding from the model.", - "properties": { - "text": { - "type": "string", - "description": "Required. The free-form input text that the model will turn into an embedding." - } - } - }, - "GenerateTextResponse": { - "properties": { - "candidates": { - "type": "array", - "description": "Candidate responses from the model.", - "items": { - "$ref": "#/components/schemas/TextCompletion" - } - } - }, - "type": "object", - "description": "The response from the model, including candidate completions." - }, - "Embedding": { - "description": "A list of floats representing the embedding.", - "type": "object", - "properties": { - "value": { - "description": "The embedding values.", - "items": { - "type": "number", - "format": "float" - }, - "type": "array" - } - } - }, - "Message": { - "type": "object", - "description": "The base unit of structured text. A `Message` includes an `author` and the `content` of the `Message`. 
The `author` is used to tag messages when they are fed to the model as text.", - "properties": { - "content": { - "type": "string", - "description": "Required. The text content of the structured `Message`." - }, - "citationMetadata": { - "description": "Output only. Citation information for model-generated `content` in this `Message`. If this `Message` was generated as output from the model, this field may be populated with attribution information for any text included in the `content`. This field is used only on output.", - "$ref": "#/components/schemas/CitationMetadata", - "readOnly": true - }, - "author": { - "type": "string", - "description": "Optional. The author of this Message. This serves as a key for tagging the content of this Message when it is fed to the model as text. The author can be any alphanumeric string." - } - } - }, - "GenerateMessageRequest": { - "properties": { - "topK": { - "description": "Optional. The maximum number of tokens to consider when sampling. The model uses combined Top-k and nucleus sampling. Top-k sampling considers the set of `top_k` most probable tokens.", - "format": "int32", - "type": "integer" - }, - "topP": { - "type": "number", - "format": "float", - "description": "Optional. The maximum cumulative probability of tokens to consider when sampling. The model uses combined Top-k and nucleus sampling. Nucleus sampling considers the smallest set of tokens whose probability sum is at least `top_p`." - }, - "candidateCount": { - "format": "int32", - "description": "Optional. The number of generated response messages to return. This value must be between `[1, 8]`, inclusive. If unset, this will default to `1`.", - "type": "integer" - }, - "prompt": { - "description": "Required. The structured textual input given to the model as a prompt. Given a prompt, the model will return what it predicts is the next message in the discussion.", - "$ref": "#/components/schemas/MessagePrompt" - }, - "temperature": { - "type": "number", - "description": "Optional. Controls the randomness of the output. Values can range over `[0.0,1.0]`, inclusive. A value closer to `1.0` will produce responses that are more varied, while a value closer to `0.0` will typically result in less surprising responses from the model.", - "format": "float" - } - }, - "type": "object", - "description": "Request to generate a message response from the model." - }, - "ListModelsResponse": { - "type": "object", - "description": "Response from `ListModel` containing a paginated list of Models.", - "properties": { - "models": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Model" - }, - "description": "The returned Models." - }, - "nextPageToken": { - "type": "string", - "description": "A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no more pages." - } - } - }, - "EmbedTextResponse": { - "type": "object", - "description": "The response to a EmbedTextRequest.", - "properties": { - "embedding": { - "readOnly": true, - "description": "Output only. The embedding generated from the input text.", - "$ref": "#/components/schemas/Embedding" - } - } - }, - "Model": { - "description": "Information about a Generative Language Model.", - "properties": { - "name": { - "description": "Required. The resource name of the `Model`. 
Format: `models/{model}` with a `{model}` naming convention of: * \"{base_model_id}-{version}\" Examples: * `models/chat-pison-001`", - "type": "string" - }, - "topP": { - "type": "number", - "description": "For Nucleus sampling. Nucleus sampling considers the smallest set of tokens whose probability sum is at least `top_p`. This value specifies default to be used by the backend while making the call to the model.", - "format": "float" - }, - "description": { - "description": "A short description of the model.", - "type": "string" - }, - "outputTokenLimit": { - "type": "integer", - "format": "int32", - "description": "Maximum number of output tokens available for this model." - }, - "version": { - "description": "Required. The version number of the model. This represents the major version", - "type": "string" - }, - "displayName": { - "type": "string", - "description": "The human-readable name of the model. E.g. \"Chat Bison\". The name can be up to 128 characters long and can consist of any UTF-8 characters." - }, - "temperature": { - "format": "float", - "description": "Controls the randomness of the output. Values can range over `[0.0,1.0]`, inclusive. A value closer to `1.0` will produce responses that are more varied, while a value closer to `0.0` will typically result in less surprising responses from the model. This value specifies default to be used by the backend while making the call to the model.", - "type": "number" - }, - "baseModelId": { - "description": "Required. The name of the base model, pass this to the generation request. Examples: * `chat-bison`", - "type": "string" - }, - "supportedGenerationMethods": { - "description": "The model's supported generation methods. The method names are defined as Pascal case strings, such as `generateMessage` which correspond to API methods.", - "items": { - "type": "string" - }, - "type": "array" - }, - "topK": { - "description": "For Top-k sampling. Top-k sampling considers the set of `top_k` most probable tokens. This value specifies default to be used by the backend while making the call to the model.", - "format": "int32", - "type": "integer" - }, - "inputTokenLimit": { - "description": "Maximum number of input tokens allowed for this model.", - "format": "int32", - "type": "integer" - } - }, - "type": "object" - }, - "CountMessageTokensResponse": { - "type": "object", - "description": "A response from `CountMessageTokens`. It returns the model's `token_count` for the `prompt`.", - "properties": { - "tokenCount": { - "type": "integer", - "format": "int32", - "description": "The number of tokens that the `model` tokenizes the `prompt` into. Always non-negative." - } - } - }, - "CitationMetadata": { - "description": "A collection of source attributions for a piece of content.", - "type": "object", - "properties": { - "citationSources": { - "type": "array", - "description": "Citations to sources for a specific response.", - "items": { - "$ref": "#/components/schemas/CitationSource" - } - } - } - }, - "CitationSource": { - "properties": { - "startIndex": { - "type": "integer", - "format": "int32", - "description": "Optional. Start of segment of the response that is attributed to this source. Index indicates the start of the segment, measured in bytes." - }, - "uri": { - "type": "string", - "description": "Optional. URI that is attributed as a source for a portion of the text." - }, - "license": { - "description": "Optional. License for the GitHub project that is attributed as a source for segment. 
License info is required for code citations.", - "type": "string" - }, - "endIndex": { - "format": "int32", - "type": "integer", - "description": "Optional. End of the attributed segment, exclusive." - } - }, - "type": "object", - "description": "A citation to a source for a portion of a specific response." - }, - "GenerateMessageResponse": { - "description": "The response from the model. This includes candidate messages and conversation history in the form of chronologically-ordered messages.", - "type": "object", - "properties": { - "candidates": { - "description": "Candidate response messages from the model.", - "type": "array", - "items": { - "$ref": "#/components/schemas/Message" - } - }, - "messages": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Message" - }, - "description": "The conversation history used by the model." - } - } - }, - "GenerateTextRequest": { - "type": "object", - "properties": { - "topP": { - "description": "The maximum cumulative probability of tokens to consider when sampling. The model uses combined Top-k and nucleus sampling. Tokens are sorted based on their assigned probabilities so that only the most liekly tokens are considered. Top-k sampling directly limits the maximum number of tokens to consider, while Nucleus sampling limits number of tokens based on the cumulative probability. Note: The default value varies by model, see the `Model.top_p` attribute of the `Model` returned the `getModel` function.", - "type": "number", - "format": "float" - }, - "stopSequences": { - "items": { - "type": "string" - }, - "description": "The set of character sequences (up to 5) that will stop output generation. If specified, the API will stop at the first appearance of a stop sequence. The stop sequence will not be included as part of the response.", - "type": "array" - }, - "topK": { - "description": "The maximum number of tokens to consider when sampling. The model uses combined Top-k and nucleus sampling. Top-k sampling considers the set of `top_k` most probable tokens. Defaults to 40. Note: The default value varies by model, see the `Model.top_k` attribute of the `Model` returned the `getModel` function.", - "type": "integer", - "format": "int32" - }, - "temperature": { - "format": "float", - "description": "Controls the randomness of the output. Note: The default value varies by model, see the `Model.temperature` attribute of the `Model` returned the `getModel` function. Values can range from [0.0,1.0], inclusive. A value closer to 1.0 will produce responses that are more varied and creative, while a value closer to 0.0 will typically result in more straightforward responses from the model.", - "type": "number" - }, - "candidateCount": { - "description": "Number of generated responses to return. This value must be between [1, 8], inclusive. If unset, this will default to 1.", - "type": "integer", - "format": "int32" - }, - "prompt": { - "description": "Required. The free-form input text given to the model as a prompt. Given a prompt, the model will generate a TextCompletion response it predicts as the completion of the input text.", - "$ref": "#/components/schemas/TextPrompt" - }, - "maxOutputTokens": { - "type": "integer", - "description": "The maximum number of tokens to include in a candidate. If unset, this will default to 64.", - "format": "int32" - } - }, - "description": "Request to generate a text completion response from the model." - }, - "MessagePrompt": { - "type": "object", - "properties": { - "context": { - "description": "Optional. 
Text that should be provided to the model first to ground the response. If not empty, this `context` will be given to the model first before the `examples` and `messages`. When using a `context` be sure to provide it with every request to maintain continuity. This field can be a description of your prompt to the model to help provide context and guide the responses. Examples: \"Translate the phrase from English to French.\" or \"Given a statement, classify the sentiment as happy, sad or neutral.\" Anything included in this field will take precedence over message history if the total input size exceeds the model's `input_token_limit` and the input request is truncated.", - "type": "string" - }, - "examples": { - "description": "Optional. Examples of what the model should generate. This includes both user input and the response that the model should emulate. These `examples` are treated identically to conversation messages except that they take precedence over the history in `messages`: If the total input size exceeds the model's `input_token_limit` the input will be truncated. Items will be dropped from `messages` before `examples`.", - "items": { - "$ref": "#/components/schemas/Example" - }, - "type": "array" - }, - "messages": { - "items": { - "$ref": "#/components/schemas/Message" - }, - "description": "Required. A snapshot of the recent conversation history sorted chronologically. Turns alternate between two authors. If the total input size exceeds the model's `input_token_limit` the input will be truncated: The oldest items will be dropped from `messages`.", - "type": "array" - } - }, - "description": "All of the structured input text passed to the model as a prompt. A `MessagePrompt` contains a structured set of fields that provide context for the conversation, examples of user input/model output message pairs that prime the model to respond in different ways, and the conversation history or list of messages representing the alternating turns of the conversation between the user and the model." - }, - "CountMessageTokensRequest": { - "properties": { - "prompt": { - "$ref": "#/components/schemas/MessagePrompt", - "description": "Required. The prompt, whose token count is to be returned." - } - }, - "type": "object", - "description": "Counts the number of tokens in the `prompt` sent to a model. Models may tokenize text differently, so each model may return a different `token_count`." - }, - "Example": { - "description": "An input/output example used to instruct the Model. It demonstrates how the model should respond or format its response.", - "type": "object", - "properties": { - "input": { - "description": "An example of an input `Message` from the user.", - "$ref": "#/components/schemas/Message" - }, - "output": { - "description": "An example of what the model should output given the input.", - "$ref": "#/components/schemas/Message" - } - } - }, - "TextCompletion": { - "type": "object", - "description": "Output text returned from a model.", - "properties": { - "output": { - "readOnly": true, - "type": "string", - "description": "Output only. The generated text returned from the model." - } - } - }, - "TextPrompt": { - "description": "Text given to the model as a prompt. The Model will use this TextPrompt to Generate a text completion.", - "type": "object", - "properties": { - "text": { - "type": "string", - "description": "Required. The prompt text." 
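For reference, the chat-prompt schemas above (`Message`, `Example`, `MessagePrompt`, `GenerateMessageRequest`) map naturally onto Swift `Codable` types. A minimal sketch with illustrative type names and sample values; only the JSON field names come from the spec:

```swift
import Foundation

// Illustrative Codable mirrors of the chat schemas above.
// Property names match the JSON keys; optionality follows the
// "Required."/"Optional." notes in the field descriptions.
struct Message: Codable {
    var author: String? = nil      // Optional author tag; any alphanumeric string.
    var content: String            // Required text content.
}

struct Example: Codable {
    var input: Message             // Example user input.
    var output: Message            // Output the model should emulate.
}

struct MessagePrompt: Codable {
    var context: String? = nil     // Optional grounding text, sent before examples/messages.
    var examples: [Example]? = nil // Optional priming examples.
    var messages: [Message]        // Required conversation history, oldest first.
}

struct GenerateMessageRequest: Codable {
    var prompt: MessagePrompt
    var temperature: Float? = nil  // [0.0, 1.0]
    var candidateCount: Int? = nil // [1, 8], defaults to 1
    var topP: Float? = nil
    var topK: Int? = nil
}

// A sample request body, encoded the way the API expects it.
let sampleRequest = GenerateMessageRequest(
    prompt: MessagePrompt(
        context: "Translate the phrase from English to French.",
        messages: [Message(author: "user", content: "How are you?")]
    ),
    temperature: 0.5
)
let body = try! JSONEncoder().encode(sampleRequest)
```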
- } - } - } - }, - "parameters": { - "oauth_token": { - "name": "oauth_token", - "in": "query", - "description": "OAuth 2.0 token for the current user.", - "schema": { - "type": "string" - } - }, - "upload_protocol": { - "name": "upload_protocol", - "in": "query", - "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", - "schema": { - "type": "string" - } - }, - "uploadType": { - "name": "uploadType", - "in": "query", - "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", - "schema": { - "type": "string" - } - }, - "access_token": { - "name": "access_token", - "in": "query", - "description": "OAuth access token.", - "schema": { - "type": "string" - } - }, - "alt": { - "name": "alt", - "in": "query", - "description": "Data format for response.", - "schema": { - "type": "string", - "enum": [ - "json", - "media", - "proto" - ], - "default": "json" - } - }, - "callback": { - "name": "callback", - "in": "query", - "description": "JSONP", - "schema": { - "type": "string" - } - }, - "key": { - "name": "key", - "in": "query", - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", - "schema": { - "type": "string" - } - }, - "fields": { - "name": "fields", - "in": "query", - "description": "Selector specifying which fields to include in a partial response.", - "schema": { - "type": "string" - } - }, - "prettyPrint": { - "name": "prettyPrint", - "in": "query", - "description": "Returns response with indentations and line breaks.", - "schema": { - "type": "boolean", - "default": true - } - }, - "quotaUser": { - "name": "quotaUser", - "in": "query", - "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", - "schema": { - "type": "string" - } - }, - "_.xgafv": { - "name": "$.xgafv", - "in": "query", - "description": "V1 error format.", - "schema": { - "type": "string", - "enum": [ - "1", - "2" - ] - } - } - } - } -} \ No newline at end of file diff --git a/Sources/palm-v1beta3.json b/Sources/palm-v1beta3.json deleted file mode 100644 index b1f79d8..0000000 --- a/Sources/palm-v1beta3.json +++ /dev/null @@ -1,1643 +0,0 @@ -{ - "ownerDomain": "google.com", - "icons": { - "x32": "http://www.google.com/images/icons/product/search-32.gif", - "x16": "http://www.google.com/images/icons/product/search-16.gif" - }, - "description": "The PaLM API allows developers to build generative AI applications using the PaLM model. Large Language Models (LLMs) are a powerful, versatile type of machine learning model that enables computers to comprehend and generate natural language through a series of prompts. The PaLM API is based on Google's next generation LLM, PaLM. It excels at a variety of different tasks like code generation, reasoning, and writing. You can use the PaLM API to build generative AI applications for use cases like content generation, dialogue agents, summarization and classification systems, and more.", - "baseUrl": "https://generativelanguage.googleapis.com/", - "rootUrl": "https://generativelanguage.googleapis.com/", - "ownerName": "Google", - "protocol": "rest", - "version": "v1beta3", - "mtlsRootUrl": "https://generativelanguage.mtls.googleapis.com/", - "schemas": { - "CitationSource": { - "type": "object", - "properties": { - "endIndex": { - "description": "Optional. 
End of the attributed segment, exclusive.", - "format": "int32", - "type": "integer" - }, - "license": { - "type": "string", - "description": "Optional. License for the GitHub project that is attributed as a source for segment. License info is required for code citations." - }, - "startIndex": { - "description": "Optional. Start of segment of the response that is attributed to this source. Index indicates the start of the segment, measured in bytes.", - "type": "integer", - "format": "int32" - }, - "uri": { - "type": "string", - "description": "Optional. URI that is attributed as a source for a portion of the text." - } - }, - "description": "A citation to a source for a portion of a specific response.", - "id": "CitationSource" - }, - "GenerateMessageResponse": { - "type": "object", - "id": "GenerateMessageResponse", - "description": "The response from the model. This includes candidate messages and conversation history in the form of chronologically-ordered messages.", - "properties": { - "messages": { - "description": "The conversation history used by the model.", - "type": "array", - "items": { - "$ref": "Message" - } - }, - "filters": { - "type": "array", - "description": "A set of content filtering metadata for the prompt and response text. This indicates which `SafetyCategory`(s) blocked a candidate from this response, the lowest `HarmProbability` that triggered a block, and the HarmThreshold setting for that category.", - "items": { - "$ref": "ContentFilter" - } - }, - "candidates": { - "items": { - "$ref": "Message" - }, - "description": "Candidate response messages from the model.", - "type": "array" - } - } - }, - "CreateTunedModelMetadata": { - "type": "object", - "id": "CreateTunedModelMetadata", - "properties": { - "totalSteps": { - "format": "int32", - "type": "integer", - "description": "The total number of tuning steps." - }, - "snapshots": { - "type": "array", - "description": "Metrics collected during tuning.", - "items": { - "$ref": "TuningSnapshot" - } - }, - "completedPercent": { - "description": "The completed percentage for the tuning operation.", - "type": "number", - "format": "float" - }, - "completedSteps": { - "type": "integer", - "description": "The number of steps completed.", - "format": "int32" - }, - "tunedModel": { - "description": "Name of the tuned model associated with the tuning operation.", - "type": "string" - } - }, - "description": "Metadata about the state and progress of creating a tuned model returned from the long-running operation" - }, - "CountMessageTokensResponse": { - "description": "A response from `CountMessageTokens`. It returns the model's `token_count` for the `prompt`.", - "type": "object", - "id": "CountMessageTokensResponse", - "properties": { - "tokenCount": { - "type": "integer", - "description": "The number of tokens that the `model` tokenizes the `prompt` into. Always non-negative.", - "format": "int32" - } - } - }, - "Operation": { - "properties": { - "error": { - "description": "The error result of the operation in case of failure or cancellation.", - "$ref": "Status" - }, - "name": { - "description": "The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.", - "type": "string" - }, - "response": { - "type": "object", - "description": "The normal, successful response of the operation. 
If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.", - "additionalProperties": { - "type": "any", - "description": "Properties of the object. Contains field @type with type URL." - } - }, - "done": { - "description": "If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.", - "type": "boolean" - }, - "metadata": { - "additionalProperties": { - "type": "any", - "description": "Properties of the object. Contains field @type with type URL." - }, - "description": "Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.", - "type": "object" - } - }, - "id": "Operation", - "type": "object", - "description": "This resource represents a long-running operation that is the result of a network API call." - }, - "CountMessageTokensRequest": { - "properties": { - "prompt": { - "description": "Required. The prompt, whose token count is to be returned.", - "$ref": "MessagePrompt" - } - }, - "id": "CountMessageTokensRequest", - "type": "object", - "description": "Counts the number of tokens in the `prompt` sent to a model. Models may tokenize text differently, so each model may return a different `token_count`." - }, - "SafetyFeedback": { - "properties": { - "setting": { - "$ref": "SafetySetting", - "description": "Safety settings applied to the request." - }, - "rating": { - "$ref": "SafetyRating", - "description": "Safety rating evaluated from content." - } - }, - "description": "Safety feedback for an entire request. This field is populated if content in the input and/or response is blocked due to safety settings. SafetyFeedback may not exist for every HarmCategory. Each SafetyFeedback will return the safety settings used by the request as well as the lowest HarmProbability that should be allowed in order to return a result.", - "id": "SafetyFeedback", - "type": "object" - }, - "SafetySetting": { - "description": "Safety setting, affecting the safety-blocking behavior. Passing a safety setting for a category changes the allowed proability that content is blocked.", - "id": "SafetySetting", - "type": "object", - "properties": { - "threshold": { - "enum": [ - "HARM_BLOCK_THRESHOLD_UNSPECIFIED", - "BLOCK_LOW_AND_ABOVE", - "BLOCK_MEDIUM_AND_ABOVE", - "BLOCK_ONLY_HIGH", - "BLOCK_NONE" - ], - "description": "Required. Controls the probability threshold at which harm is blocked.", - "enumDescriptions": [ - "Threshold is unspecified.", - "Content with NEGLIGIBLE will be allowed.", - "Content with NEGLIGIBLE and LOW will be allowed.", - "Content with NEGLIGIBLE, LOW, and MEDIUM will be allowed.", - "All content will be allowed." - ], - "type": "string" - }, - "category": { - "description": "Required. 
The category for this setting.", - "type": "string", - "enumDescriptions": [ - "Category is unspecified.", - "Negative or harmful comments targeting identity and/or protected attribute.", - "Content that is rude, disrepspectful, or profane.", - "Describes scenarios depictng violence against an individual or group, or general descriptions of gore.", - "Contains references to sexual acts or other lewd content.", - "Promotes unchecked medical advice.", - "Dangerous content that promotes, facilitates, or encourages harmful acts." - ], - "enum": [ - "HARM_CATEGORY_UNSPECIFIED", - "HARM_CATEGORY_DEROGATORY", - "HARM_CATEGORY_TOXICITY", - "HARM_CATEGORY_VIOLENCE", - "HARM_CATEGORY_SEXUAL", - "HARM_CATEGORY_MEDICAL", - "HARM_CATEGORY_DANGEROUS" - ] - } - } - }, - "Status": { - "description": "The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).", - "type": "object", - "properties": { - "code": { - "type": "integer", - "format": "int32", - "description": "The status code, which should be an enum value of google.rpc.Code." - }, - "details": { - "description": "A list of messages that carry the error details. There is a common set of message types for APIs to use.", - "type": "array", - "items": { - "additionalProperties": { - "type": "any", - "description": "Properties of the object. Contains field @type with type URL." - }, - "type": "object" - } - }, - "message": { - "description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.", - "type": "string" - } - }, - "id": "Status" - }, - "TuningExamples": { - "properties": { - "examples": { - "type": "array", - "items": { - "$ref": "TuningExample" - }, - "description": "Required. The examples. Example input can be for text or discuss, but all examples in a set must be of the same type." - } - }, - "description": "A set of tuning examples. Can be training or validatation data.", - "type": "object", - "id": "TuningExamples" - }, - "ListTunedModelsResponse": { - "description": "Response from `ListTunedModels` containing a paginated list of Models.", - "properties": { - "tunedModels": { - "description": "The returned Models.", - "items": { - "$ref": "TunedModel" - }, - "type": "array" - }, - "nextPageToken": { - "description": "A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no more pages.", - "type": "string" - } - }, - "type": "object", - "id": "ListTunedModelsResponse" - }, - "CitationMetadata": { - "properties": { - "citationSources": { - "items": { - "$ref": "CitationSource" - }, - "type": "array", - "description": "Citations to sources for a specific response." - } - }, - "description": "A collection of source attributions for a piece of content.", - "id": "CitationMetadata", - "type": "object" - }, - "Model": { - "properties": { - "temperature": { - "type": "number", - "description": "Controls the randomness of the output. Values can range over `[0.0,1.0]`, inclusive. 
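The `SafetySetting` enums above can be mirrored as string-backed Swift enums. In the sketch below the raw values are the spec's enum strings, while the Swift case and type names are illustrative:

```swift
import Foundation

// Illustrative Swift mirrors of the SafetySetting schema above.
enum HarmCategory: String, Codable {
    case unspecified = "HARM_CATEGORY_UNSPECIFIED"
    case derogatory  = "HARM_CATEGORY_DEROGATORY"
    case toxicity    = "HARM_CATEGORY_TOXICITY"
    case violence    = "HARM_CATEGORY_VIOLENCE"
    case sexual      = "HARM_CATEGORY_SEXUAL"
    case medical     = "HARM_CATEGORY_MEDICAL"
    case dangerous   = "HARM_CATEGORY_DANGEROUS"
}

enum HarmBlockThreshold: String, Codable {
    case unspecified         = "HARM_BLOCK_THRESHOLD_UNSPECIFIED"
    case blockLowAndAbove    = "BLOCK_LOW_AND_ABOVE"
    case blockMediumAndAbove = "BLOCK_MEDIUM_AND_ABOVE"
    case blockOnlyHigh       = "BLOCK_ONLY_HIGH"
    case blockNone           = "BLOCK_NONE"
}

struct SafetySetting: Codable {
    var category: HarmCategory        // Required.
    var threshold: HarmBlockThreshold // Required.
}

// Example: block toxicity content only when rated MEDIUM or above.
let setting = SafetySetting(category: .toxicity, threshold: .blockMediumAndAbove)
```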
A value closer to `1.0` will produce responses that are more varied, while a value closer to `0.0` will typically result in less surprising responses from the model. This value specifies default to be used by the backend while making the call to the model.", - "format": "float" - }, - "displayName": { - "type": "string", - "description": "The human-readable name of the model. E.g. \"Chat Bison\". The name can be up to 128 characters long and can consist of any UTF-8 characters." - }, - "outputTokenLimit": { - "description": "Maximum number of output tokens available for this model.", - "type": "integer", - "format": "int32" - }, - "baseModelId": { - "type": "string", - "description": "Required. The name of the base model, pass this to the generation request. Examples: * `chat-bison`" - }, - "name": { - "description": "Required. The resource name of the `Model`. Format: `models/{model}` with a `{model}` naming convention of: * \"{base_model_id}-{version}\" Examples: * `models/chat-bison-001`", - "type": "string" - }, - "inputTokenLimit": { - "format": "int32", - "description": "Maximum number of input tokens allowed for this model.", - "type": "integer" - }, - "description": { - "type": "string", - "description": "A short description of the model." - }, - "version": { - "type": "string", - "description": "Required. The version number of the model. This represents the major version" - }, - "topK": { - "description": "For Top-k sampling. Top-k sampling considers the set of `top_k` most probable tokens. This value specifies default to be used by the backend while making the call to the model.", - "type": "integer", - "format": "int32" - }, - "topP": { - "type": "number", - "description": "For Nucleus sampling. Nucleus sampling considers the smallest set of tokens whose probability sum is at least `top_p`. This value specifies default to be used by the backend while making the call to the model.", - "format": "float" - }, - "supportedGenerationMethods": { - "description": "The model's supported generation methods. The method names are defined as Pascal case strings, such as `generateMessage` which correspond to API methods.", - "type": "array", - "items": { - "type": "string" - } - } - }, - "id": "Model", - "type": "object", - "description": "Information about a Generative Language Model." - }, - "TransferOwnershipRequest": { - "description": "Request to transfer the ownership of the tuned model.", - "type": "object", - "id": "TransferOwnershipRequest", - "properties": { - "emailAddress": { - "description": "Required. The email address of the user to whom the tuned model is being transferred to.", - "type": "string" - } - } - }, - "Permission": { - "properties": { - "name": { - "type": "string", - "description": "Output only. The permission name. A unique name will be generated on create. Example: tunedModels/{tuned_model}permssions/{permission} Output only.", - "readOnly": true - }, - "granteeType": { - "enum": [ - "GRANTEE_TYPE_UNSPECIFIED", - "USER", - "GROUP", - "EVERYONE" - ], - "description": "Required. Immutable. The type of the grantee.", - "enumDescriptions": [ - "The default value. This value is unused.", - "Represents a user. When set, you must provide email_address for the user.", - "Represents a group. When set, you must provide email_address for the group.", - "Represents access to everyone. No extra information is required." - ], - "type": "string" - }, - "emailAddress": { - "type": "string", - "description": "Optional. Immutable. 
The email address of the user of group which this permission refers. Field is not set when permission's grantee type is EVERYONE." - }, - "role": { - "type": "string", - "enumDescriptions": [ - "The default value. This value is unused.", - "Owner can use, update, share and delete the resource.", - "Writer can use, update and share the resource.", - "Reader can use the resource." - ], - "description": "Required. The role granted by this permission.", - "enum": [ - "ROLE_UNSPECIFIED", - "OWNER", - "WRITER", - "READER" - ] - } - }, - "type": "object", - "id": "Permission", - "description": "Permission resource grants user, group or the rest of the world access to the PaLM API resource (e.g. a tuned model, file). A role is a collection of permitted operations that allows users to perform specific actions on PaLM API resources. To make them available to users, groups, or service accounts, you assign roles. When you assign a role, you grant permissions that the role contains. There are three concentric roles. Each role is a superset of the previous role's permitted operations: - reader can use the resource (e.g. tuned model) for inference - writer has reader's permissions and additionally can edit and share - owner has writer's permissions and additionally can delete" - }, - "ListModelsResponse": { - "properties": { - "models": { - "items": { - "$ref": "Model" - }, - "description": "The returned Models.", - "type": "array" - }, - "nextPageToken": { - "description": "A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no more pages.", - "type": "string" - } - }, - "id": "ListModelsResponse", - "type": "object", - "description": "Response from `ListModel` containing a paginated list of Models." - }, - "CountTextTokensResponse": { - "type": "object", - "properties": { - "tokenCount": { - "description": "The number of tokens that the `model` tokenizes the `prompt` into. Always non-negative.", - "format": "int32", - "type": "integer" - } - }, - "description": "A response from `CountTextTokens`. It returns the model's `token_count` for the `prompt`.", - "id": "CountTextTokensResponse" - }, - "GenerateTextRequest": { - "description": "Request to generate a text completion response from the model.", - "properties": { - "prompt": { - "$ref": "TextPrompt", - "description": "Required. The free-form input text given to the model as a prompt. Given a prompt, the model will generate a TextCompletion response it predicts as the completion of the input text." - }, - "topP": { - "format": "float", - "description": "Optional. The maximum cumulative probability of tokens to consider when sampling. The model uses combined Top-k and nucleus sampling. Tokens are sorted based on their assigned probabilities so that only the most likely tokens are considered. Top-k sampling directly limits the maximum number of tokens to consider, while Nucleus sampling limits number of tokens based on the cumulative probability. Note: The default value varies by model, see the `Model.top_p` attribute of the `Model` returned the `getModel` function.", - "type": "number" - }, - "maxOutputTokens": { - "type": "integer", - "description": "Optional. The maximum number of tokens to include in a candidate. If unset, this will default to output_token_limit specified in the `Model` specification.", - "format": "int32" - }, - "safetySettings": { - "description": "A list of unique `SafetySetting` instances for blocking unsafe content. 
that will be enforced on the `GenerateTextRequest.prompt` and `GenerateTextResponse.candidates`. There should not be more than one setting for each `SafetyCategory` type. The API will block any prompts and responses that fail to meet the thresholds set by these settings. This list overrides the default settings for each `SafetyCategory` specified in the safety_settings. If there is no `SafetySetting` for a given `SafetyCategory` provided in the list, the API will use the default safety setting for that category.", - "items": { - "$ref": "SafetySetting" - }, - "type": "array" - }, - "temperature": { - "type": "number", - "format": "float", - "description": "Optional. Controls the randomness of the output. Note: The default value varies by model, see the `Model.temperature` attribute of the `Model` returned the `getModel` function. Values can range from [0.0,1.0], inclusive. A value closer to 1.0 will produce responses that are more varied and creative, while a value closer to 0.0 will typically result in more straightforward responses from the model." - }, - "topK": { - "description": "Optional. The maximum number of tokens to consider when sampling. The model uses combined Top-k and nucleus sampling. Top-k sampling considers the set of `top_k` most probable tokens. Defaults to 40. Note: The default value varies by model, see the `Model.top_k` attribute of the `Model` returned the `getModel` function.", - "type": "integer", - "format": "int32" - }, - "stopSequences": { - "items": { - "type": "string" - }, - "type": "array", - "description": "The set of character sequences (up to 5) that will stop output generation. If specified, the API will stop at the first appearance of a stop sequence. The stop sequence will not be included as part of the response." - }, - "candidateCount": { - "type": "integer", - "description": "Optional. Number of generated responses to return. This value must be between [1, 8], inclusive. If unset, this will default to 1.", - "format": "int32" - } - }, - "id": "GenerateTextRequest", - "type": "object" - }, - "Dataset": { - "description": "Dataset for training or validation.", - "type": "object", - "id": "Dataset", - "properties": { - "examples": { - "description": "Optional. Inline examples.", - "$ref": "TuningExamples" - } - } - }, - "TextPrompt": { - "description": "Text given to the model as a prompt. The Model will use this TextPrompt to Generate a text completion.", - "id": "TextPrompt", - "properties": { - "text": { - "type": "string", - "description": "Required. The prompt text." - } - }, - "type": "object" - }, - "Example": { - "description": "An input/output example used to instruct the Model. It demonstrates how the model should respond or format its response.", - "type": "object", - "id": "Example", - "properties": { - "output": { - "description": "Required. An example of what the model should output given the input.", - "$ref": "Message" - }, - "input": { - "$ref": "Message", - "description": "Required. An example of an input `Message` from the user." - } - } - }, - "TuningTask": { - "properties": { - "startTime": { - "type": "string", - "format": "google-datetime", - "description": "Output only. The timestamp when tuning this model started.", - "readOnly": true - }, - "completeTime": { - "description": "Output only. 
The timestamp when tuning this model completed.", - "type": "string", - "format": "google-datetime", - "readOnly": true - }, - "snapshots": { - "type": "array", - "readOnly": true, - "items": { - "$ref": "TuningSnapshot" - }, - "description": "Output only. Metrics collected during tuning." - }, - "trainingData": { - "description": "Required. Input only. Immutable. The model training data.", - "$ref": "Dataset" - }, - "hyperparameters": { - "$ref": "Hyperparameters", - "description": "Immutable. Hyperparameters controlling the tuning process. If not provided, default values will be used." - } - }, - "id": "TuningTask", - "description": "Tuning tasks that create tuned models.", - "type": "object" - }, - "TuningExample": { - "id": "TuningExample", - "properties": { - "output": { - "description": "Required. The expected model output.", - "type": "string" - }, - "textInput": { - "description": "Optional. Text model input.", - "type": "string" - } - }, - "description": "A single example for tuning.", - "type": "object" - }, - "EmbedTextResponse": { - "type": "object", - "description": "The response to a EmbedTextRequest.", - "id": "EmbedTextResponse", - "properties": { - "embedding": { - "$ref": "Embedding", - "readOnly": true, - "description": "Output only. The embedding generated from the input text." - } - } - }, - "TuningSnapshot": { - "properties": { - "epoch": { - "type": "integer", - "description": "Output only. The epoch this step was part of.", - "format": "int32", - "readOnly": true - }, - "step": { - "readOnly": true, - "type": "integer", - "format": "int32", - "description": "Output only. The tuning step." - }, - "meanLoss": { - "format": "float", - "type": "number", - "description": "Output only. The mean loss of the training examples for this step.", - "readOnly": true - }, - "computeTime": { - "readOnly": true, - "description": "Output only. The timestamp when this metric was computed.", - "type": "string", - "format": "google-datetime" - } - }, - "id": "TuningSnapshot", - "description": "Record for a single tuning step.", - "type": "object" - }, - "Embedding": { - "properties": { - "value": { - "type": "array", - "description": "The embedding values.", - "items": { - "format": "float", - "type": "number" - } - } - }, - "description": "A list of floats representing the embedding.", - "type": "object", - "id": "Embedding" - }, - "ListPermissionsResponse": { - "id": "ListPermissionsResponse", - "properties": { - "permissions": { - "items": { - "$ref": "Permission" - }, - "description": "Returned permissions.", - "type": "array" - }, - "nextPageToken": { - "description": "A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no more pages.", - "type": "string" - } - }, - "description": "Response from `ListPermissions` containing a paginated list of permissions.", - "type": "object" - }, - "BatchEmbedTextResponse": { - "properties": { - "embeddings": { - "description": "Output only. The embeddings generated from the input text.", - "type": "array", - "readOnly": true, - "items": { - "$ref": "Embedding" - } - } - }, - "id": "BatchEmbedTextResponse", - "type": "object", - "description": "The response to a EmbedTextRequest." - }, - "TransferOwnershipResponse": { - "type": "object", - "id": "TransferOwnershipResponse", - "description": "Response from `TransferOwnership`.", - "properties": {} - }, - "Message": { - "description": "The base unit of structured text. A `Message` includes an `author` and the `content` of the `Message`. 
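As a quick illustration of the embedding shapes above, a small sketch that decodes an `EmbedTextResponse`; the sample payload is invented and only the field names follow the spec:

```swift
import Foundation

// Illustrative Codable mirrors of the Embedding / EmbedTextResponse schemas above.
struct Embedding: Codable {
    var value: [Float]        // The embedding values.
}

struct EmbedTextResponse: Codable {
    var embedding: Embedding? // Output only.
}

// Decode a (made-up) response payload and read the vector.
let payload = #"{"embedding": {"value": [0.013, -0.207, 0.084]}}"#.data(using: .utf8)!
let response = try! JSONDecoder().decode(EmbedTextResponse.self, from: payload)
print(response.embedding?.value.count ?? 0) // 3
```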
The `author` is used to tag messages when they are fed to the model as text.", - "type": "object", - "id": "Message", - "properties": { - "citationMetadata": { - "$ref": "CitationMetadata", - "description": "Output only. Citation information for model-generated `content` in this `Message`. If this `Message` was generated as output from the model, this field may be populated with attribution information for any text included in the `content`. This field is used only on output.", - "readOnly": true - }, - "author": { - "description": "Optional. The author of this Message. This serves as a key for tagging the content of this Message when it is fed to the model as text. The author can be any alphanumeric string.", - "type": "string" - }, - "content": { - "type": "string", - "description": "Required. The text content of the structured `Message`." - } - } - }, - "Empty": { - "type": "object", - "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); }", - "properties": {}, - "id": "Empty" - }, - "EmbedTextRequest": { - "type": "object", - "id": "EmbedTextRequest", - "properties": { - "text": { - "description": "Required. The free-form input text that the model will turn into an embedding.", - "type": "string" - } - }, - "description": "Request to get a text embedding from the model." - }, - "GenerateMessageRequest": { - "properties": { - "topP": { - "format": "float", - "description": "Optional. The maximum cumulative probability of tokens to consider when sampling. The model uses combined Top-k and nucleus sampling. Nucleus sampling considers the smallest set of tokens whose probability sum is at least `top_p`.", - "type": "number" - }, - "topK": { - "description": "Optional. The maximum number of tokens to consider when sampling. The model uses combined Top-k and nucleus sampling. Top-k sampling considers the set of `top_k` most probable tokens.", - "format": "int32", - "type": "integer" - }, - "prompt": { - "description": "Required. The structured textual input given to the model as a prompt. Given a prompt, the model will return what it predicts is the next message in the discussion.", - "$ref": "MessagePrompt" - }, - "temperature": { - "description": "Optional. Controls the randomness of the output. Values can range over `[0.0,1.0]`, inclusive. A value closer to `1.0` will produce responses that are more varied, while a value closer to `0.0` will typically result in less surprising responses from the model.", - "format": "float", - "type": "number" - }, - "candidateCount": { - "type": "integer", - "format": "int32", - "description": "Optional. The number of generated response messages to return. This value must be between `[1, 8]`, inclusive. If unset, this will default to `1`." - } - }, - "id": "GenerateMessageRequest", - "type": "object", - "description": "Request to generate a message response from the model." - }, - "GenerateTextResponse": { - "properties": { - "safetyFeedback": { - "description": "Returns any safety feedback related to content filtering.", - "type": "array", - "items": { - "$ref": "SafetyFeedback" - } - }, - "filters": { - "description": "A set of content filtering metadata for the prompt and response text. 
This indicates which `SafetyCategory`(s) blocked a candidate from this response, the lowest `HarmProbability` that triggered a block, and the HarmThreshold setting for that category. This indicates the smallest change to the `SafetySettings` that would be necessary to unblock at least 1 response. The blocking is configured by the `SafetySettings` in the request (or the default `SafetySettings` of the API).", - "type": "array", - "items": { - "$ref": "ContentFilter" - } - }, - "candidates": { - "description": "Candidate responses from the model.", - "items": { - "$ref": "TextCompletion" - }, - "type": "array" - } - }, - "id": "GenerateTextResponse", - "description": "The response from the model, including candidate completions.", - "type": "object" - }, - "TextCompletion": { - "id": "TextCompletion", - "description": "Output text returned from a model.", - "type": "object", - "properties": { - "citationMetadata": { - "readOnly": true, - "description": "Output only. Citation information for model-generated `output` in this `TextCompletion`. This field may be populated with attribution information for any text included in the `output`.", - "$ref": "CitationMetadata" - }, - "output": { - "type": "string", - "readOnly": true, - "description": "Output only. The generated text returned from the model." - }, - "safetyRatings": { - "description": "Ratings for the safety of a response. There is at most one rating per category.", - "type": "array", - "items": { - "$ref": "SafetyRating" - } - } - } - }, - "TunedModelSource": { - "properties": { - "baseModel": { - "readOnly": true, - "type": "string", - "description": "Output only. The name of the base `Model` this `TunedModel` was tuned from. Example: `models/text-bison-001`" - }, - "tunedModel": { - "type": "string", - "description": "Immutable. The name of the `TunedModel` to use as the starting point for training the new model. Example: `tunedModels/my-tuned-model`" - } - }, - "description": "Tuned model as a source for training a new model.", - "type": "object", - "id": "TunedModelSource" - }, - "SafetyRating": { - "properties": { - "probability": { - "description": "Required. The probability of harm for this content.", - "enumDescriptions": [ - "Probability is unspecified.", - "Content has a negligible chance of being unsafe.", - "Content has a low chance of being unsafe.", - "Content has a medium chance of being unsafe.", - "Content has a high chance of being unsafe." - ], - "enum": [ - "HARM_PROBABILITY_UNSPECIFIED", - "NEGLIGIBLE", - "LOW", - "MEDIUM", - "HIGH" - ], - "type": "string" - }, - "category": { - "enumDescriptions": [ - "Category is unspecified.", - "Negative or harmful comments targeting identity and/or protected attribute.", - "Content that is rude, disrepspectful, or profane.", - "Describes scenarios depictng violence against an individual or group, or general descriptions of gore.", - "Contains references to sexual acts or other lewd content.", - "Promotes unchecked medical advice.", - "Dangerous content that promotes, facilitates, or encourages harmful acts." - ], - "type": "string", - "enum": [ - "HARM_CATEGORY_UNSPECIFIED", - "HARM_CATEGORY_DEROGATORY", - "HARM_CATEGORY_TOXICITY", - "HARM_CATEGORY_VIOLENCE", - "HARM_CATEGORY_SEXUAL", - "HARM_CATEGORY_MEDICAL", - "HARM_CATEGORY_DANGEROUS" - ], - "description": "Required. The category for this rating." - } - }, - "type": "object", - "id": "SafetyRating", - "description": "Safety rating for a piece of content. 
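The `HARM_PROBABILITY_*` values above are ordered by severity, which is what thresholds such as `BLOCK_MEDIUM_AND_ABOVE` rely on. A rough Swift sketch of that ordering applied to `TextCompletion.safetyRatings`; the type names and the filtering helper are illustrative:

```swift
import Foundation

// Illustrative mirrors of the SafetyRating / TextCompletion shapes above.
enum HarmProbability: String, Codable, Comparable {
    case unspecified = "HARM_PROBABILITY_UNSPECIFIED"
    case negligible  = "NEGLIGIBLE"
    case low         = "LOW"
    case medium      = "MEDIUM"
    case high        = "HIGH"

    // Rank cases by severity so ratings can be compared against a threshold.
    private var rank: Int {
        switch self {
        case .unspecified: return 0
        case .negligible:  return 1
        case .low:         return 2
        case .medium:      return 3
        case .high:        return 4
        }
    }
    static func < (lhs: Self, rhs: Self) -> Bool { lhs.rank < rhs.rank }
}

struct SafetyRating: Codable {
    var category: String             // e.g. "HARM_CATEGORY_TOXICITY"
    var probability: HarmProbability
}

struct TextCompletion: Codable {
    var output: String? = nil
    var safetyRatings: [SafetyRating]? = nil
}

// Keep only candidates whose every rating is below MEDIUM.
func lowRiskOutputs(_ candidates: [TextCompletion]) -> [String] {
    candidates
        .filter { ($0.safetyRatings ?? []).allSatisfy { $0.probability < .medium } }
        .compactMap { $0.output }
}
```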
The safety rating contains the category of harm and the harm probability level in that category for a piece of content. Content is classified for safety across a number of harm categories and the probability of the harm classification is included here." - }, - "Hyperparameters": { - "properties": { - "epochCount": { - "type": "integer", - "format": "int32", - "description": "Immutable. The number of training epochs. An epoch is one pass through the training data. If not set, a default of 10 will be used." - }, - "learningRate": { - "format": "float", - "description": "Immutable. The learning rate hyperparameter for tuning. If not set, a default of 0.0002 or 0.002 will be calculated based on the number of training examples.", - "type": "number" - }, - "batchSize": { - "description": "Immutable. The batch size hyperparameter for tuning. If not set, a default of 16 or 64 will be used based on the number of training examples.", - "format": "int32", - "type": "integer" - } - }, - "description": "Hyperparameters controlling the tuning process.", - "type": "object", - "id": "Hyperparameters" - }, - "ContentFilter": { - "type": "object", - "properties": { - "reason": { - "enum": [ - "BLOCKED_REASON_UNSPECIFIED", - "SAFETY", - "OTHER" - ], - "description": "The reason content was blocked during request processing.", - "type": "string", - "enumDescriptions": [ - "A blocked reason was not specified.", - "Content was blocked by safety settings.", - "Content was blocked, but the reason is uncategorized." - ] - }, - "message": { - "description": "A string that describes the filtering behavior in more detail.", - "type": "string" - } - }, - "description": "Content filtering metadata associated with processing a single request. ContentFilter contains a reason and an optional supporting string. The reason may be unspecified.", - "id": "ContentFilter" - }, - "MessagePrompt": { - "type": "object", - "id": "MessagePrompt", - "properties": { - "context": { - "description": "Optional. Text that should be provided to the model first to ground the response. If not empty, this `context` will be given to the model first before the `examples` and `messages`. When using a `context` be sure to provide it with every request to maintain continuity. This field can be a description of your prompt to the model to help provide context and guide the responses. Examples: \"Translate the phrase from English to French.\" or \"Given a statement, classify the sentiment as happy, sad or neutral.\" Anything included in this field will take precedence over message history if the total input size exceeds the model's `input_token_limit` and the input request is truncated.", - "type": "string" - }, - "messages": { - "items": { - "$ref": "Message" - }, - "type": "array", - "description": "Required. A snapshot of the recent conversation history sorted chronologically. Turns alternate between two authors. If the total input size exceeds the model's `input_token_limit` the input will be truncated: The oldest items will be dropped from `messages`." - }, - "examples": { - "items": { - "$ref": "Example" - }, - "type": "array", - "description": "Optional. Examples of what the model should generate. This includes both user input and the response that the model should emulate. These `examples` are treated identically to conversation messages except that they take precedence over the history in `messages`: If the total input size exceeds the model's `input_token_limit` the input will be truncated. 
Items will be dropped from `messages` before `examples`." - } - }, - "description": "All of the structured input text passed to the model as a prompt. A `MessagePrompt` contains a structured set of fields that provide context for the conversation, examples of user input/model output message pairs that prime the model to respond in different ways, and the conversation history or list of messages representing the alternating turns of the conversation between the user and the model." - }, - "TunedModel": { - "properties": { - "topK": { - "format": "int32", - "type": "integer", - "description": "Optional. For Top-k sampling. Top-k sampling considers the set of `top_k` most probable tokens. This value specifies default to be used by the backend while making the call to the model. This value specifies default to be the one used by the base model while creating the model." - }, - "temperature": { - "format": "float", - "type": "number", - "description": "Optional. Controls the randomness of the output. Values can range over `[0.0,1.0]`, inclusive. A value closer to `1.0` will produce responses that are more varied, while a value closer to `0.0` will typically result in less surprising responses from the model. This value specifies default to be the one used by the base model while creating the model." - }, - "baseModel": { - "description": "Immutable. The name of the `Model` to tune. Example: `models/text-bison-001`", - "type": "string" - }, - "createTime": { - "format": "google-datetime", - "type": "string", - "readOnly": true, - "description": "Output only. The timestamp when this model was created." - }, - "state": { - "type": "string", - "description": "Output only. The state of the tuned model.", - "readOnly": true, - "enumDescriptions": [ - "The default value. This value is unused.", - "The model is being created.", - "The model is ready to be used.", - "The model failed to be created." - ], - "enum": [ - "STATE_UNSPECIFIED", - "CREATING", - "ACTIVE", - "FAILED" - ] - }, - "name": { - "type": "string", - "readOnly": true, - "description": "Output only. The tuned model name. A unique name will be generated on create. Example: `tunedModels/az2mb0bpw6i` If display_name is set on create, the id portion of the name will be set by concatenating the words of the display_name with hyphens and adding a random portion for uniqueness. Example: display_name = \"Sentence Translator\" name = \"tunedModels/sentence-translator-u3b7m\"" - }, - "description": { - "type": "string", - "description": "Optional. A short description of this model." - }, - "topP": { - "type": "number", - "description": "Optional. For Nucleus sampling. Nucleus sampling considers the smallest set of tokens whose probability sum is at least `top_p`. This value specifies default to be the one used by the base model while creating the model.", - "format": "float" - }, - "tunedModelSource": { - "$ref": "TunedModelSource", - "description": "Optional. TunedModel to use as the starting point for training the new model." - }, - "updateTime": { - "type": "string", - "description": "Output only. The timestamp when this model was updated.", - "readOnly": true, - "format": "google-datetime" - }, - "displayName": { - "description": "Optional. The name to display for this model in user interfaces. The display name must be up to 40 characters including spaces.", - "type": "string" - }, - "tuningTask": { - "description": "Required. 
The tuning task that creates the tuned model.", - "$ref": "TuningTask" - } - }, - "description": "A fine-tuned model created using ModelService.CreateTunedModel.", - "id": "TunedModel", - "type": "object" - }, - "BatchEmbedTextRequest": { - "description": "Batch request to get a text embedding from the model.", - "properties": { - "texts": { - "type": "array", - "description": "Required. The free-form input texts that the model will turn into an embedding. The current limit is 100 texts, over which an error will be thrown.", - "items": { - "type": "string" - } - } - }, - "type": "object", - "id": "BatchEmbedTextRequest" - }, - "CountTextTokensRequest": { - "id": "CountTextTokensRequest", - "type": "object", - "description": "Counts the number of tokens in the `prompt` sent to a model. Models may tokenize text differently, so each model may return a different `token_count`.", - "properties": { - "prompt": { - "description": "Required. The free-form input text given to the model as a prompt.", - "$ref": "TextPrompt" - } - } - } - }, - "fullyEncodeReservedExpansion": true, - "discoveryVersion": "v1", - "kind": "discovery#restDescription", - "version_module": true, - "title": "Generative Language API", - "canonicalName": "Generative Language", - "basePath": "", - "documentationLink": "https://developers.generativeai.google/api", - "servicePath": "", - "name": "generativelanguage", - "parameters": { - "callback": { - "type": "string", - "description": "JSONP", - "location": "query" - }, - "fields": { - "description": "Selector specifying which fields to include in a partial response.", - "location": "query", - "type": "string" - }, - "prettyPrint": { - "default": "true", - "location": "query", - "type": "boolean", - "description": "Returns response with indentations and line breaks." - }, - "upload_protocol": { - "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", - "type": "string", - "location": "query" - }, - "$.xgafv": { - "location": "query", - "type": "string", - "description": "V1 error format.", - "enumDescriptions": [ - "v1 error format", - "v2 error format" - ], - "enum": [ - "1", - "2" - ] - }, - "oauth_token": { - "type": "string", - "location": "query", - "description": "OAuth 2.0 token for the current user." - }, - "quotaUser": { - "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", - "type": "string", - "location": "query" - }, - "key": { - "type": "string", - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", - "location": "query" - }, - "uploadType": { - "description": "Legacy upload protocol for media (e.g. 
\"media\", \"multipart\").", - "type": "string", - "location": "query" - }, - "access_token": { - "description": "OAuth access token.", - "location": "query", - "type": "string" - }, - "alt": { - "default": "json", - "enumDescriptions": [ - "Responses with Content-Type of application/json", - "Media download with context-dependent Content-Type", - "Responses with Content-Type of application/x-protobuf" - ], - "description": "Data format for response.", - "type": "string", - "enum": [ - "json", - "media", - "proto" - ], - "location": "query" - } - }, - "batchPath": "batch", - "revision": "20230925", - "resources": { - "tunedModels": { - "resources": { - "permissions": { - "methods": { - "patch": { - "httpMethod": "PATCH", - "parameterOrder": [ - "name" - ], - "parameters": { - "name": { - "required": true, - "pattern": "^tunedModels/[^/]+/permissions/[^/]+$", - "location": "path", - "type": "string", - "description": "Output only. The permission name. A unique name will be generated on create. Example: tunedModels/{tuned_model}permssions/{permission} Output only." - }, - "updateMask": { - "format": "google-fieldmask", - "location": "query", - "type": "string", - "description": "Required. The list of fields to update. Accepted ones: - role (`Permission.role` field)" - } - }, - "path": "v1beta3/{+name}", - "flatPath": "v1beta3/tunedModels/{tunedModelsId}/permissions/{permissionsId}", - "request": { - "$ref": "Permission" - }, - "description": "Updates the permission.", - "response": { - "$ref": "Permission" - }, - "id": "generativelanguage.tunedModels.permissions.patch" - }, - "delete": { - "id": "generativelanguage.tunedModels.permissions.delete", - "httpMethod": "DELETE", - "description": "Deletes the permission.", - "parameters": { - "name": { - "pattern": "^tunedModels/[^/]+/permissions/[^/]+$", - "location": "path", - "type": "string", - "description": "Required. The resource name of the permission. Format: `tunedModels/{tuned_model}/permissions/{permission}`", - "required": true - } - }, - "flatPath": "v1beta3/tunedModels/{tunedModelsId}/permissions/{permissionsId}", - "response": { - "$ref": "Empty" - }, - "parameterOrder": [ - "name" - ], - "path": "v1beta3/{+name}" - }, - "create": { - "flatPath": "v1beta3/tunedModels/{tunedModelsId}/permissions", - "parameterOrder": [ - "parent" - ], - "httpMethod": "POST", - "path": "v1beta3/{+parent}/permissions", - "description": "Create a permission to a specific resource.", - "id": "generativelanguage.tunedModels.permissions.create", - "request": { - "$ref": "Permission" - }, - "response": { - "$ref": "Permission" - }, - "parameters": { - "parent": { - "description": "Required. The parent resource of the `Permission`. Format: tunedModels/{tuned_model}", - "required": true, - "location": "path", - "pattern": "^tunedModels/[^/]+$", - "type": "string" - } - } - }, - "list": { - "httpMethod": "GET", - "description": "Lists permissions for the specific resource.", - "id": "generativelanguage.tunedModels.permissions.list", - "parameterOrder": [ - "parent" - ], - "flatPath": "v1beta3/tunedModels/{tunedModelsId}/permissions", - "path": "v1beta3/{+parent}/permissions", - "response": { - "$ref": "ListPermissionsResponse" - }, - "parameters": { - "pageSize": { - "format": "int32", - "location": "query", - "description": "Optional. The maximum number of `Permission`s to return (per page). The service may return fewer permissions. If unspecified, at most 10 permissions will be returned. 
This method returns at most 1000 permissions per page, even if you pass larger page_size.", - "type": "integer" - }, - "pageToken": { - "description": "Optional. A page token, received from a previous `ListPermissions` call. Provide the `page_token` returned by one request as an argument to the next request to retrieve the next page. When paginating, all other parameters provided to `ListPermissions` must match the call that provided the page token.", - "location": "query", - "type": "string" - }, - "parent": { - "description": "Required. The parent resource of the permissions. Format: tunedModels/{tuned_model}", - "pattern": "^tunedModels/[^/]+$", - "type": "string", - "location": "path", - "required": true - } - } - }, - "get": { - "parameters": { - "name": { - "description": "Required. The resource name of the permission. Format: `tunedModels/{tuned_model}permissions/{permission}`", - "required": true, - "type": "string", - "location": "path", - "pattern": "^tunedModels/[^/]+/permissions/[^/]+$" - } - }, - "parameterOrder": [ - "name" - ], - "path": "v1beta3/{+name}", - "description": "Gets information about a specific Permission.", - "httpMethod": "GET", - "id": "generativelanguage.tunedModels.permissions.get", - "flatPath": "v1beta3/tunedModels/{tunedModelsId}/permissions/{permissionsId}", - "response": { - "$ref": "Permission" - } - } - } - } - }, - "methods": { - "create": { - "response": { - "$ref": "Operation" - }, - "httpMethod": "POST", - "request": { - "$ref": "TunedModel" - }, - "flatPath": "v1beta3/tunedModels", - "parameters": { - "tunedModelId": { - "location": "query", - "type": "string", - "description": "Optional. The unique id for the tuned model if specified. This value should be up to 40 characters, the first character must be a letter, the last could be a letter or a number. The id must match the regular expression: [a-z]([a-z0-9-]{0,38}[a-z0-9])?." - } - }, - "parameterOrder": [], - "path": "v1beta3/tunedModels", - "description": "Creates a tuned model. Intermediate tuning progress (if any) is accessed through the [google.longrunning.Operations] service. Status and results can be accessed through the Operations service. Example: GET /v1/tunedModels/az2mb0bpw6i/operations/000-111-222", - "id": "generativelanguage.tunedModels.create" - }, - "transferOwnership": { - "parameters": { - "name": { - "description": "Required. The resource name of the tuned model to transfer ownership . Format: `tunedModels/my-model-id`", - "required": true, - "type": "string", - "location": "path", - "pattern": "^tunedModels/[^/]+$" - } - }, - "httpMethod": "POST", - "flatPath": "v1beta3/tunedModels/{tunedModelsId}:transferOwnership", - "response": { - "$ref": "TransferOwnershipResponse" - }, - "description": "Transfers ownership of the tuned model. This is the only way to change ownership of the tuned model. The current owner will be downgraded to writer role.", - "path": "v1beta3/{+name}:transferOwnership", - "parameterOrder": [ - "name" - ], - "id": "generativelanguage.tunedModels.transferOwnership", - "request": { - "$ref": "TransferOwnershipRequest" - } - }, - "generateText": { - "parameters": { - "model": { - "description": "Required. The name of the `Model` or `TunedModel` to use for generating the completion. 
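Putting the tuning pieces together, a hedged sketch of the `tunedModels.create` call above: a `TunedModel` body assembled from the `TuningTask`/`Dataset`/`TuningExamples`/`Hyperparameters` schemas, POSTed to `v1beta3/tunedModels`, returning a long-running `Operation`. The training examples are invented, the hyperparameter values are the defaults mentioned in the spec, and authentication is shown with the `key` query parameter for brevity:

```swift
import Foundation

// Illustrative TunedModel payload for tunedModels.create. Field names and
// nesting follow the TunedModel -> TuningTask -> Dataset -> TuningExamples
// schemas above; the example sentences are invented.
let tunedModelJSON = """
{
  "displayName": "Sentence Translator",
  "baseModel": "models/text-bison-001",
  "tuningTask": {
    "hyperparameters": { "epochCount": 10, "batchSize": 16, "learningRate": 0.0002 },
    "trainingData": {
      "examples": {
        "examples": [
          { "textInput": "Hello",   "output": "Bonjour" },
          { "textInput": "Goodbye", "output": "Au revoir" }
        ]
      }
    }
  }
}
"""

// Minimal mirror of the long-running Operation returned by the call.
struct LongRunningOperation: Codable {
    var name: String? = nil
    var done: Bool? = nil
}

// POST v1beta3/tunedModels, authenticated with the `key` query parameter.
func createTunedModel(apiKey: String) async throws -> LongRunningOperation {
    var components = URLComponents(string: "https://generativelanguage.googleapis.com/v1beta3/tunedModels")!
    components.queryItems = [URLQueryItem(name: "key", value: apiKey)]

    var request = URLRequest(url: components.url!)
    request.httpMethod = "POST"
    request.setValue("application/json", forHTTPHeaderField: "Content-Type")
    request.httpBody = tunedModelJSON.data(using: .utf8)

    let (data, _) = try await URLSession.shared.data(for: request)
    // Poll the returned operation (or the tuned model's `state`) until `done` is true.
    return try JSONDecoder().decode(LongRunningOperation.self, from: data)
}
```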
Examples: models/text-bison-001 tunedModels/sentence-translator-u3b7m", - "location": "path", - "pattern": "^tunedModels/[^/]+$", - "type": "string", - "required": true - } - }, - "flatPath": "v1beta3/tunedModels/{tunedModelsId}:generateText", - "parameterOrder": [ - "model" - ], - "id": "generativelanguage.tunedModels.generateText", - "description": "Generates a response from the model given an input message.", - "httpMethod": "POST", - "response": { - "$ref": "GenerateTextResponse" - }, - "path": "v1beta3/{+model}:generateText", - "request": { - "$ref": "GenerateTextRequest" - } - }, - "get": { - "response": { - "$ref": "TunedModel" - }, - "id": "generativelanguage.tunedModels.get", - "description": "Gets information about a specific TunedModel.", - "path": "v1beta3/{+name}", - "httpMethod": "GET", - "parameterOrder": [ - "name" - ], - "parameters": { - "name": { - "type": "string", - "required": true, - "description": "Required. The resource name of the model. Format: `tunedModels/my-model-id`", - "location": "path", - "pattern": "^tunedModels/[^/]+$" - } - }, - "flatPath": "v1beta3/tunedModels/{tunedModelsId}" - }, - "delete": { - "httpMethod": "DELETE", - "description": "Deletes a tuned model.", - "parameters": { - "name": { - "required": true, - "pattern": "^tunedModels/[^/]+$", - "location": "path", - "description": "Required. The resource name of the model. Format: `tunedModels/my-model-id`", - "type": "string" - } - }, - "id": "generativelanguage.tunedModels.delete", - "flatPath": "v1beta3/tunedModels/{tunedModelsId}", - "response": { - "$ref": "Empty" - }, - "path": "v1beta3/{+name}", - "parameterOrder": [ - "name" - ] - }, - "list": { - "httpMethod": "GET", - "parameterOrder": [], - "response": { - "$ref": "ListTunedModelsResponse" - }, - "path": "v1beta3/tunedModels", - "flatPath": "v1beta3/tunedModels", - "description": "Lists tuned models owned by the user.", - "parameters": { - "pageToken": { - "description": "Optional. A page token, received from a previous `ListTunedModels` call. Provide the `page_token` returned by one request as an argument to the next request to retrieve the next page. When paginating, all other parameters provided to `ListTunedModels` must match the call that provided the page token.", - "location": "query", - "type": "string" - }, - "filter": { - "description": "Optional. A filter is a full text search over the tuned model's description and display name. By default, results will not include tuned models shared with everyone. Additional operators: - owner:me - writers:me - readers:me - readers:everyone Examples: \"owner:me\" returns all tuned models to which caller has owner role \"readers:me\" returns all tuned models to which caller has reader role \"readers:everyone\" returns all tuned models that are shared with everyone", - "type": "string", - "location": "query" - }, - "pageSize": { - "type": "integer", - "description": "Optional. The maximum number of `TunedModels` to return (per page). The service may return fewer tuned models. If unspecified, at most 10 tuned models will be returned. This method returns at most 1000 models per page, even if you pass a larger page_size.", - "location": "query", - "format": "int32" - } - }, - "id": "generativelanguage.tunedModels.list" - }, - "patch": { - "httpMethod": "PATCH", - "parameterOrder": [ - "name" - ], - "description": "Updates a tuned model.", - "parameters": { - "updateMask": { - "format": "google-fieldmask", - "type": "string", - "description": "Required. 
The list of fields to update.", - "location": "query" - }, - "name": { - "description": "Output only. The tuned model name. A unique name will be generated on create. Example: `tunedModels/az2mb0bpw6i` If display_name is set on create, the id portion of the name will be set by concatenating the words of the display_name with hyphens and adding a random portion for uniqueness. Example: display_name = \"Sentence Translator\" name = \"tunedModels/sentence-translator-u3b7m\"", - "pattern": "^tunedModels/[^/]+$", - "type": "string", - "location": "path", - "required": true - } - }, - "id": "generativelanguage.tunedModels.patch", - "path": "v1beta3/{+name}", - "flatPath": "v1beta3/tunedModels/{tunedModelsId}", - "response": { - "$ref": "TunedModel" - }, - "request": { - "$ref": "TunedModel" - } - } - } - }, - "models": { - "methods": { - "generateMessage": { - "response": { - "$ref": "GenerateMessageResponse" - }, - "parameterOrder": [ - "model" - ], - "description": "Generates a response from the model given an input `MessagePrompt`.", - "parameters": { - "model": { - "pattern": "^models/[^/]+$", - "required": true, - "type": "string", - "location": "path", - "description": "Required. The name of the model to use. Format: `name=models/{model}`." - } - }, - "request": { - "$ref": "GenerateMessageRequest" - }, - "path": "v1beta3/{+model}:generateMessage", - "httpMethod": "POST", - "flatPath": "v1beta3/models/{modelsId}:generateMessage", - "id": "generativelanguage.models.generateMessage" - }, - "get": { - "parameterOrder": [ - "name" - ], - "response": { - "$ref": "Model" - }, - "flatPath": "v1beta3/models/{modelsId}", - "parameters": { - "name": { - "description": "Required. The resource name of the model. This name should match a model name returned by the `ListModels` method. Format: `models/{model}`", - "location": "path", - "pattern": "^models/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v1beta3/{+name}", - "description": "Gets information about a specific Model.", - "httpMethod": "GET", - "id": "generativelanguage.models.get" - }, - "countMessageTokens": { - "parameterOrder": [ - "model" - ], - "request": { - "$ref": "CountMessageTokensRequest" - }, - "path": "v1beta3/{+model}:countMessageTokens", - "flatPath": "v1beta3/models/{modelsId}:countMessageTokens", - "response": { - "$ref": "CountMessageTokensResponse" - }, - "parameters": { - "model": { - "required": true, - "type": "string", - "pattern": "^models/[^/]+$", - "description": "Required. The model's resource name. This serves as an ID for the Model to use. This name should match a model name returned by the `ListModels` method. Format: `models/{model}`", - "location": "path" - } - }, - "id": "generativelanguage.models.countMessageTokens", - "httpMethod": "POST", - "description": "Runs a model's tokenizer on a string and returns the token count." - }, - "list": { - "id": "generativelanguage.models.list", - "flatPath": "v1beta3/models", - "description": "Lists models available through the API.", - "parameters": { - "pageToken": { - "type": "string", - "description": "A page token, received from a previous `ListModels` call. Provide the `page_token` returned by one request as an argument to the next request to retrieve the next page. When paginating, all other parameters provided to `ListModels` must match the call that provided the page token.", - "location": "query" - }, - "pageSize": { - "type": "integer", - "description": "The maximum number of `Models` to return (per page). 
The service may return fewer models. If unspecified, at most 50 models will be returned per page. This method returns at most 1000 models per page, even if you pass a larger page_size.", - "location": "query", - "format": "int32" - } - }, - "httpMethod": "GET", - "parameterOrder": [], - "path": "v1beta3/models", - "response": { - "$ref": "ListModelsResponse" - } - }, - "embedText": { - "httpMethod": "POST", - "id": "generativelanguage.models.embedText", - "response": { - "$ref": "EmbedTextResponse" - }, - "description": "Generates an embedding from the model given an input message.", - "flatPath": "v1beta3/models/{modelsId}:embedText", - "parameterOrder": [ - "model" - ], - "path": "v1beta3/{+model}:embedText", - "parameters": { - "model": { - "required": true, - "location": "path", - "pattern": "^models/[^/]+$", - "description": "Required. The model name to use with the format model=models/{model}.", - "type": "string" - } - }, - "request": { - "$ref": "EmbedTextRequest" - } - }, - "batchEmbedText": { - "flatPath": "v1beta3/models/{modelsId}:batchEmbedText", - "parameters": { - "model": { - "location": "path", - "description": "Required. The name of the `Model` to use for generating the embedding. Examples: models/embedding-gecko-001", - "pattern": "^models/[^/]+$", - "required": true, - "type": "string" - } - }, - "response": { - "$ref": "BatchEmbedTextResponse" - }, - "parameterOrder": [ - "model" - ], - "httpMethod": "POST", - "request": { - "$ref": "BatchEmbedTextRequest" - }, - "id": "generativelanguage.models.batchEmbedText", - "description": "Generates multiple embeddings from the model given input text in a synchronous call.", - "path": "v1beta3/{+model}:batchEmbedText" - }, - "generateText": { - "parameters": { - "model": { - "required": true, - "description": "Required. The name of the `Model` or `TunedModel` to use for generating the completion. Examples: models/text-bison-001 tunedModels/sentence-translator-u3b7m", - "location": "path", - "pattern": "^models/[^/]+$", - "type": "string" - } - }, - "httpMethod": "POST", - "request": { - "$ref": "GenerateTextRequest" - }, - "path": "v1beta3/{+model}:generateText", - "id": "generativelanguage.models.generateText", - "description": "Generates a response from the model given an input message.", - "flatPath": "v1beta3/models/{modelsId}:generateText", - "response": { - "$ref": "GenerateTextResponse" - }, - "parameterOrder": [ - "model" - ] - }, - "countTextTokens": { - "path": "v1beta3/{+model}:countTextTokens", - "flatPath": "v1beta3/models/{modelsId}:countTextTokens", - "response": { - "$ref": "CountTextTokensResponse" - }, - "parameterOrder": [ - "model" - ], - "request": { - "$ref": "CountTextTokensRequest" - }, - "id": "generativelanguage.models.countTextTokens", - "parameters": { - "model": { - "type": "string", - "pattern": "^models/[^/]+$", - "required": true, - "location": "path", - "description": "Required. The model's resource name. This serves as an ID for the Model to use. This name should match a model name returned by the `ListModels` method. 
Format: `models/{model}`" - } - }, - "description": "Runs a model's tokenizer on a text and returns the token count.", - "httpMethod": "POST" - } - } - } - }, - "id": "generativelanguage:v1beta3" -} diff --git a/Tests/GoogleAITests/ChatTests.swift b/Tests/GoogleAITests/ChatTests.swift new file mode 100644 index 0000000..8c8a3a8 --- /dev/null +++ b/Tests/GoogleAITests/ChatTests.swift @@ -0,0 +1,92 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import Foundation +@testable import GoogleGenerativeAI +import XCTest + +@available(iOS 15.0, tvOS 15.0, *) +final class ChatTests: XCTestCase { + var urlSession: URLSession! + + override func setUp() { + let configuration = URLSessionConfiguration.default + configuration.protocolClasses = [MockURLProtocol.self] + urlSession = URLSession(configuration: configuration) + } + + override func tearDown() { + MockURLProtocol.requestHandler = nil + } + + func testMergingText() async throws { + let fileURL = try XCTUnwrap(Bundle.module.url( + forResource: "ExampleStreamingResponse", + withExtension: "json" + )) + + MockURLProtocol.requestHandler = { request in + let response = HTTPURLResponse( + url: request.url!, + statusCode: 200, + httpVersion: nil, + headerFields: nil + )! + return (response, fileURL.lines) + } + + let model = GenerativeModel(name: "my-model", apiKey: "API_KEY", urlSession: urlSession) + let chat = Chat(model: model, history: []) + let input = "Test input" + let stream = chat.sendMessageStream(input) + + // Ensure the values are parsed correctly + for try await value in stream { + XCTAssertNotNil(value.text) + } + + XCTAssertEqual(chat.history.count, 2) + XCTAssertEqual(chat.history[0].parts[0].text, input) + + let finalText = """ + As an AI language model, I am designed to help you with a wide range of topics and \ + questions. Here are some examples of the types of questions you can ask me: + - **General knowledge:** Ask me about a variety of topics, including history, science, \ + technology, art, culture, and more. + - **Creative writing:** Request me to write a story, poem, or any other creative piece \ + based on your specifications. + - **Language translation:** I can translate text from one language to another. + - **Math problems:** I can solve math equations and provide step-by-step solutions. + - **Trivia and quizzes:** Test your knowledge by asking me trivia questions or creating \ + quizzes on various subjects. + - **Conversation:** Engage in casual conversation on any topic of your interest. + - **Advice and suggestions:** Seek advice on various matters, such as relationships, \ + career choices, or personal growth. + - **Entertainment:** Request jokes, riddles, or fun facts to lighten up your day. + - **Code generation:** Ask me to write code snippets in different programming languages. + - **Real-time information:** Inquire about current events, weather conditions, or other \ + up-to-date information. 
+ - **Creative ideas:** Generate creative ideas for projects, hobbies, or any other endeavor. + - **Health and wellness:** Get information about health, fitness, and nutrition. + - **Travel and geography:** Ask about places, landmarks, cultures, and travel tips. + + Remember that my responses are based on the information available to me up until my \ + training cutoff date in September 2021. For the most up-to-date information, especially \ + on rapidly changing topics, it's always a good idea to consult reliable and recent sources. + """ + let assembledExpectation = ModelContent(role: "model", parts: finalText) + XCTAssertEqual(chat.history[0].parts[0].text, input) + XCTAssertEqual(chat.history[1], assembledExpectation) + } +} diff --git a/Tests/GoogleAITests/GenerateContentResponses/streaming-failure-empty-content.txt b/Tests/GoogleAITests/GenerateContentResponses/streaming-failure-empty-content.txt new file mode 100644 index 0000000..5762b51 --- /dev/null +++ b/Tests/GoogleAITests/GenerateContentResponses/streaming-failure-empty-content.txt @@ -0,0 +1 @@ +data: {"candidates": [{"content": {},"index": 0}],"promptFeedback": {"safetyRatings": [{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}]}} diff --git a/Tests/GoogleAITests/GenerateContentResponses/streaming-failure-finish-reason-safety.txt b/Tests/GoogleAITests/GenerateContentResponses/streaming-failure-finish-reason-safety.txt new file mode 100644 index 0000000..b73c75c --- /dev/null +++ b/Tests/GoogleAITests/GenerateContentResponses/streaming-failure-finish-reason-safety.txt @@ -0,0 +1,2 @@ +data: {"candidates": [{"content": {"parts": [{"text": "No"}]},"finishReason": "SAFETY","index": 0,"safetyRatings": [{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "HIGH"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}]}],"promptFeedback": {"safetyRatings": [{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}]}} + diff --git a/Tests/GoogleAITests/GenerateContentResponses/streaming-failure-prompt-blocked-safety.txt b/Tests/GoogleAITests/GenerateContentResponses/streaming-failure-prompt-blocked-safety.txt new file mode 100644 index 0000000..58c914a --- /dev/null +++ b/Tests/GoogleAITests/GenerateContentResponses/streaming-failure-prompt-blocked-safety.txt @@ -0,0 +1,2 @@ +data: {"promptFeedback": {"blockReason": "SAFETY","safetyRatings": [{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "HIGH"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}]}} + diff --git a/Tests/GoogleAITests/GenerateContentResponses/streaming-failure-recitation-no-content.txt b/Tests/GoogleAITests/GenerateContentResponses/streaming-failure-recitation-no-content.txt new file mode 100644 index 0000000..60ec91d --- /dev/null +++ 
b/Tests/GoogleAITests/GenerateContentResponses/streaming-failure-recitation-no-content.txt @@ -0,0 +1,6 @@ +data: {"candidates": [{"content": {"parts": [{"text": "Some information"}],"role": "model"},"finishReason": "STOP","index": 0,"safetyRatings": [{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}]}],"promptFeedback": {"safetyRatings": [{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}]}} + +data: {"candidates": [{"content": {"parts": [{"text": "Some information cited from an external source"}],"role": "model"},"finishReason": "STOP","index": 0,"safetyRatings": [{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "LOW"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}],"citationMetadata": {"citationSources": [{"startIndex": 30,"endIndex": 179,"uri": "https://www.example.com/some-citation","license": ""}]}}]} + +data: {"candidates": [{"finishReason": "RECITATION","index": 0}]} + diff --git a/Tests/GoogleAITests/GenerateContentResponses/streaming-failure-unknown-finish-enum.txt b/Tests/GoogleAITests/GenerateContentResponses/streaming-failure-unknown-finish-enum.txt new file mode 100644 index 0000000..6194abd --- /dev/null +++ b/Tests/GoogleAITests/GenerateContentResponses/streaming-failure-unknown-finish-enum.txt @@ -0,0 +1,11 @@ +data: {"candidates": [{"content": {"parts": [{"text": "**Cats:**\n\n- **Physical Characteristics:**\n - Size: Cats come"}]},"finishReason": "STOP","index": 0,"safetyRatings": [{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}]}],"promptFeedback": {"safetyRatings": [{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}]}} + +data: {"candidates": [{"content": {"parts": [{"text": " in a wide range of sizes, from small breeds like the Singapura to large breeds like the Maine Coon.\n - Fur: Cats have soft, furry coats"}]},"finishReason": "STOP","index": 0,"safetyRatings": [{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}]}]} + +data: {"candidates": [{"content": {"parts": [{"text": " that can vary in length and texture depending on the breed.\n - Eyes: Cats have large, expressive eyes that can be various colors, including green, blue, yellow, and hazel.\n - Ears: Cats have pointed, erect ears that are sensitive to 
sound.\n - Tail: Cats have long"}]},"finishReason": "STOP","index": 0,"safetyRatings": [{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}]}]} + +data: {"candidates": [{"content": {"parts": [{"text": ", flexible tails that they use for balance and communication.\n\n- **Behavior and Personality:**\n - Independent: Cats are often described as independent animals that enjoy spending time alone.\n - Affectionate: Despite their independent nature, cats can be very affectionate and form strong bonds with their owners.\n - Playful: Cats are naturally playful and enjoy engaging in activities such as chasing toys, climbing, and pouncing.\n - Curious: Cats are curious creatures that love to explore their surroundings.\n - Vocal: Cats communicate through a variety of vocalizations, including meows, purrs, hisses, and grow"}]},"finishReason": "STOP","index": 0,"safetyRatings": [{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}]}]} + +data: {"candidates": [{"content": {"parts": [{"text": "ls.\n\n- **Health and Care:**\n - Diet: Cats are obligate carnivores, meaning they require animal-based protein for optimal health.\n - Grooming: Cats spend a significant amount of time grooming themselves to keep their fur clean and free of mats.\n - Exercise: Cats need regular exercise to stay healthy and active. 
This can be achieved through play sessions or access to outdoor space.\n - Veterinary Care: Regular veterinary checkups are essential for maintaining a cat's health and detecting any potential health issues early on.\n\n**Dogs:**\n\n- **Physical Characteristics:**\n - Size: Dogs come in a wide range of sizes, from small breeds like the Chihuahua to giant breeds like the Great Dane.\n - Fur: Dogs have fur coats that can vary in length, texture, and color depending on the breed.\n - Eyes: Dogs have expressive eyes that can be various colors, including brown, blue, green, and hazel.\n - Ears: Dogs have floppy or erect ears that are sensitive to sound.\n - Tail: Dogs have long, wagging tails that they use for communication and expressing emotions.\n\n- **Behavior and Personality:**\n - Loyal: Dogs are known for their loyalty and"}]},"finishReason": "STOP","index": 0,"safetyRatings": [{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}]}]} + +data: {"candidates": [{"content": {"parts": [{"text": " devotion to their owners.\n - Friendly: Dogs are generally friendly and outgoing animals that enjoy interacting with people and other animals.\n - Playful: Dogs are playful and energetic creatures that love to engage in activities such as fetching, running, and playing with toys.\n - Trainable: Dogs are highly trainable and can learn a variety of commands and tricks.\n - Vocal: Dogs communicate through a variety of vocalizations, including barking, howling, whining, and growling.\n\n- **Health and Care:**\n - Diet: Dogs are omnivores and can eat a variety of foods, including meat, vegetables, and grains.\n - Grooming: Dogs require regular grooming to keep their fur clean and free of mats. The frequency of grooming depends on the breed and coat type.\n - Exercise: Dogs need regular exercise to stay healthy and active. 
The amount of exercise required varies depending on the breed and age of the dog.\n - Veterinary Care: Regular veterinary checkups are essential for maintaining a dog's health and detecting any potential health issues early on."}]},"finishReason": "FAKE_ENUM","index": 0,"safetyRatings": [{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT_NEW_ENUM","probability": "NEGLIGIBLE_UNKNOWN_ENUM"}]}]} diff --git a/Tests/GoogleAITests/GenerateContentResponses/streaming-success-basic-reply-long.txt b/Tests/GoogleAITests/GenerateContentResponses/streaming-success-basic-reply-long.txt new file mode 100644 index 0000000..fe662e6 --- /dev/null +++ b/Tests/GoogleAITests/GenerateContentResponses/streaming-success-basic-reply-long.txt @@ -0,0 +1,12 @@ +data: {"candidates": [{"content": {"parts": [{"text": "**Cats:**\n\n- **Physical Characteristics:**\n - Size: Cats come"}]},"finishReason": "STOP","index": 0,"safetyRatings": [{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}]}],"promptFeedback": {"safetyRatings": [{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}]}} + +data: {"candidates": [{"content": {"parts": [{"text": " in a wide range of sizes, from small breeds like the Singapura to large breeds like the Maine Coon.\n - Fur: Cats have soft, furry coats"}]},"finishReason": "STOP","index": 0,"safetyRatings": [{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}]}]} + +data: {"candidates": [{"content": {"parts": [{"text": " that can vary in length and texture depending on the breed.\n - Eyes: Cats have large, expressive eyes that can be various colors, including green, blue, yellow, and hazel.\n - Ears: Cats have pointed, erect ears that are sensitive to sound.\n - Tail: Cats have long"}]},"finishReason": "STOP","index": 0,"safetyRatings": [{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}]}]} + +data: {"candidates": [{"content": {"parts": [{"text": ", flexible tails that they use for balance and communication.\n\n- **Behavior and Personality:**\n - Independent: Cats are often described as independent animals that enjoy spending time alone.\n - Affectionate: Despite their independent nature, cats can be very affectionate and form strong bonds with their owners.\n - Playful: Cats are naturally playful and enjoy engaging in activities such as chasing toys, climbing, and pouncing.\n - Curious: Cats are curious creatures that love to explore their 
surroundings.\n - Vocal: Cats communicate through a variety of vocalizations, including meows, purrs, hisses, and grow"}]},"finishReason": "STOP","index": 0,"safetyRatings": [{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}]}]} + +data: {"candidates": [{"content": {"parts": [{"text": "ls.\n\n- **Health and Care:**\n - Diet: Cats are obligate carnivores, meaning they require animal-based protein for optimal health.\n - Grooming: Cats spend a significant amount of time grooming themselves to keep their fur clean and free of mats.\n - Exercise: Cats need regular exercise to stay healthy and active. This can be achieved through play sessions or access to outdoor space.\n - Veterinary Care: Regular veterinary checkups are essential for maintaining a cat's health and detecting any potential health issues early on.\n\n**Dogs:**\n\n- **Physical Characteristics:**\n - Size: Dogs come in a wide range of sizes, from small breeds like the Chihuahua to giant breeds like the Great Dane.\n - Fur: Dogs have fur coats that can vary in length, texture, and color depending on the breed.\n - Eyes: Dogs have expressive eyes that can be various colors, including brown, blue, green, and hazel.\n - Ears: Dogs have floppy or erect ears that are sensitive to sound.\n - Tail: Dogs have long, wagging tails that they use for communication and expressing emotions.\n\n- **Behavior and Personality:**\n - Loyal: Dogs are known for their loyalty and"}]},"finishReason": "STOP","index": 0,"safetyRatings": [{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}]}]} + +data: {"candidates": [{"content": {"parts": [{"text": " devotion to their owners.\n - Friendly: Dogs are generally friendly and outgoing animals that enjoy interacting with people and other animals.\n - Playful: Dogs are playful and energetic creatures that love to engage in activities such as fetching, running, and playing with toys.\n - Trainable: Dogs are highly trainable and can learn a variety of commands and tricks.\n - Vocal: Dogs communicate through a variety of vocalizations, including barking, howling, whining, and growling.\n\n- **Health and Care:**\n - Diet: Dogs are omnivores and can eat a variety of foods, including meat, vegetables, and grains.\n - Grooming: Dogs require regular grooming to keep their fur clean and free of mats. The frequency of grooming depends on the breed and coat type.\n - Exercise: Dogs need regular exercise to stay healthy and active. 
The amount of exercise required varies depending on the breed and age of the dog.\n - Veterinary Care: Regular veterinary checkups are essential for maintaining a dog's health and detecting any potential health issues early on."}]},"finishReason": "STOP","index": 0,"safetyRatings": [{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}]}]} + diff --git a/Tests/GoogleAITests/GenerateContentResponses/streaming-success-basic-reply-short.txt b/Tests/GoogleAITests/GenerateContentResponses/streaming-success-basic-reply-short.txt new file mode 100644 index 0000000..a7f5476 --- /dev/null +++ b/Tests/GoogleAITests/GenerateContentResponses/streaming-success-basic-reply-short.txt @@ -0,0 +1,2 @@ +data: {"candidates": [{"content": {"parts": [{"text": "Cheyenne"}]},"finishReason": "STOP","index": 0,"safetyRatings": [{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}]}],"promptFeedback": {"safetyRatings": [{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}]}} + diff --git a/Tests/GoogleAITests/GenerateContentResponses/streaming-success-citations.txt b/Tests/GoogleAITests/GenerateContentResponses/streaming-success-citations.txt new file mode 100644 index 0000000..4f50be7 --- /dev/null +++ b/Tests/GoogleAITests/GenerateContentResponses/streaming-success-citations.txt @@ -0,0 +1,13 @@ +data: {"candidates": [{"content": {"parts": [{"text": "Some information"}],"role": "model"},"finishReason": "STOP","index": 0,"safetyRatings": [{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}]}],"promptFeedback": {"safetyRatings": [{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}]}} + +data: {"candidates": [{"content": {"parts": [{"text": " More information"}],"role": "model"},"finishReason": "STOP","index": 0,"safetyRatings": [{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}]}]} + +data: {"candidates": [{"content": {"parts": [{"text": ", Even more information"}],"role": "model"},"finishReason": "STOP","index": 0,"safetyRatings": [{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": 
"HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}]}]} + +data: {"candidates": [{"content": {"parts": [{"text": " Some information cited from an external source"}],"role": "model"},"finishReason": "STOP","index": 0,"safetyRatings": [{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}],"citationMetadata": {"citationSources": [{"startIndex": 574,"endIndex": 705,"uri": "https://www.example.com/citation-1","license": ""},{"startIndex": 899,"endIndex": 1026,"uri": "https://www.example.com/citation-2","license": ""}]}}]} + +data: {"candidates": [{"content": {"parts": [{"text": "More information cited from an external source"}],"role": "model"},"finishReason": "STOP","index": 0,"safetyRatings": [{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}],"citationMetadata": {"citationSources": [{"startIndex": 574,"endIndex": 705,"uri": "https://www.example.com/citation-3","license": ""},{"startIndex": 899,"endIndex": 1026,"uri": "https://www.example.com/citation-4","license": ""}]}}]} + +data: {"candidates": [{"content": {"parts": [{"text": "Even more information cited from an external source"}],"role": "model"},"finishReason": "STOP","index": 0,"safetyRatings": [{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}],"citationMetadata": {"citationSources": [{"startIndex": 574,"endIndex": 705,"uri": "https://www.example.com/citation-5","license": ""},{"startIndex": 899,"endIndex": 1026,"uri": "https://www.example.com/citation-6","license": ""}]}}]} + +data: {"candidates": [{"content": {"parts": [{"text": "Physics (YouTube Channel)"}],"role": "model"},"finishReason": "STOP","index": 0,"safetyRatings": [{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}],"citationMetadata": {"citationSources": [{"startIndex": 574,"endIndex": 705,"uri": "https://www.google.com","license": ""},{"startIndex": 899,"endIndex": 1026,"uri": "https://www.google.com","license": ""}]}}]} diff --git a/Tests/GoogleAITests/GenerateContentResponses/streaming-success-unknown-safety-enum.txt b/Tests/GoogleAITests/GenerateContentResponses/streaming-success-unknown-safety-enum.txt new file mode 100644 index 0000000..8f7a8ff --- /dev/null +++ b/Tests/GoogleAITests/GenerateContentResponses/streaming-success-unknown-safety-enum.txt @@ -0,0 +1,2 @@ +data: {"candidates": [{"content": {"parts": [{"text": "Cheyenne"}]},"finishReason": "STOP","index": 0,"safetyRatings": [{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": 
"HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_SOMETHING_NEW","probability": "NEGLIGIBLE"}]}],"promptFeedback": {"safetyRatings": [{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}]}} + diff --git a/Tests/GoogleAITests/GenerateContentResponses/unary-failure-api-key.json b/Tests/GoogleAITests/GenerateContentResponses/unary-failure-api-key.json new file mode 100644 index 0000000..ecf6f6b --- /dev/null +++ b/Tests/GoogleAITests/GenerateContentResponses/unary-failure-api-key.json @@ -0,0 +1,21 @@ +{ + "error": { + "code": 400, + "message": "API key not valid. Please pass a valid API key.", + "status": "INVALID_ARGUMENT", + "details": [ + { + "@type": "type.googleapis.com/google.rpc.ErrorInfo", + "reason": "API_KEY_INVALID", + "domain": "googleapis.com", + "metadata": { + "service": "generativelanguage.googleapis.com" + } + }, + { + "@type": "type.googleapis.com/google.rpc.DebugInfo", + "detail": "Invalid API key: AIzv00G7VmUCUeC-5OglO3hcXM" + } + ] + } +} diff --git a/Tests/GoogleAITests/GenerateContentResponses/unary-failure-empty-content.json b/Tests/GoogleAITests/GenerateContentResponses/unary-failure-empty-content.json new file mode 100644 index 0000000..4e18896 --- /dev/null +++ b/Tests/GoogleAITests/GenerateContentResponses/unary-failure-empty-content.json @@ -0,0 +1,28 @@ +{ + "candidates": [ + { + "content": {}, + "index": 0 + } + ], + "promptFeedback": { + "safetyRatings": [ + { + "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", + "probability": "NEGLIGIBLE" + }, + { + "category": "HARM_CATEGORY_HATE_SPEECH", + "probability": "NEGLIGIBLE" + }, + { + "category": "HARM_CATEGORY_HARASSMENT", + "probability": "NEGLIGIBLE" + }, + { + "category": "HARM_CATEGORY_DANGEROUS_CONTENT", + "probability": "NEGLIGIBLE" + } + ] + } +} diff --git a/Tests/GoogleAITests/GenerateContentResponses/unary-failure-finish-reason-safety-no-content.json b/Tests/GoogleAITests/GenerateContentResponses/unary-failure-finish-reason-safety-no-content.json new file mode 100644 index 0000000..03958d4 --- /dev/null +++ b/Tests/GoogleAITests/GenerateContentResponses/unary-failure-finish-reason-safety-no-content.json @@ -0,0 +1,46 @@ +{ + "candidates": [ + { + "finishReason": "SAFETY", + "index": 0, + "safetyRatings": [ + { + "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", + "probability": "NEGLIGIBLE" + }, + { + "category": "HARM_CATEGORY_HATE_SPEECH", + "probability": "HIGH" + }, + { + "category": "HARM_CATEGORY_HARASSMENT", + "probability": "NEGLIGIBLE" + }, + { + "category": "HARM_CATEGORY_DANGEROUS_CONTENT", + "probability": "NEGLIGIBLE" + } + ] + } + ], + "promptFeedback": { + "safetyRatings": [ + { + "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", + "probability": "NEGLIGIBLE" + }, + { + "category": "HARM_CATEGORY_HATE_SPEECH", + "probability": "NEGLIGIBLE" + }, + { + "category": "HARM_CATEGORY_HARASSMENT", + "probability": "NEGLIGIBLE" + }, + { + "category": "HARM_CATEGORY_DANGEROUS_CONTENT", + "probability": "NEGLIGIBLE" + } + ] + } +} diff --git a/Tests/GoogleAITests/GenerateContentResponses/unary-failure-finish-reason-safety.json b/Tests/GoogleAITests/GenerateContentResponses/unary-failure-finish-reason-safety.json new file mode 100644 index 0000000..3249c73 --- /dev/null +++ 
b/Tests/GoogleAITests/GenerateContentResponses/unary-failure-finish-reason-safety.json @@ -0,0 +1,53 @@ +{ + "candidates": [ + { + "content": { + "parts": [ + { + "text": "No" + } + ] + }, + "finishReason": "SAFETY", + "index": 0, + "safetyRatings": [ + { + "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", + "probability": "NEGLIGIBLE" + }, + { + "category": "HARM_CATEGORY_HATE_SPEECH", + "probability": "HIGH" + }, + { + "category": "HARM_CATEGORY_HARASSMENT", + "probability": "NEGLIGIBLE" + }, + { + "category": "HARM_CATEGORY_DANGEROUS_CONTENT", + "probability": "NEGLIGIBLE" + } + ] + } + ], + "promptFeedback": { + "safetyRatings": [ + { + "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", + "probability": "NEGLIGIBLE" + }, + { + "category": "HARM_CATEGORY_HATE_SPEECH", + "probability": "NEGLIGIBLE" + }, + { + "category": "HARM_CATEGORY_HARASSMENT", + "probability": "NEGLIGIBLE" + }, + { + "category": "HARM_CATEGORY_DANGEROUS_CONTENT", + "probability": "NEGLIGIBLE" + } + ] + } +} diff --git a/Tests/GoogleAITests/GenerateContentResponses/unary-failure-image-rejected.json b/Tests/GoogleAITests/GenerateContentResponses/unary-failure-image-rejected.json new file mode 100644 index 0000000..9dacdc7 --- /dev/null +++ b/Tests/GoogleAITests/GenerateContentResponses/unary-failure-image-rejected.json @@ -0,0 +1,13 @@ +{ + "error": { + "code": 400, + "message": "Request contains an invalid argument.", + "status": "INVALID_ARGUMENT", + "details": [ + { + "@type": "type.googleapis.com/google.rpc.DebugInfo", + "detail": "[ORIGINAL ERROR] generic::invalid_argument: invalid status photos.thumbnailer.Status.Code::5: Source image 0 too short" + } + ] + } +} diff --git a/Tests/GoogleAITests/GenerateContentResponses/unary-failure-invalid-response.json b/Tests/GoogleAITests/GenerateContentResponses/unary-failure-invalid-response.json new file mode 100644 index 0000000..49d05e1 --- /dev/null +++ b/Tests/GoogleAITests/GenerateContentResponses/unary-failure-invalid-response.json @@ -0,0 +1,14 @@ +{ + "this": [ + { + "is": { + "not": [ + { + "a": "valid" + } + ] + }, + "response": {} + } + ] +} diff --git a/Tests/GoogleAITests/GenerateContentResponses/unary-failure-malformed-content.json b/Tests/GoogleAITests/GenerateContentResponses/unary-failure-malformed-content.json new file mode 100644 index 0000000..737f2e0 --- /dev/null +++ b/Tests/GoogleAITests/GenerateContentResponses/unary-failure-malformed-content.json @@ -0,0 +1,30 @@ +{ + "candidates": [ + { + "content": { + "invalid-field": true + }, + "index": 0 + } + ], + "promptFeedback": { + "safetyRatings": [ + { + "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", + "probability": "NEGLIGIBLE" + }, + { + "category": "HARM_CATEGORY_HATE_SPEECH", + "probability": "NEGLIGIBLE" + }, + { + "category": "HARM_CATEGORY_HARASSMENT", + "probability": "NEGLIGIBLE" + }, + { + "category": "HARM_CATEGORY_DANGEROUS_CONTENT", + "probability": "NEGLIGIBLE" + } + ] + } +} diff --git a/Tests/GoogleAITests/GenerateContentResponses/unary-failure-prompt-blocked-safety.json b/Tests/GoogleAITests/GenerateContentResponses/unary-failure-prompt-blocked-safety.json new file mode 100644 index 0000000..9d2abbb --- /dev/null +++ b/Tests/GoogleAITests/GenerateContentResponses/unary-failure-prompt-blocked-safety.json @@ -0,0 +1,23 @@ +{ + "promptFeedback": { + "blockReason": "SAFETY", + "safetyRatings": [ + { + "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", + "probability": "NEGLIGIBLE" + }, + { + "category": "HARM_CATEGORY_HATE_SPEECH", + "probability": "HIGH" + }, + { + "category": 
"HARM_CATEGORY_HARASSMENT", + "probability": "NEGLIGIBLE" + }, + { + "category": "HARM_CATEGORY_DANGEROUS_CONTENT", + "probability": "NEGLIGIBLE" + } + ] + } +} diff --git a/Tests/GoogleAITests/GenerateContentResponses/unary-failure-unknown-model.json b/Tests/GoogleAITests/GenerateContentResponses/unary-failure-unknown-model.json new file mode 100644 index 0000000..60b3f55 --- /dev/null +++ b/Tests/GoogleAITests/GenerateContentResponses/unary-failure-unknown-model.json @@ -0,0 +1,13 @@ +{ + "error": { + "code": 404, + "message": "models/unknown is not found for API version v1, or is not supported for GenerateContent. Call ListModels to see the list of available models and their supported methods.", + "status": "NOT_FOUND", + "details": [ + { + "@type": "type.googleapis.com/google.rpc.DebugInfo", + "detail": "[ORIGINAL ERROR] generic::not_found: models/unknown is not found for API version v1, or is not supported for GenerateContent. Call ListModels to see the list of available models and their supported methods. [google.rpc.error_details_ext] { message: \"models/unknown is not found for API version v1, or is not supported for GenerateContent. Call ListModels to see the list of available models and their supported methods.\" }" + } + ] + } +} diff --git a/Tests/GoogleAITests/GenerateContentResponses/unary-success-basic-reply-long.json b/Tests/GoogleAITests/GenerateContentResponses/unary-success-basic-reply-long.json new file mode 100644 index 0000000..3639b79 --- /dev/null +++ b/Tests/GoogleAITests/GenerateContentResponses/unary-success-basic-reply-long.json @@ -0,0 +1,52 @@ +{ + "candidates": [ + { + "content": { + "parts": [ + { + "text": "1. **Use Freshly Ground Coffee**:\n - Grind your coffee beans just before brewing to preserve their flavor and aroma.\n - Use a burr grinder for a consistent grind size.\n\n\n2. **Choose the Right Water**:\n - Use filtered or spring water for the best taste.\n - Avoid using tap water, as it may contain impurities that can affect the flavor.\n\n\n3. **Measure Accurately**:\n - Use a kitchen scale to measure your coffee and water precisely.\n - A general rule of thumb is to use 1:16 ratio of coffee to water (e.g., 15 grams of coffee to 240 grams of water).\n\n\n4. **Preheat Your Equipment**:\n - Preheat your coffee maker or espresso machine before brewing to ensure a consistent temperature.\n\n\n5. **Control the Water Temperature**:\n - The ideal water temperature for brewing coffee is between 195°F (90°C) and 205°F (96°C).\n - Too hot water can extract bitter flavors, while too cold water won't extract enough flavor.\n\n\n6. **Steep the Coffee**:\n - For drip coffee, let the water slowly drip through the coffee grounds for optimal extraction.\n - For pour-over coffee, pour the water in a circular motion over the coffee grounds, allowing it to steep for 30-45 seconds before continuing.\n\n\n7. **Clean Your Equipment**:\n - Regularly clean your coffee maker or espresso machine to prevent the buildup of oils and residue that can affect the taste of your coffee.\n\n\n8. **Experiment with Different Coffee Beans**:\n - Try different coffee beans from various regions and roasts to find your preferred flavor profile.\n - Experiment with different grind sizes and brewing methods to optimize the flavor of your chosen beans.\n\n\n9. 
**Store Coffee Properly**:\n - Store your coffee beans in an airtight container in a cool, dark place to preserve their freshness and flavor.\n - Avoid storing coffee in the refrigerator or freezer, as this can cause condensation and affect the taste.\n\n\n10. **Enjoy Freshly Brewed Coffee**:\n - Drink your coffee as soon as possible after brewing to enjoy its peak flavor and aroma.\n - Coffee starts to lose its flavor and aroma within 30 minutes of brewing." + } + ] + }, + "index": 0, + "safetyRatings": [ + { + "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", + "probability": "NEGLIGIBLE" + }, + { + "category": "HARM_CATEGORY_HATE_SPEECH", + "probability": "NEGLIGIBLE" + }, + { + "category": "HARM_CATEGORY_HARASSMENT", + "probability": "NEGLIGIBLE" + }, + { + "category": "HARM_CATEGORY_DANGEROUS_CONTENT", + "probability": "NEGLIGIBLE" + } + ] + } + ], + "promptFeedback": { + "safetyRatings": [ + { + "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", + "probability": "NEGLIGIBLE" + }, + { + "category": "HARM_CATEGORY_HATE_SPEECH", + "probability": "NEGLIGIBLE" + }, + { + "category": "HARM_CATEGORY_HARASSMENT", + "probability": "NEGLIGIBLE" + }, + { + "category": "HARM_CATEGORY_DANGEROUS_CONTENT", + "probability": "NEGLIGIBLE" + } + ] + } +} diff --git a/Tests/GoogleAITests/GenerateContentResponses/unary-success-basic-reply-short.json b/Tests/GoogleAITests/GenerateContentResponses/unary-success-basic-reply-short.json new file mode 100644 index 0000000..40a9a6d --- /dev/null +++ b/Tests/GoogleAITests/GenerateContentResponses/unary-success-basic-reply-short.json @@ -0,0 +1,54 @@ +{ + "candidates": [ + { + "content": { + "parts": [ + { + "text": "Mountain View, California, United States" + } + ], + "role": "model" + }, + "finishReason": "STOP", + "index": 0, + "safetyRatings": [ + { + "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", + "probability": "NEGLIGIBLE" + }, + { + "category": "HARM_CATEGORY_HATE_SPEECH", + "probability": "NEGLIGIBLE" + }, + { + "category": "HARM_CATEGORY_HARASSMENT", + "probability": "NEGLIGIBLE" + }, + { + "category": "HARM_CATEGORY_DANGEROUS_CONTENT", + "probability": "NEGLIGIBLE" + } + ] + } + ], + "promptFeedback": { + "safetyRatings": [ + { + "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", + "probability": "NEGLIGIBLE" + }, + { + "category": "HARM_CATEGORY_HATE_SPEECH", + "probability": "NEGLIGIBLE" + }, + { + "category": "HARM_CATEGORY_HARASSMENT", + "probability": "NEGLIGIBLE" + }, + { + "category": "HARM_CATEGORY_DANGEROUS_CONTENT", + "probability": "NEGLIGIBLE" + } + ] + } +} diff --git a/Tests/GoogleAITests/GenerateContentResponses/unary-success-citations.json b/Tests/GoogleAITests/GenerateContentResponses/unary-success-citations.json new file mode 100644 index 0000000..ac99fcf --- /dev/null +++ b/Tests/GoogleAITests/GenerateContentResponses/unary-success-citations.json @@ -0,0 +1,64 @@ +{ + "candidates": [ + { + "content": { + "parts": [ + { + "text": "Some information cited from an external source" + } + ], + "role": "model" + }, + "finishReason": "STOP", + "index": 0, + "safetyRatings": [ + { + "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", + "probability": "NEGLIGIBLE" + }, + { + "category": "HARM_CATEGORY_HATE_SPEECH", + "probability": "NEGLIGIBLE" + }, + { + "category": "HARM_CATEGORY_HARASSMENT", + "probability": "NEGLIGIBLE" + }, + { + "category": "HARM_CATEGORY_DANGEROUS_CONTENT", + "probability": "NEGLIGIBLE" + } + ], + "citationMetadata": { + "citationSources": [ + { + "startIndex": 574, + "endIndex": 705, + "uri": "https://www.example.com/some-citation", 
+ "license": "" + } + ] + } + } + ], + "promptFeedback": { + "safetyRatings": [ + { + "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", + "probability": "NEGLIGIBLE" + }, + { + "category": "HARM_CATEGORY_HATE_SPEECH", + "probability": "NEGLIGIBLE" + }, + { + "category": "HARM_CATEGORY_HARASSMENT", + "probability": "NEGLIGIBLE" + }, + { + "category": "HARM_CATEGORY_DANGEROUS_CONTENT", + "probability": "NEGLIGIBLE" + } + ] + } +} diff --git a/Tests/GoogleAITests/GenerateContentResponses/unary-success-quote-reply.json b/Tests/GoogleAITests/GenerateContentResponses/unary-success-quote-reply.json new file mode 100644 index 0000000..873d334 --- /dev/null +++ b/Tests/GoogleAITests/GenerateContentResponses/unary-success-quote-reply.json @@ -0,0 +1,53 @@ +{ + "candidates": [ + { + "content": { + "parts": [ + { + "text": "1. \"The greatest glory in living lies not in never falling, but in rising every time we fall.\" - Nelson Mandela\n\n\n2. \"The future belongs to those who believe in the beauty of their dreams.\" - Eleanor Roosevelt\n\n\n3. \"It does not matter how slow you go so long as you do not stop.\" - Confucius\n\n\n4. \"If you want to live a happy life, tie it to a goal, not to people or things.\" - Albert Einstein\n\n\n5. \"The only person you are destined to become is the person you decide to be.\" - Ralph Waldo Emerson\n\n\n6. \"It's not how much you have, but how much you enjoy that makes happiness.\" - Charles Spurgeon\n\n\n7. \"The greatest wealth is to live content with little.\" - Plato\n\n\n8. \"The only way to do great work is to love what you do.\" - Steve Jobs\n\n\n9. \"Don't be afraid to fail. Be afraid not to try.\" - Michael Jordan\n\n\n10. \"The best way to predict the future is to create it.\" - Abraham Lincoln" + } + ] + }, + "finishReason": "STOP", + "index": 0, + "safetyRatings": [ + { + "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", + "probability": "NEGLIGIBLE" + }, + { + "category": "HARM_CATEGORY_HATE_SPEECH", + "probability": "NEGLIGIBLE" + }, + { + "category": "HARM_CATEGORY_HARASSMENT", + "probability": "NEGLIGIBLE" + }, + { + "category": "HARM_CATEGORY_DANGEROUS_CONTENT", + "probability": "NEGLIGIBLE" + } + ] + } + ], + "promptFeedback": { + "safetyRatings": [ + { + "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", + "probability": "NEGLIGIBLE" + }, + { + "category": "HARM_CATEGORY_HATE_SPEECH", + "probability": "NEGLIGIBLE" + }, + { + "category": "HARM_CATEGORY_HARASSMENT", + "probability": "NEGLIGIBLE" + }, + { + "category": "HARM_CATEGORY_DANGEROUS_CONTENT", + "probability": "NEGLIGIBLE" + } + ] + } +} diff --git a/Tests/GoogleAITests/GenerateContentResponses/unary-success-unknown-enum.json b/Tests/GoogleAITests/GenerateContentResponses/unary-success-unknown-enum.json new file mode 100644 index 0000000..b27a11a --- /dev/null +++ b/Tests/GoogleAITests/GenerateContentResponses/unary-success-unknown-enum.json @@ -0,0 +1,52 @@ +{ + "candidates": [ + { + "content": { + "parts": [ + { + "text": "1. **Use Freshly Ground Coffee**:\n - Grind your coffee beans just before brewing to preserve their flavor and aroma.\n - Use a burr grinder for a consistent grind size.\n\n\n2. **Choose the Right Water**:\n - Use filtered or spring water for the best taste.\n - Avoid using tap water, as it may contain impurities that can affect the flavor.\n\n\n3. **Measure Accurately**:\n - Use a kitchen scale to measure your coffee and water precisely.\n - A general rule of thumb is to use 1:16 ratio of coffee to water (e.g., 15 grams of coffee to 240 grams of water).\n\n\n4. 
**Preheat Your Equipment**:\n - Preheat your coffee maker or espresso machine before brewing to ensure a consistent temperature.\n\n\n5. **Control the Water Temperature**:\n - The ideal water temperature for brewing coffee is between 195°F (90°C) and 205°F (96°C).\n - Too hot water can extract bitter flavors, while too cold water won't extract enough flavor.\n\n\n6. **Steep the Coffee**:\n - For drip coffee, let the water slowly drip through the coffee grounds for optimal extraction.\n - For pour-over coffee, pour the water in a circular motion over the coffee grounds, allowing it to steep for 30-45 seconds before continuing.\n\n\n7. **Clean Your Equipment**:\n - Regularly clean your coffee maker or espresso machine to prevent the buildup of oils and residue that can affect the taste of your coffee.\n\n\n8. **Experiment with Different Coffee Beans**:\n - Try different coffee beans from various regions and roasts to find your preferred flavor profile.\n - Experiment with different grind sizes and brewing methods to optimize the flavor of your chosen beans.\n\n\n9. **Store Coffee Properly**:\n - Store your coffee beans in an airtight container in a cool, dark place to preserve their freshness and flavor.\n - Avoid storing coffee in the refrigerator or freezer, as this can cause condensation and affect the taste.\n\n\n10. **Enjoy Freshly Brewed Coffee**:\n - Drink your coffee as soon as possible after brewing to enjoy its peak flavor and aroma.\n - Coffee starts to lose its flavor and aroma within 30 minutes of brewing." + } + ] + }, + "index": 0, + "safetyRatings": [ + { + "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT_ENUM_NEW", + "probability": "NEGLIGIBLE" + }, + { + "category": "HARM_CATEGORY_HATE_SPEECH", + "probability": "NEGLIGIBLE" + }, + { + "category": "HARM_CATEGORY_HARASSMENT", + "probability": "NEGLIGIBLE" + }, + { + "category": "HARM_CATEGORY_DANGEROUS_CONTENT", + "probability": "NEGLIGIBLE" + } + ] + } + ], + "promptFeedback": { + "safetyRatings": [ + { + "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", + "probability": "NEGLIGIBLE" + }, + { + "category": "HARM_CATEGORY_HATE_SPEECH", + "probability": "NEGLIGIBLE" + }, + { + "category": "HARM_CATEGORY_HARASSMENT", + "probability": "NEGLIGIBLE" + }, + { + "category": "HARM_CATEGORY_DANGEROUS_CONTENT_LIKE_A_NEW_ENUM", + "probability": "NEGLIGIBLE_NEW_ENUM" + } + ] + } +} diff --git a/Tests/GoogleAITests/GenerativeModelTests.swift b/Tests/GoogleAITests/GenerativeModelTests.swift new file mode 100644 index 0000000..82e2fb6 --- /dev/null +++ b/Tests/GoogleAITests/GenerativeModelTests.swift @@ -0,0 +1,755 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +@testable import GoogleGenerativeAI +import XCTest + +@available(iOS 15.0, tvOS 15.0, *) +final class GenerativeModelTests: XCTestCase { + let testPrompt = "What sorts of questions can I ask you?" 
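+ // Shared expectation: the GenerateContentResponses fixtures used below report the four standard
+ // harm categories at NEGLIGIBLE probability, so tests can compare candidate and prompt-feedback
+ // safety ratings against this single constant.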
+ let safetyRatingsNegligible: [SafetyRating] = [ + .init(category: .sexuallyExplicit, probability: .negligible), + .init(category: .hateSpeech, probability: .negligible), + .init(category: .harassment, probability: .negligible), + .init(category: .dangerousContent, probability: .negligible), + ] + + var urlSession: URLSession! + var model: GenerativeModel! + + override func setUp() async throws { + let configuration = URLSessionConfiguration.default + configuration.protocolClasses = [MockURLProtocol.self] + urlSession = try XCTUnwrap(URLSession(configuration: configuration)) + model = GenerativeModel(name: "my-model", apiKey: "API_KEY", urlSession: urlSession) + } + + override func tearDown() { + MockURLProtocol.requestHandler = nil + } + + // MARK: - Generate Content + + func testGenerateContent_success_basicReplyLong() async throws { + MockURLProtocol + .requestHandler = try httpRequestHandler( + forResource: "unary-success-basic-reply-long", + withExtension: "json" + ) + + let content = try await model.generateContent(testPrompt) + + XCTAssertNotNil(content.text) + // TODO: Add assertions + } + + func testGenerateContent_success_basicReplyShort() async throws { + MockURLProtocol + .requestHandler = try httpRequestHandler( + forResource: "unary-success-basic-reply-short", + withExtension: "json" + ) + + let content = try await model + .generateContent("Where is the Google headquarters located?") + + XCTAssertEqual(content.candidates.count, 1) + let candidate = try XCTUnwrap(content.candidates.first) + let finishReason = try XCTUnwrap(candidate.finishReason) + XCTAssertEqual(finishReason, .stop) + XCTAssertEqual(candidate.safetyRatings, safetyRatingsNegligible) + XCTAssertEqual(candidate.content.parts.count, 1) + let part = try XCTUnwrap(candidate.content.parts.first) + XCTAssertEqual(part.text, "Mountain View, California, United States") + XCTAssertEqual(content.text, part.text) + let promptFeedback = try XCTUnwrap(content.promptFeedback) + XCTAssertNil(promptFeedback.blockReason) + XCTAssertEqual(promptFeedback.safetyRatings, safetyRatingsNegligible) + } + + func testGenerateContent_success_citations() async throws { + MockURLProtocol + .requestHandler = try httpRequestHandler( + forResource: "unary-success-citations", + withExtension: "json" + ) + + let content = try await model.generateContent(testPrompt) + + XCTAssertNotNil(content.text) + // TODO: Add assertions + } + + func testGenerateContent_success_quoteReply() async throws { + MockURLProtocol + .requestHandler = try httpRequestHandler( + forResource: "unary-success-quote-reply", + withExtension: "json" + ) + + let content = try await model.generateContent(testPrompt) + + XCTAssertNotNil(content.text) + // TODO: Add assertions + } + + func testGenerateContent_success_unknownEnum() async throws { + MockURLProtocol + .requestHandler = try httpRequestHandler( + forResource: "unary-success-unknown-enum", + withExtension: "json" + ) + + let content = try await model.generateContent(testPrompt) + + XCTAssertNotNil(content.text) + // TODO: Add assertions + } + + private func internalTestGenerateContent(resource: String, + safetyRatingsCount: Int = 6) async throws { + MockURLProtocol.requestHandler = try httpRequestHandler( + forResource: resource, + withExtension: "json" + ) + + let content = try await model.generateContent("What sorts of questions can I ask you?") + + // TODO: Add assertions for response content + let promptFeedback = try XCTUnwrap(content.promptFeedback) + XCTAssertEqual(promptFeedback.safetyRatings.count, 
safetyRatingsCount) + XCTAssertNotNil(content.text) + } + + func testGenerateContent_failure_invalidAPIKey() async throws { + MockURLProtocol + .requestHandler = try httpRequestHandler( + forResource: "unary-failure-api-key", + withExtension: "json", + statusCode: 400 + ) + + var responseError: Error? + var content: GenerateContentResponse? + do { + content = try await model.generateContent(testPrompt) + } catch { + responseError = error + } + + XCTAssertNotNil(responseError) + XCTAssertNil(content) + // TODO: Add assertions about `responseError`. + } + + func testGenerateContent_failure_emptyContent() async throws { + MockURLProtocol + .requestHandler = try httpRequestHandler( + forResource: "unary-failure-empty-content", + withExtension: "json" + ) + + var responseError: Error? + var content: GenerateContentResponse? + do { + content = try await model.generateContent(testPrompt) + } catch { + responseError = error + } + + XCTAssertNil(content) + XCTAssertNotNil(responseError) + let generateContentError = try XCTUnwrap(responseError as? GenerateContentError) + guard case let .internalError(underlyingError) = generateContentError else { + XCTFail("Not an internal error: \(generateContentError)") + return + } + let invalidCandidateError = try XCTUnwrap(underlyingError as? InvalidCandidateError) + guard case let .emptyContent(emptyContentUnderlyingError) = invalidCandidateError else { + XCTFail("Not an empty content error: \(invalidCandidateError)") + return + } + _ = try XCTUnwrap( + emptyContentUnderlyingError as? DecodingError, + "Not a decoding error: \(emptyContentUnderlyingError)" + ) + } + + func testGenerateContent_failure_finishReasonSafety() async throws { + MockURLProtocol + .requestHandler = try httpRequestHandler( + forResource: "unary-failure-finish-reason-safety", + withExtension: "json" + ) + + do { + _ = try await model.generateContent(testPrompt) + XCTFail("Should throw") + } catch let GenerateContentError.responseStoppedEarly(reason, response) { + XCTAssertEqual(reason, .safety) + XCTAssertEqual(response.text, "No") + } catch { + XCTFail("Should throw a responseStoppedEarly") + } + } + + func testGenerateContent_failure_finishReasonSafety_noContent() async throws { + MockURLProtocol + .requestHandler = try httpRequestHandler( + forResource: "unary-failure-finish-reason-safety-no-content", + withExtension: "json" + ) + + do { + _ = try await model.generateContent(testPrompt) + XCTFail("Should throw") + } catch let GenerateContentError.responseStoppedEarly(reason, response) { + XCTAssertEqual(reason, .safety) + XCTAssertNil(response.text) + } catch { + XCTFail("Should throw a responseStoppedEarly") + } + } + + func testGenerateContent_failure_imageRejected() async throws { + MockURLProtocol + .requestHandler = try httpRequestHandler( + forResource: "unary-failure-image-rejected", + withExtension: "json", + statusCode: 400 + ) + + var responseError: Error? + var content: GenerateContentResponse? + do { + content = try await model.generateContent(testPrompt) + } catch { + responseError = error + } + + XCTAssertNotNil(responseError) + XCTAssertNil(content) + // TODO: Add assertions about `responseError`. 
+ } + + func testGenerateContent_failure_promptBlockedSafety() async throws { + MockURLProtocol + .requestHandler = try httpRequestHandler( + forResource: "unary-failure-prompt-blocked-safety", + withExtension: "json" + ) + + do { + _ = try await model.generateContent(testPrompt) + XCTFail("Should throw") + } catch let GenerateContentError.promptBlocked(response) { + XCTAssertNil(response.text) + } catch { + XCTFail("Should throw a promptBlocked") + } + } + + func testGenerateContent_failure_unknownModel() async throws { + MockURLProtocol + .requestHandler = try httpRequestHandler( + forResource: "unary-failure-unknown-model", + withExtension: "json", + statusCode: 404 + ) + + var responseError: Error? + var content: GenerateContentResponse? + do { + content = try await model.generateContent(testPrompt) + } catch { + responseError = error + } + + XCTAssertNotNil(responseError) + XCTAssertNil(content) + // TODO: Add assertions about `responseError`. + } + + func testGenerateContent_failure_nonHTTPResponse() async throws { + MockURLProtocol.requestHandler = try nonHTTPRequestHandler() + + var responseError: Error? + var content: GenerateContentResponse? + do { + content = try await model.generateContent(testPrompt) + } catch { + responseError = error + } + + XCTAssertNil(content) + XCTAssertNotNil(responseError) + let generateContentError = try XCTUnwrap(responseError as? GenerateContentError) + guard case let .internalError(underlyingError) = generateContentError else { + XCTFail("Not an internal error: \(generateContentError)") + return + } + XCTAssertEqual(underlyingError.localizedDescription, "Response was not an HTTP response.") + } + + func testGenerateContent_failure_invalidResponse() async throws { + MockURLProtocol.requestHandler = try httpRequestHandler( + forResource: "unary-failure-invalid-response", + withExtension: "json" + ) + + var responseError: Error? + var content: GenerateContentResponse? + do { + content = try await model.generateContent(testPrompt) + } catch { + responseError = error + } + + XCTAssertNil(content) + XCTAssertNotNil(responseError) + let generateContentError = try XCTUnwrap(responseError as? GenerateContentError) + guard case let .internalError(underlyingError) = generateContentError else { + XCTFail("Not an internal error: \(generateContentError)") + return + } + let decodingError = try XCTUnwrap(underlyingError as? DecodingError) + guard case let .dataCorrupted(context) = decodingError else { + XCTFail("Not a data corrupted error: \(decodingError)") + return + } + XCTAssert(context.debugDescription.hasPrefix("Failed to decode GenerateContentResponse")) + } + + func testGenerateContent_failure_malformedContent() async throws { + MockURLProtocol + .requestHandler = try httpRequestHandler( + forResource: "unary-failure-malformed-content", + withExtension: "json" + ) + + var responseError: Error? + var content: GenerateContentResponse? + do { + content = try await model.generateContent(testPrompt) + } catch { + responseError = error + } + + XCTAssertNil(content) + XCTAssertNotNil(responseError) + let generateContentError = try XCTUnwrap(responseError as? GenerateContentError) + guard case let .internalError(underlyingError) = generateContentError else { + XCTFail("Not an internal error: \(generateContentError)") + return + } + let invalidCandidateError = try XCTUnwrap(underlyingError as?
InvalidCandidateError) + guard case let .malformedContent(malformedContentUnderlyingError) = invalidCandidateError else { + XCTFail("Not a malformed content error: \(invalidCandidateError)") + return + } + _ = try XCTUnwrap( + malformedContentUnderlyingError as? DecodingError, + "Not a decoding error: \(malformedContentUnderlyingError)" + ) + } + + func testGenerateContentMissingSafetyRatings() async throws { + try await internalTestGenerateContent(resource: "MissingSafetyRatings", safetyRatingsCount: 0) + } + + // MARK: - Generate Content (Streaming) + + func testGenerateContentStream_failureEmptyContent() async throws { + MockURLProtocol + .requestHandler = try httpRequestHandler( + forResource: "streaming-failure-empty-content", + withExtension: "txt" + ) + + do { + let stream = model.generateContentStream("Hi") + for try await _ in stream { + XCTFail("No content is there, this shouldn't happen.") + } + } catch { + // TODO: Catch specific error. + return + } + + XCTFail("Should have caught an error.") + } + + func testGenerateContentStream_failureFinishReasonSafety() async throws { + MockURLProtocol + .requestHandler = try httpRequestHandler( + forResource: "streaming-failure-finish-reason-safety", + withExtension: "txt" + ) + + do { + let stream = model.generateContentStream("Hi") + for try await _ in stream { + XCTFail("Content shouldn't be shown, this shouldn't happen.") + } + } catch let GenerateContentError.responseStoppedEarly(reason, _) { + XCTAssertEqual(reason, .safety) + return + } catch { + XCTFail("Wrong error generated: \(error)") + } + + XCTFail("Should have caught an error.") + } + + func testGenerateContentStream_failurePromptBlockedSafety() async throws { + MockURLProtocol + .requestHandler = try httpRequestHandler( + forResource: "streaming-failure-prompt-blocked-safety", + withExtension: "txt" + ) + + do { + let stream = model.generateContentStream("Hi") + for try await _ in stream { + XCTFail("Content shouldn't be shown, this shouldn't happen.") + } + } catch let GenerateContentError.promptBlocked(response) { + XCTAssertEqual(response.promptFeedback?.blockReason, .safety) + return + } + + XCTFail("Should have caught an error.") + } + + func testGenerateContentStream_failureUnknownFinishEnum() async throws { + MockURLProtocol + .requestHandler = try httpRequestHandler( + forResource: "streaming-failure-unknown-finish-enum", + withExtension: "txt" + ) + + let stream = model.generateContentStream("Hi") + do { + for try await content in stream { + XCTAssertNotNil(content.text) + } + } catch let GenerateContentError.responseStoppedEarly(reason, _) { + XCTAssertEqual(reason, .unknown) + return + } + + XCTFail("Should have caught an error.") + } + + func testGenerateContentStream_successBasicReplyLong() async throws { + MockURLProtocol + .requestHandler = try httpRequestHandler( + forResource: "streaming-success-basic-reply-long", + withExtension: "txt" + ) + + var responses = 0 + let stream = model.generateContentStream("Hi") + for try await content in stream { + XCTAssertNotNil(content.text) + responses += 1 + } + + XCTAssertEqual(responses, 6) + } + + func testGenerateContentStream_successBasicReplyShort() async throws { + MockURLProtocol + .requestHandler = try httpRequestHandler( + forResource: "streaming-success-basic-reply-short", + withExtension: "txt" + ) + + var responses = 0 + let stream = model.generateContentStream("Hi") + for try await content in stream { + XCTAssertNotNil(content.text) + responses += 1 + } + + XCTAssertEqual(responses, 1) + } + + func 
testGenerateContentStream_successUnknownSafetyEnum() async throws { + MockURLProtocol + .requestHandler = try httpRequestHandler( + forResource: "streaming-success-unknown-safety-enum", + withExtension: "txt" + ) + + var hadUnknown = false + let stream = model.generateContentStream("Hi") + for try await content in stream { + XCTAssertNotNil(content.text) + if let ratings = content.candidates.first?.safetyRatings, + ratings.contains(where: { $0.category == .unknown }) { + hadUnknown = true + } + } + + XCTAssertTrue(hadUnknown) + } + + func testGenerateContentStream_successWithCitations() async throws { + MockURLProtocol + .requestHandler = try httpRequestHandler( + forResource: "streaming-success-citations", + withExtension: "txt" + ) + + let stream = model.generateContentStream("Can you explain quantum physics?") + var citations: [Citation] = [] + for try await content in stream { + XCTAssertNotNil(content.text) + let candidate = try XCTUnwrap(content.candidates.first) + XCTAssertEqual(candidate.finishReason, .stop) + if let sources = candidate.citationMetadata?.citationSources { + citations.append(contentsOf: sources) + } + } + + XCTAssertEqual(citations.count, 8) + XCTAssertTrue(citations + .contains(where: { $0.startIndex == 574 && $0.endIndex == 705 && !$0.uri.isEmpty })) + XCTAssertTrue(citations + .contains(where: { $0.startIndex == 899 && $0.endIndex == 1026 && !$0.uri.isEmpty })) + } + + func testGenerateContentStream_errorMidStream() async throws { + MockURLProtocol.requestHandler = try httpRequestHandler( + forResource: "ExampleErrorMidStream", + withExtension: "json" + ) + + let stream = model.generateContentStream("What sorts of questions can I ask you?") + + var textResponses = [String]() + var errorResponse: Error? + do { + for try await content in stream { + XCTAssertNotNil(content.text) + let text = try XCTUnwrap(content.text) + textResponses.append(text) + } + } catch { + errorResponse = error + } + + // TODO: Add assertions for response content + XCTAssertEqual(textResponses.count, 2) + XCTAssertNotNil(errorResponse) + } + + func testGenerateContentStream_nonHTTPResponse() async throws { + MockURLProtocol.requestHandler = try nonHTTPRequestHandler() + + let stream = model.generateContentStream("What sorts of questions can I ask you?") + var responseError: Error? + do { + for try await content in stream { + XCTFail("Unexpected content in stream: \(content)") + } + } catch { + responseError = error + } + + XCTAssertNotNil(responseError) + let generateContentError = try XCTUnwrap(responseError as? GenerateContentError) + guard case let .internalError(underlyingError) = generateContentError else { + XCTFail("Not an internal error: \(generateContentError)") + return + } + XCTAssertEqual(underlyingError.localizedDescription, "Response was not an HTTP response.") + } + + func testGenerateContentStream_invalidResponse() async throws { + MockURLProtocol + .requestHandler = try httpRequestHandler( + forResource: "InvalidStreamingResponse", + withExtension: "json" + ) + + let stream = model.generateContentStream(testPrompt) + var responseError: Error? + do { + for try await content in stream { + XCTFail("Unexpected content in stream: \(content)") + } + } catch { + responseError = error + } + + XCTAssertNotNil(responseError) + let generateContentError = try XCTUnwrap(responseError as? 
GenerateContentError) + guard case let .internalError(underlyingError) = generateContentError else { + XCTFail("Not an internal error: \(generateContentError)") + return + } + let decodingError = try XCTUnwrap(underlyingError as? DecodingError) + guard case let .dataCorrupted(context) = decodingError else { + XCTFail("Not a data corrupted error: \(decodingError)") + return + } + XCTAssert(context.debugDescription.hasPrefix("Failed to decode GenerateContentResponse")) + } + + func testGenerateContentStream_emptyContent() async throws { + MockURLProtocol + .requestHandler = try httpRequestHandler( + forResource: "EmptyContentStreamingResponse", + withExtension: "json" + ) + + let stream = model.generateContentStream(testPrompt) + var responseError: Error? + do { + for try await content in stream { + XCTFail("Unexpected content in stream: \(content)") + } + } catch { + responseError = error + } + + XCTAssertNotNil(responseError) + let generateContentError = try XCTUnwrap(responseError as? GenerateContentError) + guard case let .internalError(underlyingError) = generateContentError else { + XCTFail("Not an internal error: \(generateContentError)") + return + } + let invalidCandidateError = try XCTUnwrap(underlyingError as? InvalidCandidateError) + guard case let .emptyContent(emptyContentUnderlyingError) = invalidCandidateError else { + XCTFail("Not an empty content error: \(invalidCandidateError)") + return + } + _ = try XCTUnwrap( + emptyContentUnderlyingError as? DecodingError, + "Not a decoding error: \(emptyContentUnderlyingError)" + ) + } + + func testGenerateContentStream_malformedContent() async throws { + MockURLProtocol + .requestHandler = try httpRequestHandler( + forResource: "MalformedContentStreamingResponse", + withExtension: "json" + ) + + let stream = model.generateContentStream(testPrompt) + var responseError: Error? + do { + for try await content in stream { + XCTFail("Unexpected content in stream: \(content)") + } + } catch { + responseError = error + } + + XCTAssertNotNil(responseError) + let generateContentError = try XCTUnwrap(responseError as? GenerateContentError) + guard case let .internalError(underlyingError) = generateContentError else { + XCTFail("Not an internal error: \(generateContentError)") + return + } + let invalidCandidateError = try XCTUnwrap(underlyingError as? InvalidCandidateError) + guard case let .malformedContent(malformedContentUnderlyingError) = invalidCandidateError else { + XCTFail("Not a malformed content error: \(invalidCandidateError)") + return + } + _ = try XCTUnwrap( + malformedContentUnderlyingError as? DecodingError, + "Not a decoding error: \(malformedContentUnderlyingError)" + ) + } + + // MARK: - Count Tokens + + func testCountTokens_succeeds() async throws { + MockURLProtocol.requestHandler = try httpRequestHandler( + forResource: "CountTokensResponse", + withExtension: "json" + ) + + let response = try await model.countTokens("Why is the sky blue?") + + XCTAssertEqual(response.totalTokens, 6) + } + + func testCountTokens_modelNotFound() async throws { + MockURLProtocol.requestHandler = try httpRequestHandler( + forResource: "CountTokensModelNotFound", withExtension: "json", + statusCode: 404 + ) + + var response: CountTokensResponse? + var responseError: Error? + do { + response = try await model.countTokens("Why is the sky blue?") + } catch { + responseError = error + } + + XCTAssertNil(response) + XCTAssertNotNil(responseError) + let countTokensError = try XCTUnwrap(responseError as? 
CountTokensError) + guard case let .internalError(underlyingError) = countTokensError else { + XCTFail("Not an internal error: \(countTokensError)") + return + } + let rpcError = try XCTUnwrap(underlyingError as? RPCError) + XCTAssertEqual(rpcError.httpResponseCode, 404) + XCTAssertEqual(rpcError.status, .notFound) + XCTAssert(rpcError.message.hasPrefix("models/test-model-name is not found")) + } + + // MARK: - Helpers + + private func nonHTTPRequestHandler() throws -> ((URLRequest) -> ( + URLResponse, + AsyncLineSequence<URL.AsyncBytes>? + )) { + return { request in + // This is *not* an HTTPURLResponse + let response = URLResponse( + url: request.url!, + mimeType: nil, + expectedContentLength: 0, + textEncodingName: nil + ) + return (response, nil) + } + } + + private func httpRequestHandler(forResource name: String, + withExtension ext: String, + statusCode: Int = 200) throws -> ((URLRequest) -> ( + URLResponse, + AsyncLineSequence<URL.AsyncBytes>? + )) { + let fileURL = try XCTUnwrap(Bundle.module.url(forResource: name, withExtension: ext)) + return { request in + let response = HTTPURLResponse( + url: request.url!, + statusCode: statusCode, + httpVersion: nil, + headerFields: nil + )! + return (response, fileURL.lines) + } + } +} diff --git a/Tests/GoogleAITests/GoogleAITests.swift b/Tests/GoogleAITests/GoogleAITests.swift new file mode 100644 index 0000000..c7474b4 --- /dev/null +++ b/Tests/GoogleAITests/GoogleAITests.swift @@ -0,0 +1,167 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import GoogleGenerativeAI +import XCTest +#if canImport(AppKit) + import AppKit // For NSImage extensions. +#elseif canImport(UIKit) + import UIKit // For UIImage extensions. +#endif + +final class GoogleGenerativeAITests: XCTestCase { + func codeSamples() async throws { + let config = GenerationConfig(temperature: 0.2, + topP: 0.1, + topK: 16, + candidateCount: 4, + maxOutputTokens: 256, + stopSequences: ["..."]) + let filters = [SafetySetting(harmCategory: .dangerousContent, threshold: .blockOnlyHigh)] + + // Permutations without optional arguments. + let _ = GenerativeModel(name: "gemini-pro@001", apiKey: "API_KEY") + let _ = GenerativeModel(name: "gemini-pro@001", apiKey: "API_KEY", safetySettings: filters) + let _ = GenerativeModel(name: "gemini-pro@001", apiKey: "API_KEY", generationConfig: config) + + // All arguments passed. + let genAI = GenerativeModel(name: "gemini-pro@001", + apiKey: "API_KEY", + generationConfig: config, // Optional + safetySettings: filters // Optional + ) + // Full Typed Usage + let pngData = Data() // .... + let contents = [ModelContent(role: "user", + parts: [ + .text("Is it a cat?"), + .png(pngData), + ])] + + do { + let response = try await genAI.generateContent(contents) + print(response.text ?? "Couldn't get text... check status") + } catch { + print("Error generating content: \(error)") + } + + // Content input combinations.
+ let _ = try await genAI.generateContent("Constant String") + let str = "String Variable" + let _ = try await genAI.generateContent(str) + let _ = try await genAI.generateContent([str]) + let _ = try await genAI.generateContent(str, "abc", "def") + #if canImport(AppKit) + _ = try await genAI.generateContent(NSImage()) + _ = try await genAI.generateContent([NSImage()]) + _ = try await genAI.generateContent(str, NSImage(), "def", NSImage()) + _ = try await genAI.generateContent([str, NSImage(), "def", NSImage()]) + #elseif canImport(UIKit) + _ = try await genAI.generateContent(UIImage()) + _ = try await genAI.generateContent([UIImage()]) + _ = try await genAI + .generateContent([str, UIImage(), ModelContent.Part.text(str)]) + _ = try await genAI.generateContent(str, UIImage(), "def", UIImage()) + _ = try await genAI.generateContent([str, UIImage(), "def", UIImage()]) + _ = try await genAI.generateContent([ModelContent("def", UIImage()), + ModelContent("def", UIImage())]) + #endif + + // PartsRepresentable combinations. + let _ = ModelContent(parts: [.text(str)]) + let _ = ModelContent(role: "model", parts: [.text(str)]) + let _ = ModelContent(parts: "Constant String") + let _ = ModelContent(parts: str) + let _ = ModelContent(parts: [str]) + // Note: without `as [any PartsRepresentable]` this will fail to compile with "Cannot convert + // value of type `[Any]` to expected type `[any PartsRepresentable]`. Not sure if there's a + // way we can get it to work. + let _ = ModelContent(parts: [str, ModelContent.Part.data( + mimetype: "foo", + Data() + )] as [any PartsRepresentable]) + #if canImport(AppKit) + _ = ModelContent(role: "user", parts: NSImage()) + _ = ModelContent(role: "user", parts: [NSImage()]) + // Note: without `as [any PartsRepresentable]` this will fail to compile with "Cannot convert + // value of type `[Any]` to expected type `[any PartsRepresentable]`. Not sure if there's a + // way we can get it to work. + _ = ModelContent(parts: [str, NSImage()] as [any PartsRepresentable]) + // Alternatively, you can explicitly declare the type in a variable and pass it in. + let representable2: [any PartsRepresentable] = [str, NSImage()] + _ = ModelContent(parts: representable2) + _ = + ModelContent(parts: [str, NSImage(), + ModelContent.Part.text(str)] as [any PartsRepresentable]) + #elseif canImport(UIKit) + _ = ModelContent(role: "user", parts: UIImage()) + _ = ModelContent(role: "user", parts: [UIImage()]) + // Note: without `as [any PartsRepresentable]` this will fail to compile with "Cannot convert + // value of type `[Any]` to expected type `[any PartsRepresentable]`. Not sure if there's a + // way we can get it to work. + _ = ModelContent(parts: [str, UIImage()] as [any PartsRepresentable]) + // Alternatively, you can explicitly declare the type in a variable and pass it in. 
+ let representable2: [any PartsRepresentable] = [str, UIImage()] + _ = ModelContent(parts: representable2) + _ = ModelContent(parts: [str, UIImage(), + ModelContent.Part.text(str)] as [any PartsRepresentable]) + #endif + + // countTokens API + let _: CountTokensResponse = try await genAI.countTokens("What color is the Sky?") + #if canImport(UIKit) + let _: CountTokensResponse = try await genAI.countTokens("What color is the Sky?", + UIImage()) + let _: CountTokensResponse = try await genAI.countTokens([ + ModelContent("What color is the Sky?", UIImage()), + ModelContent(UIImage(), "What color is the Sky?", UIImage()), + ]) + #endif + + // Chat + _ = genAI.startChat() + _ = genAI.startChat(history: [ModelContent(parts: "abc")]) + } + + // Result builder alternative + + /* + let pngData = Data() // .... + let contents = [GenAIContent(role: "user", + parts: [ + .text("Is it a cat?"), + .png(pngData) + ])] + + // Turns into... + + let contents = GenAIContent { + Role("user") { + Text("Is this a cat?") + Image(png: pngData) + } + } + + GenAIContent { + ForEach(myInput) { input in + Role(input.role) { + input.contents + } + } + } + + // Thoughts: this looks great from a code demo, but since I assume most content will be + // user generated, the result builder may not be the best API. + */ +} diff --git a/Tests/GoogleAITests/MockURLProtocol.swift b/Tests/GoogleAITests/MockURLProtocol.swift new file mode 100644 index 0000000..1f63aaf --- /dev/null +++ b/Tests/GoogleAITests/MockURLProtocol.swift @@ -0,0 +1,59 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import Foundation +import XCTest + +@available(iOS 15.0, tvOS 15.0, *) +class MockURLProtocol: URLProtocol { + static var requestHandler: ((URLRequest) -> (URLResponse, AsyncLineSequence<URL.AsyncBytes>?))? + + override class func canInit(with request: URLRequest) -> Bool { return true } + + override class func canonicalRequest(for request: URLRequest) -> URLRequest { return request } + + override func startLoading() { + guard let requestHandler = MockURLProtocol.requestHandler else { + fatalError("`requestHandler` is nil.") + } + guard let client = client else { + fatalError("`client` is nil.") + } + + Task { + let (response, stream) = requestHandler(self.request) + client.urlProtocol(self, didReceive: response, cacheStoragePolicy: .notAllowed) + if let stream = stream { + do { + for try await line in stream { + guard let data = line.data(using: .utf8) else { + fatalError("Failed to convert \"\(line)\" to UTF8 data.") + } + client.urlProtocol(self, didLoad: data) + // Add a newline character since AsyncLineSequence strips them when reading line by + // line; + // without the following, the whole file is delivered as a single line. + client.urlProtocol(self, didLoad: "\n".data(using: .utf8)!)
+ } + } catch { + client.urlProtocol(self, didFailWithError: error) + XCTFail("Unexpected failure reading lines from stream: \(error.localizedDescription)") + } + } + client.urlProtocolDidFinishLoading(self) + } + } + + override func stopLoading() {} +} diff --git a/Tests/GoogleAITests/SampleResponses/CountTokensModelNotFound.json b/Tests/GoogleAITests/SampleResponses/CountTokensModelNotFound.json new file mode 100644 index 0000000..50fcb72 --- /dev/null +++ b/Tests/GoogleAITests/SampleResponses/CountTokensModelNotFound.json @@ -0,0 +1,13 @@ +{ + "error": { + "code": 404, + "message": "models/test-model-name is not found for API version v1beta, or is not supported for countTokens. Call ListModels to see the list of available models and their supported methods.", + "status": "NOT_FOUND", + "details": [ + { + "@type": "type.googleapis.com/google.rpc.DebugInfo", + "detail": "[ORIGINAL ERROR] generic::not_found: models/test-model-name is not found for API version v1beta, or is not supported for countTokens. Call ListModels to see the list of available models and their supported methods. [google.rpc.error_details_ext] { message: \"models/test-model-name is not found for API version v1beta, or is not supported for countTokens. Call ListModels to see the list of available models and their supported methods.\" }" + } + ] + } +} diff --git a/Tests/GoogleAITests/SampleResponses/CountTokensResponse.json b/Tests/GoogleAITests/SampleResponses/CountTokensResponse.json new file mode 100644 index 0000000..0bccd9e --- /dev/null +++ b/Tests/GoogleAITests/SampleResponses/CountTokensResponse.json @@ -0,0 +1,3 @@ +{ + "totalTokens": 6 +} diff --git a/Tests/GoogleAITests/SampleResponses/EmptyContentStreamingResponse.json b/Tests/GoogleAITests/SampleResponses/EmptyContentStreamingResponse.json new file mode 100644 index 0000000..175585e --- /dev/null +++ b/Tests/GoogleAITests/SampleResponses/EmptyContentStreamingResponse.json @@ -0,0 +1 @@ +data: {"candidates": [{"content": {},"safetyRatings": [{"category": "HARM_CATEGORY_TOXICITY","probability": "MEDIUM"},{"category": "HARM_CATEGORY_SEXUAL","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_VIOLENCE","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DEROGATORY","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS","probability": "NEGLIGIBLE"}]}]} diff --git a/Tests/GoogleAITests/SampleResponses/ExampleErrorMidStream.json b/Tests/GoogleAITests/SampleResponses/ExampleErrorMidStream.json new file mode 100644 index 0000000..c0471e8 --- /dev/null +++ b/Tests/GoogleAITests/SampleResponses/ExampleErrorMidStream.json @@ -0,0 +1,17 @@ +data: {"candidates": [{"content": {"parts": [{"text": " Sure, here are some questions you can ask me:\n\n* What is the weather like today?\n* What is the capital of France?\n*"}]},"index": 0,"safetyRatings": [{"category": "HARM_CATEGORY_TOXICITY","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_VIOLENCE","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}]}],"promptFeedback": {"safetyRatings": [{"category": "HARM_CATEGORY_TOXICITY","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_VIOLENCE","probability": "NEGLIGIBLE"},{"category": 
"HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}]}} + +data: {"candidates": [{"content": {"parts": [{"text": " Who is the president of the United States?\n* What is the square root of 144?\n* What is the definition of the word \""}]},"index": 0,"safetyRatings": [{"category": "HARM_CATEGORY_TOXICITY","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_VIOLENCE","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}]}]} + +{ + "error": { + "code": 499, + "message": "The operation was cancelled.", + "status": "CANCELLED", + "details": [ + { + "@type": "type.googleapis.com/google.rpc.DebugInfo", + "detail": "[ORIGINAL ERROR] generic::cancelled: " + } + ] + } +} diff --git a/Tests/GoogleAITests/SampleResponses/ExampleStreamingResponse.json b/Tests/GoogleAITests/SampleResponses/ExampleStreamingResponse.json new file mode 100644 index 0000000..48b054e --- /dev/null +++ b/Tests/GoogleAITests/SampleResponses/ExampleStreamingResponse.json @@ -0,0 +1,45 @@ +data: {"candidates": [{"content": {"parts": [{"text": " As an AI language model, I am designed to help you with a wide range"}]},"index": 0,"safetyRatings": [{"category": "HARM_CATEGORY_TOXICITY","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_VIOLENCE","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}]}],"promptFeedback": {"safetyRatings": [{"category": "HARM_CATEGORY_TOXICITY","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_VIOLENCE","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}]}} + +data: {"candidates": [{"content": {"parts": [{"text": " of topics and questions. 
Here are some examples of the types of questions you can"}]},"index": 0,"safetyRatings": [{"category": "HARM_CATEGORY_TOXICITY","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_VIOLENCE","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}]}]} + +data: {"candidates": [{"content": {"parts": [{"text": " ask me:\n- **General knowledge:** Ask me about a variety of topics"}]},"index": 0,"safetyRatings": [{"category": "HARM_CATEGORY_TOXICITY","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_VIOLENCE","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}]}]} + +data: {"candidates": [{"content": {"parts": [{"text": ", including history, science, technology, art, culture, and more.\n"}]},"index": 0,"safetyRatings": [{"category": "HARM_CATEGORY_TOXICITY","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_VIOLENCE","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}]}]} + +data: {"candidates": [{"content": {"parts": [{"text": "- **Creative writing:** Request me to write a story, poem, or any"}]},"index": 0,"safetyRatings": [{"category": "HARM_CATEGORY_TOXICITY","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_VIOLENCE","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}]}]} + +data: {"candidates": [{"content": {"parts": [{"text": " other creative piece based on your specifications.\n- **Language translation:** I can"}]},"index": 0,"safetyRatings": [{"category": "HARM_CATEGORY_TOXICITY","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_VIOLENCE","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}]}]} + +data: {"candidates": [{"content": {"parts": [{"text": " translate text from one language to another.\n- **Math problems:** I can"}]},"index": 0,"safetyRatings": [{"category": "HARM_CATEGORY_TOXICITY","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_VIOLENCE","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}]}]} + +data: {"candidates": [{"content": {"parts": [{"text": " solve math 
equations and provide step-by-step solutions.\n- **Trivia"}]},"index": 0,"safetyRatings": [{"category": "HARM_CATEGORY_TOXICITY","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_VIOLENCE","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}]}]} + +data: {"candidates": [{"content": {"parts": [{"text": " and quizzes:** Test your knowledge by asking me trivia questions or creating quizzes on various"}]},"index": 0,"safetyRatings": [{"category": "HARM_CATEGORY_TOXICITY","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_VIOLENCE","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}]}]} + +data: {"candidates": [{"content": {"parts": [{"text": " subjects.\n- **Conversation:** Engage in casual conversation on any topic of your"}]},"index": 0,"safetyRatings": [{"category": "HARM_CATEGORY_TOXICITY","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_VIOLENCE","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}]}]} + +data: {"candidates": [{"content": {"parts": [{"text": " interest.\n- **Advice and suggestions:** Seek advice on various matters, such"}]},"index": 0,"safetyRatings": [{"category": "HARM_CATEGORY_TOXICITY","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_VIOLENCE","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}]}]} + +data: {"candidates": [{"content": {"parts": [{"text": " as relationships, career choices, or personal growth.\n- **Entertainment:** Request"}]},"index": 0,"safetyRatings": [{"category": "HARM_CATEGORY_TOXICITY","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_VIOLENCE","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}]}]} + +data: {"candidates": [{"content": {"parts": [{"text": " jokes, riddles, or fun facts to lighten up your day.\n- **"}]},"index": 0,"safetyRatings": [{"category": "HARM_CATEGORY_TOXICITY","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_VIOLENCE","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}]}]} + +data: {"candidates": 
[{"content": {"parts": [{"text": "Code generation:** Ask me to write code snippets in different programming languages.\n-"}]},"index": 0,"safetyRatings": [{"category": "HARM_CATEGORY_TOXICITY","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_VIOLENCE","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}]}]} + +data: {"candidates": [{"content": {"parts": [{"text": " **Real-time information:** Inquire about current events, weather conditions, or"}]},"index": 0,"safetyRatings": [{"category": "HARM_CATEGORY_TOXICITY","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_VIOLENCE","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}]}]} + +data: {"candidates": [{"content": {"parts": [{"text": " other up-to-date information.\n- **Creative ideas:** Generate creative"}]},"index": 0,"safetyRatings": [{"category": "HARM_CATEGORY_TOXICITY","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_VIOLENCE","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}]}]} + +data: {"candidates": [{"content": {"parts": [{"text": " ideas for projects, hobbies, or any other endeavor.\n- **Health and"}]},"index": 0,"safetyRatings": [{"category": "HARM_CATEGORY_TOXICITY","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_VIOLENCE","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}]}]} + +data: {"candidates": [{"content": {"parts": [{"text": " wellness:** Get information about health, fitness, and nutrition.\n- **Travel"}]},"index": 0,"safetyRatings": [{"category": "HARM_CATEGORY_TOXICITY","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_VIOLENCE","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}]}]} + +data: {"candidates": [{"content": {"parts": [{"text": " and geography:** Ask about places, landmarks, cultures, and travel tips.\n\n"}]},"index": 0,"safetyRatings": [{"category": "HARM_CATEGORY_TOXICITY","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_VIOLENCE","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": 
"NEGLIGIBLE"}]}]} + +data: {"candidates": [{"content": {"parts": [{"text": "Remember that my responses are based on the information available to me up until my training"}]},"index": 0,"safetyRatings": [{"category": "HARM_CATEGORY_TOXICITY","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_VIOLENCE","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}]}]} + +data: {"candidates": [{"content": {"parts": [{"text": " cutoff date in September 2021. For the most up-to"}]},"index": 0,"safetyRatings": [{"category": "HARM_CATEGORY_TOXICITY","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_VIOLENCE","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}]}]} + +data: {"candidates": [{"content": {"parts": [{"text": "-date information, especially on rapidly changing topics, it's always a good"}]},"index": 0,"safetyRatings": [{"category": "HARM_CATEGORY_TOXICITY","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_VIOLENCE","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}]}]} + +data: {"candidates": [{"content": {"parts": [{"text": " idea to consult reliable and recent sources."}]},"index": 0,"safetyRatings": [{"category": "HARM_CATEGORY_TOXICITY","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_VIOLENCE","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}]}]} diff --git a/Tests/GoogleAITests/SampleResponses/InvalidStreamingResponse.json b/Tests/GoogleAITests/SampleResponses/InvalidStreamingResponse.json new file mode 100644 index 0000000..3aea784 --- /dev/null +++ b/Tests/GoogleAITests/SampleResponses/InvalidStreamingResponse.json @@ -0,0 +1 @@ +data: {"this": [{"is": {"not": [{"a": "valid"}]}, "response": {}}]} diff --git a/Tests/GoogleAITests/SampleResponses/MalformedContentStreamingResponse.json b/Tests/GoogleAITests/SampleResponses/MalformedContentStreamingResponse.json new file mode 100644 index 0000000..95b35e7 --- /dev/null +++ b/Tests/GoogleAITests/SampleResponses/MalformedContentStreamingResponse.json @@ -0,0 +1 @@ +data: {"candidates": [{"content": {"invalid-field": true},"index": 0,"safetyRatings": [{"category": "HARM_CATEGORY_TOXICITY","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_VIOLENCE","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": 
"NEGLIGIBLE"}]}],"promptFeedback": {"safetyRatings": [{"category": "HARM_CATEGORY_TOXICITY","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_VIOLENCE","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"}]}} diff --git a/Tests/GoogleAITests/SampleResponses/MissingSafetyRatings.json b/Tests/GoogleAITests/SampleResponses/MissingSafetyRatings.json new file mode 100644 index 0000000..9e7ff8d --- /dev/null +++ b/Tests/GoogleAITests/SampleResponses/MissingSafetyRatings.json @@ -0,0 +1,16 @@ +{ + "candidates": [ + { + "content": { + "parts": [ + { + "text": "The sky is blue because of a phenomenon called Rayleigh scattering. This refers to the scattering of light by particles that are smaller than the wavelength of light. In the case of the sky, the particles that are doing the scattering are molecules of nitrogen and oxygen.\n\nWhen sunlight hits these molecules, the shorter wavelengths of light (blue and violet) are scattered more than the longer wavelengths (red and orange). This is because the shorter wavelengths have a higher energy and are therefore more likely to interact with the molecules.\n\nThe scattered blue light is then redirected in all directions, which is why we see the sky as being blue. The amount of scattering depends on the wavelength of light and the size of the particles, which is why the sky appears to be a darker shade of blue when the sun is low in the sky (when the sunlight has to travel through more of the atmosphere to reach our eyes) and a lighter shade of blue when the sun is high in the sky (when the sunlight has to travel through less of the atmosphere to reach our eyes).\n\nIt is important to note that the sky is not actually blue. It only appears to be blue because of the way that light is scattered by the molecules in the atmosphere. If you were to travel to space, you would see that the sky is actually black." + } + ] + }, + "index": 0 + } + ], + "promptFeedback": { + } +} diff --git a/Tests/GoogleGenerativeAITests/GoogleGenerativeAITests.swift b/Tests/GoogleGenerativeAITests/GoogleGenerativeAITests.swift deleted file mode 100644 index 8e757b8..0000000 --- a/Tests/GoogleGenerativeAITests/GoogleGenerativeAITests.swift +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -import XCTest -@testable import GoogleGenerativeAI - -final class GenerativeLanguageTests: XCTestCase { - let apiKey = "" - - func testSetup() { - let client = GenerativeLanguage(apiKey: apiKey) - - XCTAssertNotNil(client) - XCTAssertEqual(client.apiKey, apiKey) - } - - func testGenerateText() async throws { - let client = GenerativeLanguage(apiKey: apiKey) - let model = "models/text-bison-001" - - let result = try await client.generateText(with: "Say something nice", model: model) - print(result) - } - - func testChat() async throws { - let client = GenerativeLanguage(apiKey: apiKey) - let model = "models/chat-bison-001" - - let result = try await client.chat(message: "Say something nice", model: model) - print(result) - } - - func testListModels() async throws { - let client = GenerativeLanguage(apiKey: apiKey) - - let result = try await client.listModels() - print(result.models ?? []) - } - - func testGetModel() async throws { - let client = GenerativeLanguage(apiKey: apiKey) - let model = "chat-bison-001" - - let result = try await client.getModel(name: model) - print(result.displayName ?? []) - } -} diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md index e635be7..8956a61 100644 --- a/docs/CONTRIBUTING.md +++ b/docs/CONTRIBUTING.md @@ -27,6 +27,6 @@ Guidelines](https://opensource.google/conduct/). ### Code Reviews -All submissions, including submissions by project members, require review. We +All submissions, including submissions by project members, require review. We use [GitHub pull requests](https://docs.github.com/articles/about-pull-requests) for this purpose. diff --git a/docs/DEVELOPING.md b/docs/DEVELOPING.md index a078c87..ed6fe82 100644 --- a/docs/DEVELOPING.md +++ b/docs/DEVELOPING.md @@ -1,13 +1,21 @@ # Developing -## Re-generating the API -All code in the [OpenAPI](../Sources/OpenAPI) is generated based on the API specification in [generativelanguage-v1beta3.json](../Sources/generativelanguage-v1beta3.json). 
+## Prerequisites -To re-generate the API based on any changes to the API, follow these steps: +* Xcode 15 or newer -* Obtain the API discovery document from https://generativelanguage.googleapis.com/$discovery/rest?version=v1beta3&key=$YOUR_API_KEY -* Convert the discovery document to OpenAPI 3.0 (Swagger) format - * https://github.com/APIs-guru/google-discovery-to-swagger - * https://github.com/LucyBot-Inc/api-spec-converter -* Use [CreateAPI](https://github.com/CreateAPI/CreateAPI) to generate helper classes for accessing the API -* Manually write the API surface ([protocol](../Sources/GoogleGenerativeAI/GenerativeLanguageProtocol.swift) / [implementation)](../Sources/GoogleGenerativeAI/GenerativeLanguage.swift) and [REST routes for the API client](../Sources/GoogleGenerativeAI/Endpoints.swift) to access the API using the generated helper classes +## Setting up your development environment + +* `git clone git@github.com:google/generative-ai-swift.git` +* `open Package.swift` +* Command-u to build and run the library and unit tests +* `open Examples/GenerativeAISample/GenerativeAISample.xcodeproj` to build and run the Examples + +## Checking in code + +Before submitting a pull request, make sure to check your code against the +style guide by running the following command: + +```bash +$ ./scripts/style.sh +``` diff --git a/scripts/build.sh b/scripts/build.sh new file mode 100755 index 0000000..9e150c6 --- /dev/null +++ b/scripts/build.sh @@ -0,0 +1,206 @@ +#!/bin/bash + +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# USAGE: build.sh product platform method [workspace] +# +# Builds the given product for the given platform using the given build method + +set -euo pipefail + +if [[ $# -lt 1 ]]; then + cat 1>&2 <&2 + sleep 5 + + result=0 + xcodebuild "$@" | tee xcodebuild.log | "${xcpretty_cmd[@]}" || result=$? 
+ fi + + if [[ $result != 0 ]]; then + echo "xcodebuild exited with $result" 1>&2 + + ExportLogs "$@" + return $result + fi +} + +# Exports any logs output captured in the xcresult +function ExportLogs() { + python "${scripts_dir}/xcresult_logs.py" "$@" +} + +ios_flags=( + -sdk 'iphonesimulator' +) + +ios_device_flags=( + -sdk 'iphoneos' +) + +ipad_flags=( + -sdk 'iphonesimulator' +) + +macos_flags=( + -sdk 'macosx' +) +tvos_flags=( + -sdk "appletvsimulator" +) +watchos_flags=( +) +visionos_flags=( +) +catalyst_flags=( + ARCHS=x86_64 VALID_ARCHS=x86_64 SUPPORTS_MACCATALYST=YES -sdk macosx + CODE_SIGN_IDENTITY=- CODE_SIGNING_REQUIRED=NO CODE_SIGNING_ALLOWED=NO +) + +destination= + +# Compute standard flags for all platforms +case "$platform" in + iOS) + xcb_flags=("${ios_flags[@]}") + gen_platform=ios + destination="platform=iOS Simulator,name=iPhone 15" + ;; + + iOS-device) + xcb_flags=("${ios_device_flags[@]}") + gen_platform=ios + destination='generic/platform=iOS' + ;; + + iPad) + xcb_flags=("${ipad_flags[@]}") + destination='platform=iOS Simulator,name=iPad Pro (9.7-inch)' + ;; + + macOS) + xcb_flags=("${macos_flags[@]}") + gen_platform=macos + destination="platform=OS X,arch=x86_64" + ;; + + tvOS) + xcb_flags=("${tvos_flags[@]}") + gen_platform=tvos + destination='platform=tvOS Simulator,name=Apple TV' + ;; + + watchOS) + xcb_flags=("${watchos_flags[@]}") + destination='platform=watchOS Simulator,name=Apple Watch Series 7 (45mm)' + ;; + + visionOS) + xcb_flags=("${visionos_flags[@]}") + destination='platform=visionOS Simulator' + ;; + + catalyst) + xcb_flags=("${catalyst_flags[@]}") + destination='platform="macOS,variant=Mac Catalyst,arch=x86_64" TARGETED_DEVICE_FAMILY=2' + ;; + + all) + xcb_flags=() + ;; + + Linux) + xcb_flags=() + ;; + + *) + echo "Unknown platform '$platform'" 1>&2 + exit 1 + ;; +esac + +xcb_flags+=( + ONLY_ACTIVE_ARCH=YES + CODE_SIGNING_REQUIRED=NO + CODE_SIGNING_ALLOWED=YES + COMPILER_INDEX_STORE_ENABLE=NO +) + +if [[ $workspace = *.xcodeproj ]] ; then + RunXcodebuild -project $workspace -scheme $product "${xcb_flags[@]}" $method +else + RunXcodebuild -workspace $workspace -scheme $product -destination "$destination" "${xcb_flags[@]}" $method +fi diff --git a/scripts/check_copyright.sh b/scripts/check_copyright.sh new file mode 100755 index 0000000..f6c2f54 --- /dev/null +++ b/scripts/check_copyright.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Check source files for copyright notices + +options=( + -E # Use extended regexps + -I # Exclude binary files + -L # Show files that don't have a match + 'Copyright [0-9]{4}.*Google LLC' +) + +list=$(git grep "${options[@]}" -- \ + '*.'{c,cc,cmake,h,js,m,mm,py,rb,sh,swift} \ + CMakeLists.txt '**/CMakeLists.txt' \ + ':(exclude)**/third_party/**') + +if [[ $result ]]; then + echo "$result" + echo "ERROR: Missing copyright notices in the files above. Please fix." 
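+  # Exit non-zero so the CI check fails when copyright notices are missing.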
+ exit 1 +fi diff --git a/scripts/check_filename_spaces.sh b/scripts/check_filename_spaces.sh new file mode 100755 index 0000000..e1eebab --- /dev/null +++ b/scripts/check_filename_spaces.sh @@ -0,0 +1,39 @@ +#!/bin/bash + +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Fail on spaces in file names, excluding the patterns listed below. + +# A sed program that removes filename patterns that are allowed to have spaces +# in them. +function remove_valid_names() { + sed ' + # Xcode-generated asset files + /Assets.xcassets/ d + + # Files without spaces + /^[^ ]*$/ d + ' +} + +count=$(git ls-files | remove_valid_names | wc -l | xargs) + +if [[ ${count} != 0 ]]; then + echo 'ERROR: Spaces in filenames are not permitted in this repo. Please fix.' + echo '' + + git ls-files | remove_valid_names + exit 1 +fi diff --git a/scripts/check_secrets.sh b/scripts/check_secrets.sh new file mode 100755 index 0000000..1d56743 --- /dev/null +++ b/scripts/check_secrets.sh @@ -0,0 +1,35 @@ +#!/bin/sh + +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# Check if secrets are available + +set -x +echo "GITHUB_BASE_REF: ${GITHUB_BASE_REF:-}" +echo "GITHUB_HEAD_REF: ${GITHUB_HEAD_REF:-}" + +check_secrets() +{ + # GitHub Actions: Secrets are available if we're not running on a fork. + # See https://help.github.com/en/actions/automating-your-workflow-with-github-actions/using-environment-variables + # TODO- Both GITHUB_BASE_REF and GITHUB_HEAD_REF are set in master repo + # PRs even thought the docs say otherwise. They are not set in cron jobs on master. + # Investigate how do to distinguish fork PRs from master repo PRs. + if [[ -n "${GITHUB_WORKFLOW:-}" ]]; then + return 0 + fi + return 1 +} diff --git a/scripts/check_whitespace.sh b/scripts/check_whitespace.sh new file mode 100755 index 0000000..e59691e --- /dev/null +++ b/scripts/check_whitespace.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
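+
+# Usage: scripts/check_whitespace.sh (no arguments; scans the whole tree with
+# `git grep` and exits non-zero if any trailing whitespace is found).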
+ +# Fail on an trailing whitespace characters, excluding +# * binary files (-I) +# * nanopb-generated files +# * protoc-generated files +# +# Note: specifying revisions we care about makes this go slower than just +# grepping through the whole repo. +options=( + -n # show line numbers + -I # exclude binary files + ' $' +) + +git grep "${options[@]}" + +if [[ $? == 0 ]]; then + echo "ERROR: Trailing whitespace found in the files above. Please fix." + exit 1 +fi diff --git a/scripts/style.sh b/scripts/style.sh new file mode 100755 index 0000000..fac54fe --- /dev/null +++ b/scripts/style.sh @@ -0,0 +1,131 @@ +#!/bin/bash + +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Usage: +# ./scripts/style.sh [branch-name | filenames] +# +# With no arguments, formats all eligible files in the repo +# Pass a branch name to format all eligible files changed since that branch +# Pass a specific file or directory name to format just files found there +# +# Commonly +# ./scripts/style.sh main + +# Ensure that tools in `Mintfile` are installed locally to avoid permissions +# problems that would otherwise arise from the default of installing in +# /usr/local. + +export MINT_PATH=Mint + +if ! which mint >/dev/null 2>&1; then + echo "mint is not available, install with 'brew install mint'" + exit 1 +fi + +system=$(uname -s) + +# Joins the given arguments with the separator given as the first argument. +function join() { + local IFS="$1" + shift + echo "$*" +} + +# Rules to disable in swiftformat: +swift_disable=( + # sortedImports is broken, sorting into the middle of the copyright notice. + sortedImports + + # Too many of our swift files have simplistic examples. While technically + # it's correct to remove the unused argument labels, it makes our examples + # look wrong. + unusedArguments + + # We prefer trailing braces. + wrapMultilineStatementBraces +) + +swift_options=( + # Mimic Google style. + --indent 2 + --maxwidth 100 + --wrapparameters afterfirst + --disable $(join , "${swift_disable[@]}") +) + +if [[ $# -gt 0 && "$1" == "test-only" ]]; then + test_only=true + swift_options+=(--dryrun) + shift +else + test_only=false +fi + +#TODO - Find a way to handle spaces in filenames + +files=$( +( + if [[ $# -gt 0 ]]; then + if git rev-parse "$1" -- >& /dev/null; then + # Argument was a branch name show files changed since that branch + git diff --name-only --relative --diff-filter=ACMR "$1" + else + # Otherwise assume the passed things are files or directories + find "$@" -type f + fi + else + # Do everything by default + find . -type f + fi +) | sed -E -n ' +# find . 
includes a leading "./" that git does not include +s%^./%% + +# Build outputs +\%^build/% d +\%^Debug/% d +\%^Release/% d +\%^.build/% d +\%^.swiftpm/% d + +# Sources controlled outside this tree +\%/third_party/% d + +# Sources pulled in by the Mint package manager +\%^mint% d + +# Format Swift sources only +\%\.swift$% p +' +) + +needs_formatting=false +for f in $files; do + # Match output that says: + # 1/1 files would have been formatted. (with --dryrun) + # 1/1 files formatted. (without --dryrun) + mint run swiftformat "${swift_options[@]}" "$f" 2>&1 | grep '^1/1 files' > /dev/null + if [[ "$test_only" == true && $? -ne 1 ]]; then + echo "$f needs formatting." + needs_formatting=true + fi +done + +if [[ "$needs_formatting" == true ]]; then + echo "Proposed commit is not style compliant." + echo "Run scripts/style.sh and git add the result." + exit 1 +fi diff --git a/scripts/third_party/travis/LICENSE b/scripts/third_party/travis/LICENSE new file mode 100644 index 0000000..0cafc33 --- /dev/null +++ b/scripts/third_party/travis/LICENSE @@ -0,0 +1,20 @@ +MIT LICENSE + +Copyright (c) 2018 Travis CI GmbH + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/scripts/third_party/travis/retry.sh b/scripts/third_party/travis/retry.sh new file mode 100755 index 0000000..4f55eeb --- /dev/null +++ b/scripts/third_party/travis/retry.sh @@ -0,0 +1,54 @@ +#!/bin/bash + +#MIT LICENSE +# +#Copyright (c) 2018 Travis CI GmbH +# +#Permission is hereby granted, free of charge, to any person obtaining a copy of +#this software and associated documentation files (the "Software"), to deal in +#the Software without restriction, including without limitation the rights to +#use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +#the Software, and to permit persons to whom the Software is furnished to do so, +#subject to the following conditions: +# +#The above copyright notice and this permission notice shall be included in all +#copies or substantial portions of the Software. +# +#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +#FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +#COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +#IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +#CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
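+
+# Usage: retry.sh <command> [args...]
+# Runs the command, retrying once on failure (RETRY_COUNT attempts in total).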
+ +# From https://github.com/tianon/travis-build/blob/e3400b7bd417407492e3916e9f7a62315f584ad5/lib/travis/build/templates/header.sh + +ANSI_RED="\033[31;1m" +ANSI_GREEN="\033[32;1m" +ANSI_YELLOW="\033[33;1m" +ANSI_RESET="\033[0m" +ANSI_CLEAR="\033[0K" + +# Number of attempts. +RETRY_COUNT=2 + +travis_retry() { + local result=0 + local count=1 + while [ $count -le $RETRY_COUNT ]; do + [ $result -ne 0 ] && { + echo -e "\n${ANSI_RED}The command \"$@\" failed. Retrying, $count of ${RETRY_COUNT}.${ANSI_RESET}\n" >&2 + } + "$@" && { result=0 && break; } || result=$? + count=$(($count + 1)) + sleep 1 + done + + [ $count -gt $RETRY_COUNT ] && { + echo -e "\n${ANSI_RED}The command \"$@\" failed ${RETRY_COUNT} times.${ANSI_RESET}\n" >&2 + } + + return $result +} + +travis_retry "$@" diff --git a/scripts/xcresult_logs.py b/scripts/xcresult_logs.py new file mode 100755 index 0000000..656ec7d --- /dev/null +++ b/scripts/xcresult_logs.py @@ -0,0 +1,290 @@ +#!/usr/bin/env python + +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Prints logs from test runs captured in Apple .xcresult bundles. + +USAGE: xcresult_logs.py -workspace -scheme [other flags...] + +xcresult_logs.py finds and displays the log output associated with an xcodebuild +invocation. Pass your entire xcodebuild command-line as arguments to this script +and it will find the output associated with the most recent invocation. +""" + +import json +import logging +import os +import re +import shutil +import subprocess +import sys + +from lib import command_trace + +_logger = logging.getLogger('xcresult') + + +def main(): + args = sys.argv[1:] + if not args: + sys.stdout.write(__doc__) + sys.exit(1) + + logging.basicConfig(format='%(message)s', level=logging.DEBUG) + + flags = parse_xcodebuild_flags(args) + + # If the result bundle path is specified in the xcodebuild flags, use that + # otherwise, deduce + xcresult_path = flags.get('-resultBundlePath') + if xcresult_path is None: + project = project_from_workspace_path(flags['-workspace']) + scheme = flags['-scheme'] + xcresult_path = find_xcresult_path(project, scheme) + + log_id = find_log_id(xcresult_path) + log = export_log(xcresult_path, log_id) + + # Avoid a potential UnicodeEncodeError raised by sys.stdout.write() by + # doing a relaxed encoding ourselves. + if hasattr(sys.stdout, 'buffer'): + log_encoded = log.encode('utf8', errors='backslashreplace') + sys.stdout.flush() + sys.stdout.buffer.write(log_encoded) + else: + log_encoded = log.encode('ascii', errors='backslashreplace') + log_decoded = log_encoded.decode('ascii', errors='strict') + sys.stdout.write(log_decoded) + + +# Most flags on the xcodebuild command-line are uninteresting, so only pull +# flags with known behavior with names in this set. +INTERESTING_FLAGS = { + '-resultBundlePath', + '-scheme', + '-workspace', +} + + +def parse_xcodebuild_flags(args): + """Parses the xcodebuild command-line. + + Extracts flags like -workspace and -scheme that dictate the location of the + logs. 
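+
+  For example, ['-workspace', 'Foo.xcworkspace', '-scheme', 'Bar', '-quiet']
+  yields {'-workspace': 'Foo.xcworkspace', '-scheme': 'Bar'}; flags not listed
+  in INTERESTING_FLAGS are ignored.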
+ """ + result = {} + key = None + for arg in args: + if arg.startswith('-'): + if arg in INTERESTING_FLAGS: + key = arg + elif key is not None: + result[key] = arg + key = None + + return result + + +def project_from_workspace_path(path): + """Extracts the project name from a workspace path. + Args: + path: The path to a .xcworkspace file + + Returns: + The project name from the basename of the path. For example, if path were + 'Firestore/Example/Firestore.xcworkspace', returns 'Firestore'. + """ + root, ext = os.path.splitext(os.path.basename(path)) + if ext == '.xcworkspace': + _logger.debug('Using project %s from workspace %s', root, path) + return root + + raise ValueError('%s is not a valid workspace path' % path) + + +def find_xcresult_path(project, scheme): + """Finds an xcresult bundle for the given project and scheme. + + Args: + project: The project name, like 'Firestore' + scheme: The Xcode scheme that was tested + + Returns: + The path to the newest xcresult bundle that matches. + """ + project_path = find_project_path(project) + bundle_dir = os.path.join(project_path, 'Logs/Test') + prefix = re.compile('([^-]*)-' + re.escape(scheme) + '-') + + _logger.debug('Logging for xcresult bundles in %s', bundle_dir) + xcresult = find_newest_matching_prefix(bundle_dir, prefix) + if xcresult is None: + raise LookupError( + 'Could not find xcresult bundle for %s in %s' % (scheme, bundle_dir)) + + _logger.debug('Found xcresult: %s', xcresult) + return xcresult + + +def find_project_path(project): + """Finds the newest project output within Xcode's DerivedData. + + Args: + project: A project name; the Foo in Foo.xcworkspace + + Returns: + The path containing the newest project output. + """ + path = os.path.expanduser('~/Library/Developer/Xcode/DerivedData') + prefix = re.compile(re.escape(project) + '-') + + # DerivedData has directories like Firestore-csljdukzqbozahdjizcvrfiufrkb. Use + # the most recent one if there are more than one such directory matching the + # project name. + result = find_newest_matching_prefix(path, prefix) + if result is None: + raise LookupError( + 'Could not find project derived data for %s in %s' % (project, path)) + + _logger.debug('Using project derived data in %s', result) + return result + + +def find_newest_matching_prefix(path, prefix): + """Lists the given directory and returns the newest entry matching prefix. + + Args: + path: A directory to list + prefix: A regular expression that matches the filenames to consider + + Returns: + The path to the newest entry in the directory whose basename starts with + the prefix. + """ + entries = os.listdir(path) + result = None + for entry in entries: + if prefix.match(entry): + fq_entry = os.path.join(path, entry) + if result is None: + result = fq_entry + else: + result_mtime = os.path.getmtime(result) + entry_mtime = os.path.getmtime(fq_entry) + if entry_mtime > result_mtime: + result = fq_entry + + return result + + +def find_legacy_log_files(xcresult_path): + """Finds the log files produced by Xcode 10 and below.""" + + result = [] + + for root, dirs, files in os.walk(xcresult_path, topdown=True): + for file in files: + if file.endswith('.txt'): + file = os.path.join(root, file) + result.append(file) + + # Sort the files by creation time. + result.sort(key=lambda f: os.stat(f).st_ctime) + return result + + +def cat_files(files, output): + """Reads the contents of all the files and copies them to the output. 
+ + Args: + files: A list of filenames + output: A file-like object in which all the data should be copied. + """ + for file in files: + with open(file, 'r') as fd: + shutil.copyfileobj(fd, output) + + +def find_log_id(xcresult_path): + """Finds the id of the last action's logs. + + Args: + xcresult_path: The path to an xcresult bundle. + + Returns: + The id of the log output, suitable for use with xcresulttool get --id. + """ + parsed = xcresulttool_json('get', '--path', xcresult_path) + actions = parsed['actions']['_values'] + action = actions[-1] + + result = action['actionResult']['logRef']['id']['_value'] + _logger.debug('Using log id %s', result) + return result + + +def export_log(xcresult_path, log_id): + """Exports the log data with the given id from the xcresult bundle. + + Args: + xcresult_path: The path to an xcresult bundle. + log_id: The id that names the log output (obtained by find_log_id) + + Returns: + The logged output, as a string. + """ + contents = xcresulttool_json('get', '--path', xcresult_path, '--id', log_id) + + result = [] + collect_log_output(contents, result) + return ''.join(result) + + +def collect_log_output(activity_log, result): + """Recursively collects emitted output from the activity log. + + Args: + activity_log: Parsed JSON of an xcresult activity log. + result: An array into which all log data should be appended. + """ + output = activity_log.get('emittedOutput') + if output: + result.append(output['_value']) + else: + subsections = activity_log.get('subsections') + if subsections: + for subsection in subsections['_values']: + collect_log_output(subsection, result) + + +def xcresulttool(*args): + """Runs xcresulttool and returns its output as a string.""" + cmd = ['xcrun', 'xcresulttool'] + cmd.extend(args) + + command_trace.log(cmd) + + return subprocess.check_output(cmd) + + +def xcresulttool_json(*args): + """Runs xcresulttool and its output as parsed JSON.""" + args = list(args) + ['--format', 'json'] + contents = xcresulttool(*args) + return json.loads(contents) + + +if __name__ == '__main__': + main()