From e53bc1583d6d5eb666ee85d0dff065eeab20a9bc Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Paul=20Heidekr=C3=BCger?=
Date: Mon, 19 Aug 2024 21:55:01 +0200
Subject: [PATCH] Docs: Handle Throwing Expression in SpeziLLMOpenAI.md's
 LLMOpenAIDemo Example (#61)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

# Handle Throwing Expression in SpeziLLMOpenAI.md's LLMOpenAIDemo Example

## :recycle: Current situation & Problem

After copying a code example from the SpeziLLM documentation, I noticed that ...

- a throwing expression wasn’t being handled. To make life easier for future readers, add a simple `do … catch` for handling any exceptions that may be thrown. Also, happy to take suggestions for a more elegant way to do this!
- some imports were missing. To make life easier for future readers, add the missing imports. I hope I didn’t miss any.
- I was getting an error around the use of `@EnvironmentObject`. Fix the error by substituting `@EnvironmentObject` with `@Environment`, which is also in line with the code in `TestApp`.

## :gear: Release Notes

- Update documentation

## :books: Documentation

- Update documentation

## :pencil: Code of Conduct & Contributing Guidelines

By submitting this pull request, you agree to follow our [Code of Conduct](https://github.com/StanfordSpezi/.github/blob/main/CODE_OF_CONDUCT.md) and [Contributing Guidelines](https://github.com/StanfordSpezi/.github/blob/main/CONTRIBUTING.md):

- [x] I agree to follow the [Code of Conduct](https://github.com/StanfordSpezi/.github/blob/main/CODE_OF_CONDUCT.md) and [Contributing Guidelines](https://github.com/StanfordSpezi/.github/blob/main/CONTRIBUTING.md).

---------

Co-authored-by: Paul Schmiedmayer
---
 README.md                                     | 32 +++++++++++++++----
 Sources/SpeziLLMFog/LLMFogSession.swift       |  8 +++--
 .../SpeziLLMFog.docc/SpeziLLMFog.md           |  8 +++--
 Sources/SpeziLLMLocal/LLMLocalSession.swift   |  8 +++--
 .../SpeziLLMLocal.docc/SpeziLLMLocal.md       |  8 +++--
 Sources/SpeziLLMOpenAI/LLMOpenAISession.swift | 12 +++++--
 .../SpeziLLMOpenAI.docc/SpeziLLMOpenAI.md     | 21 ++++++++++--
 7 files changed, 78 insertions(+), 19 deletions(-)

diff --git a/README.md b/README.md
index 311b3b70..f173a3a8 100644
--- a/README.md
+++ b/README.md
@@ -127,8 +127,12 @@ struct LLMLocalDemoView: View {
                     )
                 )
 
-                for try await token in try await llmSession.generate() {
-                    responseText.append(token)
+                do {
+                    for try await token in try await llmSession.generate() {
+                        responseText.append(token)
+                    }
+                } catch {
+                    // Handle errors here. E.g., you can use `ViewState` and `viewStateAlert` from SpeziViews.
                 }
             }
     }
@@ -150,6 +154,10 @@ In order to use OpenAI LLMs within the Spezi ecosystem, the [SpeziLLM](https://s
 See the [SpeziLLM documentation](https://swiftpackageindex.com/stanfordspezi/spezillm/documentation/spezillm) for more details.
 
 ```swift
+import Spezi
+import SpeziLLM
+import SpeziLLMOpenAI
+
 class LLMOpenAIAppDelegate: SpeziAppDelegate {
     override var configuration: Configuration {
         Configuration {
@@ -171,6 +179,10 @@ The code example below showcases the interaction with an OpenAI LLM through the
 The `LLMOpenAISchema` defines the type and configurations of the to-be-executed `LLMOpenAISession`. This transformation is done via the [`LLMRunner`](https://swiftpackageindex.com/stanfordspezi/spezillm/documentation/spezillm/llmrunner) that uses the `LLMOpenAIPlatform`. The inference via `LLMOpenAISession/generate()` returns an `AsyncThrowingStream` that yields all generated `String` pieces.
 
 ```swift
+import SpeziLLM
+import SpeziLLMOpenAI
+import SwiftUI
+
 struct LLMOpenAIDemoView: View {
     @Environment(LLMRunner.self) var runner
     @State var responseText = ""
@@ -189,8 +201,12 @@ struct LLMOpenAIDemoView: View {
                     )
                 )
 
-                for try await token in try await llmSession.generate() {
-                    responseText.append(token)
+                do {
+                    for try await token in try await llmSession.generate() {
+                        responseText.append(token)
+                    }
+                } catch {
+                    // Handle errors here. E.g., you can use `ViewState` and `viewStateAlert` from SpeziViews.
                 }
             }
     }
@@ -263,8 +279,12 @@ struct LLMFogDemoView: View {
                     )
                 )
 
-                for try await token in try await llmSession.generate() {
-                    responseText.append(token)
+                do {
+                    for try await token in try await llmSession.generate() {
+                        responseText.append(token)
+                    }
+                } catch {
+                    // Handle errors here. E.g., you can use `ViewState` and `viewStateAlert` from SpeziViews.
                 }
             }
     }
diff --git a/Sources/SpeziLLMFog/LLMFogSession.swift b/Sources/SpeziLLMFog/LLMFogSession.swift
index 6a17194a..4e5cf452 100644
--- a/Sources/SpeziLLMFog/LLMFogSession.swift
+++ b/Sources/SpeziLLMFog/LLMFogSession.swift
@@ -50,8 +50,12 @@ import SpeziLLM
 ///                 )
 ///             )
 ///
-///             for try await token in try await llmSession.generate() {
-///                 responseText.append(token)
+///             do {
+///                 for try await token in try await llmSession.generate() {
+///                     responseText.append(token)
+///                 }
+///             } catch {
+///                 // Handle errors here. E.g., you can use `ViewState` and `viewStateAlert` from SpeziViews.
 ///             }
 ///         }
 ///     }
diff --git a/Sources/SpeziLLMFog/SpeziLLMFog.docc/SpeziLLMFog.md b/Sources/SpeziLLMFog/SpeziLLMFog.docc/SpeziLLMFog.md
index 5ee63643..d968332c 100644
--- a/Sources/SpeziLLMFog/SpeziLLMFog.docc/SpeziLLMFog.md
+++ b/Sources/SpeziLLMFog/SpeziLLMFog.docc/SpeziLLMFog.md
@@ -100,8 +100,12 @@ struct LLMFogDemoView: View {
                     )
                 )
 
-                for try await token in try await llmSession.generate() {
-                    responseText.append(token)
+                do {
+                    for try await token in try await llmSession.generate() {
+                        responseText.append(token)
+                    }
+                } catch {
+                    // Handle errors here. E.g., you can use `ViewState` and `viewStateAlert` from SpeziViews.
                 }
             }
     }
diff --git a/Sources/SpeziLLMLocal/LLMLocalSession.swift b/Sources/SpeziLLMLocal/LLMLocalSession.swift
index 61a2fa81..6771ba84 100644
--- a/Sources/SpeziLLMLocal/LLMLocalSession.swift
+++ b/Sources/SpeziLLMLocal/LLMLocalSession.swift
@@ -46,8 +46,12 @@ import SpeziLLM
 ///                 )
 ///             )
 ///
-///             for try await token in try await llmSession.generate() {
-///                 responseText.append(token)
+///             do {
+///                 for try await token in try await llmSession.generate() {
+///                     responseText.append(token)
+///                 }
+///             } catch {
+///                 // Handle errors here. E.g., you can use `ViewState` and `viewStateAlert` from SpeziViews.
 ///             }
 ///         }
 ///     }
diff --git a/Sources/SpeziLLMLocal/SpeziLLMLocal.docc/SpeziLLMLocal.md b/Sources/SpeziLLMLocal/SpeziLLMLocal.docc/SpeziLLMLocal.md
index e246f8e9..c6823d3f 100644
--- a/Sources/SpeziLLMLocal/SpeziLLMLocal.docc/SpeziLLMLocal.md
+++ b/Sources/SpeziLLMLocal/SpeziLLMLocal.docc/SpeziLLMLocal.md
@@ -111,8 +111,12 @@ struct LLMLocalDemoView: View {
                     )
                 )
 
-                for try await token in try await llmSession.generate() {
-                    responseText.append(token)
+                do {
+                    for try await token in try await llmSession.generate() {
+                        responseText.append(token)
+                    }
+                } catch {
+                    // Handle errors here. E.g., you can use `ViewState` and `viewStateAlert` from SpeziViews.
                 }
             }
     }
diff --git a/Sources/SpeziLLMOpenAI/LLMOpenAISession.swift b/Sources/SpeziLLMOpenAI/LLMOpenAISession.swift
index d9e0e6ab..52347518 100644
--- a/Sources/SpeziLLMOpenAI/LLMOpenAISession.swift
+++ b/Sources/SpeziLLMOpenAI/LLMOpenAISession.swift
@@ -33,6 +33,10 @@ import SpeziSecureStorage
 /// The example below demonstrates a minimal usage of the ``LLMOpenAISession`` via the `LLMRunner`.
 ///
 /// ```swift
+/// import SpeziLLM
+/// import SpeziLLMOpenAI
+/// import SwiftUI
+///
 /// struct LLMOpenAIDemoView: View {
 ///     @Environment(LLMRunner.self) var runner
 ///     @State var responseText = ""
@@ -51,8 +55,12 @@ import SpeziSecureStorage
 ///                 )
 ///             )
 ///
-///             for try await token in try await llmSession.generate() {
-///                 responseText.append(token)
+///             do {
+///                 for try await token in try await llmSession.generate() {
+///                     responseText.append(token)
+///                 }
+///             } catch {
+///                 // Handle errors here. E.g., you can use `ViewState` and `viewStateAlert` from SpeziViews.
 ///             }
 ///         }
 ///     }
diff --git a/Sources/SpeziLLMOpenAI/SpeziLLMOpenAI.docc/SpeziLLMOpenAI.md b/Sources/SpeziLLMOpenAI/SpeziLLMOpenAI.docc/SpeziLLMOpenAI.md
index cecc6209..8b1634c6 100644
--- a/Sources/SpeziLLMOpenAI/SpeziLLMOpenAI.docc/SpeziLLMOpenAI.md
+++ b/Sources/SpeziLLMOpenAI/SpeziLLMOpenAI.docc/SpeziLLMOpenAI.md
@@ -65,6 +65,10 @@ In order to use OpenAI LLMs, the [SpeziLLM](https://swiftpackageindex.com/stanfo
 See the [SpeziLLM documentation](https://swiftpackageindex.com/stanfordspezi/spezillm/documentation/spezillm) for more details.
 
 ```swift
+import Spezi
+import SpeziLLM
+import SpeziLLMOpenAI
+
 class LLMOpenAIAppDelegate: SpeziAppDelegate {
     override var configuration: Configuration {
         Configuration {
@@ -86,6 +90,10 @@ The ``LLMOpenAISession`` contains the ``LLMOpenAISession/context`` property whic
 Ensure the property always contains all necessary information, as the ``LLMOpenAISession/generate()`` function executes the inference based on the ``LLMOpenAISession/context``
 
 ```swift
+import SpeziLLM
+import SpeziLLMOpenAI
+import SwiftUI
+
 struct LLMOpenAIDemoView: View {
     @Environment(LLMRunner.self) var runner
     @State var responseText = ""
@@ -104,8 +112,12 @@ struct LLMOpenAIDemoView: View {
                     )
                 )
 
-                for try await token in try await llmSession.generate() {
-                    responseText.append(token)
+                do {
+                    for try await token in try await llmSession.generate() {
+                        responseText.append(token)
+                    }
+                } catch {
+                    // Handle errors here. E.g., you can use `ViewState` and `viewStateAlert` from SpeziViews.
                 }
             }
     }
@@ -125,10 +137,12 @@ The ``LLMOpenAIAPITokenOnboardingStep`` provides a view that can be used for the
 First, create a new view to show the onboarding step:
 
 ```swift
+import SpeziLLMOpenAI
 import SpeziOnboarding
+import SwiftUI
 
 struct OpenAIAPIKey: View {
-    @EnvironmentObject private var onboardingNavigationPath: OnboardingNavigationPath
+    @Environment(OnboardingNavigationPath.self) private var onboardingNavigationPath: OnboardingNavigationPath
 
     var body: some View {
         LLMOpenAIAPITokenOnboardingStep {
@@ -142,6 +156,7 @@ This view can then be added to the `OnboardingFlow` within the Spezi Template Ap
 
 ```swift
 import SpeziOnboarding
+import SwiftUI
 
 struct OnboardingFlow: View {
     @AppStorage(StorageKeys.onboardingFlowComplete) var completedOnboardingFlow = false
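
For reviewers who want to try the `do … catch` pattern this patch applies throughout the docs, here is a minimal, self-contained sketch. The `mockTokenStream()` helper is hypothetical and merely stands in for the `llmSession.generate()` calls in the hunks above, which likewise return an `AsyncThrowingStream`:

```swift
import Foundation

// Hypothetical stand-in for `llmSession.generate()` from the hunks above:
// any API returning an `AsyncThrowingStream<String, Error>` fits the pattern.
func mockTokenStream() -> AsyncThrowingStream<String, Error> {
    AsyncThrowingStream { continuation in
        for token in ["Hello", ", ", "world", "!"] {
            continuation.yield(token)
        }
        continuation.finish()
    }
}

@main
struct TokenStreamDemo {
    static func main() async {
        var responseText = ""

        do {
            // `for try await` can throw on every iteration, so the whole loop,
            // not just the call that creates the stream, belongs inside `do`.
            for try await token in mockTokenStream() {
                responseText.append(token)
            }
        } catch {
            // In a Spezi app, surface this via `ViewState`/`viewStateAlert` from SpeziViews.
            print("Generation failed: \(error)")
        }

        print(responseText) // "Hello, world!"
    }
}
```

Wrapping only the stream-creating call would miss errors thrown mid-stream, which is why the diffs move the entire loop into the `do` block.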