diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 00000000000..9c2c295f023 --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,36 @@ +// For format details, see https://aka.ms/devcontainer.json. For config options, see the +// README at: https://github.com/devcontainers/templates/tree/main/src/ubuntu +{ + "name": "Ubuntu", + // Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile + "image": "mcr.microsoft.com/devcontainers/base:jammy", + "features": { + "ghcr.io/devcontainers/features/go:1": { + "version": "latest" + }, + "ghcr.io/devcontainers/features/hugo:1": { + "extended": true, + "version": "latest" + }, + "ghcr.io/devcontainers/features/node:1": { + "nodeGypDependencies": true, + "version": "lts", + "nvmVersion": "latest" + } + }, + "customizations": { + "vscode": { + "extensions": [ + "streetsidesoftware.code-spell-checker", + "tamasfe.even-better-toml", + "davidanson.vscode-markdownlint", + "budparr.language-hugo-vscode" + ], + "settings": { + "git.alwaysSignOff": true + } + } + }, + "forwardPorts": [1313], + "postAttachCommand": "bash scripts/init-container.sh" +} diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 00000000000..5dc46e6b38b --- /dev/null +++ b/.gitattributes @@ -0,0 +1,3 @@ +* text=auto eol=lf +*.{cmd,[cC][mM][dD]} text eol=crlf +*.{bat,[bB][aA][tT]} text eol=crlf \ No newline at end of file diff --git a/README.md b/README.md index a189c74f09e..98bc0a4c4ff 100644 --- a/README.md +++ b/README.md @@ -29,21 +29,44 @@ The Dapr docs are built using [Hugo](https://gohugo.io/) with the [Docsy](https: The [daprdocs](./daprdocs) directory contains the hugo project, markdown files, and theme configurations. -## Pre-requisites +## Setup with a devcontainer -- [Hugo extended version](https://gohugo.io/getting-started/installing) -- [Node.js](https://nodejs.org/en/) +This repository comes with a [devcontainer](/.devcontainer/devcontainer.json) configuration that automatically installs all the required dependencies and VSCode extensions to build and run the docs. + +This devcontainer can be used to develop locally with VSCode or via GitHub Codespaces completely in the browser. Other IDEs that support [devcontainers](https://containers.dev/) can be used but won't have the extensions preconfigured and will likely have different performance characteristics. + +### Pre-requisites -## Environment setup +- [Docker Desktop](https://www.docker.com/products/docker-desktop) +- [VSCode](https://code.visualstudio.com/download) -1. Ensure pre-requisites are installed -2. Clone this repository +### Environment setup + +1. [Fork](https://github.com/dapr/docs/fork) and clone this repository. + +1. Open the forked repository in VS Code ```sh -git clone https://github.com/dapr/docs.git +code . ``` -3. Change to daprdocs directory: +1. When prompted, click "Reopen in Container" to open the repository in the devcontainer. + +Continue with the [Run local server](#run-local-server) steps. + +## Setup without a devcontainer + +### Pre-requisites + +- [Hugo extended version](https://gohugo.io/getting-started/installing) +- [Node.js](https://nodejs.org/en/) + +### Environment setup + +1. Ensure pre-requisites are installed. +1. [Fork](https://github.com/dapr/docs/fork) and clone this repository. + +1. Change to daprdocs directory: ```sh cd ./daprdocs @@ -63,7 +86,7 @@ npm install ## Run local server -1. 
Make sure you're still in the `daprdocs` directory +1. Make sure you're in the `daprdocs` directory 2. Run ```sh @@ -72,14 +95,13 @@ hugo server 3. Navigate to `http://localhost:1313/` - ## Update docs -1. Fork repo into your account -1. Create new branch -1. Commit and push changes to forked branch -1. Submit pull request from downstream branch to the upstream branch for the correct version you are targeting -1. Staging site will automatically get created and linked to PR to review and test +1. Ensure you are in your forked repo +2. Create new branch +3. Commit and push changes to forked branch +4. Submit pull request from downstream branch to the upstream branch for the correct version you are targeting +5. Staging site will automatically get created and linked to PR to review and test ## Code of Conduct diff --git a/daprdocs/config.toml b/daprdocs/config.toml index 3aa6d95f85a..bcc1eb945eb 100644 --- a/daprdocs/config.toml +++ b/daprdocs/config.toml @@ -124,6 +124,18 @@ id = "G-60C6Q1ETC1" source = "../translations/docs-zh/content/sdks_dotnet" target = "content/developing-applications/sdks/dotnet" lang = "zh-hans" + [[module.mounts]] + source = "../translations/docs-zh/content/sdks_java" + target = "content/developing-applications/sdks/java" + lang = "zh-hans" + [[module.mounts]] + source = "../translations/docs-zh/content/sdks_go" + target = "content/developing-applications/sdks/go" + lang = "zh-hans" + [[module.mounts]] + source = "../translations/docs-zh/content/sdks_js" + target = "content/developing-applications/sdks/js" + lang = "zh-hans" # Markdown Engine - Allow inline html [markup] @@ -197,18 +209,6 @@ url_latest_version = "https://docs.dapr.io" [[params.versions]] version = "v1.7" url = "https://v1-7.docs.dapr.io" -[[params.versions]] - version = "v1.6" - url = "https://v1-6.docs.dapr.io" -[[params.versions]] - version = "v1.5" - url = "https://v1-5.docs.dapr.io" -[[params.versions]] - version = "v1.4" - url = "https://v1-4.docs.dapr.io" -[[params.versions]] - version = "v1.3" - url = "https://v1-3.docs.dapr.io" # UI Customization [params.ui] diff --git a/daprdocs/content/en/concepts/security-concept.md b/daprdocs/content/en/concepts/security-concept.md index d7ceef443b4..64667e34d4b 100644 --- a/daprdocs/content/en/concepts/security-concept.md +++ b/daprdocs/content/en/concepts/security-concept.md @@ -81,7 +81,7 @@ The diagram below shows how the Sentry system service issues certificates for ap ### Preventing IP addresses on Dapr -To prevent Dapr sidecars from being called on any IP address (especially in production environments such as Kubernetes), Dapr restricts its listening IP addresses only to `localhost`. Use the [dapr-listen-addresses]({{}}) setting you need to enable other addresses. +To prevent Dapr sidecars from being called on any IP address (especially in production environments such as Kubernetes), Dapr restricts its listening IP addresses to `localhost`. Use the [dapr-listen-addresses]({{}}) setting if you need to enable access from external addresses. 
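As an illustration only (the annotation name below comes from the Dapr arguments and annotations reference, and the values are examples rather than recommendations), keeping the sidecar restricted to loopback on Kubernetes can look like this:

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myapp
spec:
  replicas: 1
  selector:
    matchLabels:
      app: myapp
  template:
    metadata:
      labels:
        app: myapp
      annotations:
        dapr.io/enabled: "true"
        dapr.io/app-id: "myapp"
        # Loopback-only is the default; list additional addresses only if you truly need them
        dapr.io/sidecar-listen-addresses: "127.0.0.1"
    spec:
      containers:
      - name: myapp
        image: myapp:latest
```

In self-hosted mode, the same control is exposed through the daprd `--dapr-listen-addresses` argument described in that reference.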
## Secure Dapr to application communication diff --git a/daprdocs/content/en/contributing/codespaces.md b/daprdocs/content/en/contributing/codespaces.md index bbefdcc626f..630b932abb4 100644 --- a/daprdocs/content/en/contributing/codespaces.md +++ b/daprdocs/content/en/contributing/codespaces.md @@ -30,6 +30,7 @@ If you haven't already forked the repo, creating the Codespace will also create - [dapr/dapr](https://github.com/dapr/dapr) - [dapr/components-contrib](https://github.com/dapr/components-contrib) - [dapr/cli](https://github.com/dapr/cli) +- [dapr/docs](https://github.com/dapr/docs) - [dapr/python-sdk](https://github.com/dapr/python-sdk) ## Developing Dapr Components in a Codespace diff --git a/daprdocs/content/en/contributing/docs-contrib/contributing-docs.md b/daprdocs/content/en/contributing/docs-contrib/contributing-docs.md index 85d16302b70..2ed1f81bd0e 100644 --- a/daprdocs/content/en/contributing/docs-contrib/contributing-docs.md +++ b/daprdocs/content/en/contributing/docs-contrib/contributing-docs.md @@ -8,6 +8,8 @@ description: Get started with contributing to the Dapr docs In this guide, you'll learn how to contribute to the [Dapr docs repository](https://github.com/dapr/docs). Since Dapr docs are published to [docs.dapr.io](https://docs.dapr.io), you must make sure your contributions compile and publish correctly. + + ## Prerequisites Before contributing to the Dapr docs: diff --git a/daprdocs/content/en/developing-applications/building-blocks/actors/actors-features-concepts.md b/daprdocs/content/en/developing-applications/building-blocks/actors/actors-features-concepts.md index a9e82c287b2..e486b3243ec 100644 --- a/daprdocs/content/en/developing-applications/building-blocks/actors/actors-features-concepts.md +++ b/daprdocs/content/en/developing-applications/building-blocks/actors/actors-features-concepts.md @@ -14,7 +14,9 @@ Now that you've learned about the [actor building block]({{< ref "actors-overvie Dapr actors are virtual, meaning that their lifetime is not tied to their in-memory representation. As a result, they do not need to be explicitly created or destroyed. The Dapr actor runtime automatically activates an actor the first time it receives a request for that actor ID. If an actor is not used for a period of time, the Dapr actor runtime garbage-collects the in-memory object. It will also maintain knowledge of the actor's existence should it need to be reactivated later. -Invocation of actor methods and reminders reset the idle time, e.g. reminder firing will keep the actor active. Actor reminders fire whether an actor is active or inactive, if fired for inactive actor, it will activate the actor first. Actor timers do not reset the idle time, so timer firing will not keep the actor active. Timers only fire while the actor is active. +Invocation of actor methods, timers, and reminders reset the actor idle time. For example, a reminder firing keeps the actor active. +- Actor reminders fire whether an actor is active or inactive. If fired for an inactive actor, it activates the actor first. +- Actor timers firing reset the idle time; however, timers only fire while the actor is active. The idle timeout and scan interval Dapr runtime uses to see if an actor can be garbage-collected is configurable. This information can be passed when Dapr runtime calls into the actor service to get supported actor types. 
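To make that handshake concrete, here is a sketch of the JSON an application can return from its actor configuration endpoint when the Dapr runtime asks for supported actor types. The field names follow the actor runtime configuration reference; the values are placeholders, not defaults:

```json
{
  "entities": ["MyActorType"],
  "actorIdleTimeout": "1h",
  "actorScanInterval": "30s",
  "drainOngoingCallTimeout": "30s",
  "drainRebalancedActors": true
}
```

Here `actorIdleTimeout` controls how long an actor may remain idle before it is deactivated, and `actorScanInterval` controls how often the runtime scans for idle actors to garbage-collect.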
diff --git a/daprdocs/content/en/developing-applications/building-blocks/actors/actors-runtime-config.md b/daprdocs/content/en/developing-applications/building-blocks/actors/actors-runtime-config.md index ce7008c1962..99b08040217 100644 --- a/daprdocs/content/en/developing-applications/building-blocks/actors/actors-runtime-config.md +++ b/daprdocs/content/en/developing-applications/building-blocks/actors/actors-runtime-config.md @@ -59,7 +59,7 @@ public void ConfigureServices(IServiceCollection services) services.AddSingleton(); } ``` -[See the .NET SDK documentation on registring actors]({{< ref "dotnet-actors-usage.md#registring-actors" >}}). +[See the .NET SDK documentation on registering actors]({{< ref "dotnet-actors-usage.md#registring-actors" >}}). {{% /codetab %}} diff --git a/daprdocs/content/en/developing-applications/building-blocks/actors/howto-actors-partitioning.md b/daprdocs/content/en/developing-applications/building-blocks/actors/howto-actors-partitioning.md index ad3473d9093..0d4017096e9 100644 --- a/daprdocs/content/en/developing-applications/building-blocks/actors/howto-actors-partitioning.md +++ b/daprdocs/content/en/developing-applications/building-blocks/actors/howto-actors-partitioning.md @@ -57,7 +57,7 @@ public void ConfigureServices(IServiceCollection services) } ``` -[See the .NET SDK documentation on registring actors]({{< ref "dotnet-actors-usage.md#registring-actors" >}}). +[See the .NET SDK documentation on registering actors]({{< ref "dotnet-actors-usage.md#registring-actors" >}}). {{% /codetab %}} diff --git a/daprdocs/content/en/developing-applications/building-blocks/actors/howto-actors.md b/daprdocs/content/en/developing-applications/building-blocks/actors/howto-actors.md index 9a309f26604..16c7bbf4383 100644 --- a/daprdocs/content/en/developing-applications/building-blocks/actors/howto-actors.md +++ b/daprdocs/content/en/developing-applications/building-blocks/actors/howto-actors.md @@ -26,7 +26,7 @@ Alternatively, you can use [Dapr SDKs to use actors]({{< ref "developing-applica ## Save state with actors -You can interact with Dapr via HTTP/gRPC endpoints to save state reliably using the Dapr actor state mangement capabaility. +You can interact with Dapr via HTTP/gRPC endpoints to save state reliably using the Dapr actor state management capability. To use actors, your state store must support multi-item transactions. This means your state store component must implement the `TransactionalStore` interface. diff --git a/daprdocs/content/en/developing-applications/building-blocks/bindings/howto-triggers.md b/daprdocs/content/en/developing-applications/building-blocks/bindings/howto-triggers.md index 56a24b0aece..70320818a3e 100644 --- a/daprdocs/content/en/developing-applications/building-blocks/bindings/howto-triggers.md +++ b/daprdocs/content/en/developing-applications/building-blocks/bindings/howto-triggers.md @@ -253,7 +253,7 @@ async function start() { } }); await server.binding.receive('checkout', async (orderId) => console.log(`Received Message: ${JSON.stringify(orderId)}`)); - await server.startServer(); + await server.start(); } ``` @@ -292,4 +292,4 @@ Event delivery guarantees are controlled by the binding implementation.
Dependin - [Bindings building block]({{< ref bindings >}}) - [Bindings API]({{< ref bindings_api.md >}}) - [Components concept]({{< ref components-concept.md >}}) -- [Supported bindings]({{< ref supported-bindings >}}) \ No newline at end of file +- [Supported bindings]({{< ref supported-bindings >}}) diff --git a/daprdocs/content/en/developing-applications/building-blocks/cryptography/cryptography-overview.md b/daprdocs/content/en/developing-applications/building-blocks/cryptography/cryptography-overview.md index 79792f588cc..e8af63f1aaf 100644 --- a/daprdocs/content/en/developing-applications/building-blocks/cryptography/cryptography-overview.md +++ b/daprdocs/content/en/developing-applications/building-blocks/cryptography/cryptography-overview.md @@ -45,7 +45,7 @@ While both HTTP and gRPC are supported in the alpha release, using the gRPC APIs ### Cryptographic components -The Dapr cryptography building block incldues two kinds of components: +The Dapr cryptography building block includes two kinds of components: - **Components that allow interacting with management services or vaults ("key vaults").** Similar to how Dapr offers an "abstraction layer" on top of various secret stores or state stores, these components allow interacting with various key vaults such as Azure Key Vault (with more coming in future Dapr releases). With these components, cryptographic operations on the private keys are performed within the vaults and Dapr never sees your private keys. @@ -85,4 +85,5 @@ Watch this [demo video of the Cryptography API from the Dapr Community Call #83] ## Related links - [Cryptography overview]({{< ref cryptography-overview.md >}}) -- [Cryptography component specs]({{< ref supported-cryptography >}}) \ No newline at end of file +- [Cryptography component specs]({{< ref supported-cryptography >}}) +- [Cryptography API reference doc]({{< ref cryptography_api >}}) \ No newline at end of file diff --git a/daprdocs/content/en/developing-applications/building-blocks/pubsub/pubsub-cloudevents.md b/daprdocs/content/en/developing-applications/building-blocks/pubsub/pubsub-cloudevents.md index 47c115ef6c2..ca14d145eae 100644 --- a/daprdocs/content/en/developing-applications/building-blocks/pubsub/pubsub-cloudevents.md +++ b/daprdocs/content/en/developing-applications/building-blocks/pubsub/pubsub-cloudevents.md @@ -160,7 +160,7 @@ The JSON payload then reflects the new `source` and `id` values: ``` {{% alert title="Important" color="warning" %}} -While you can replace `traceid`/`traceparent` and `tracestate`, doing this may interfere with tracing events and report inconsistent results in tracing tools. It's recommended to use Open Telementry for distributed traces. [Learn more about distributed tracing.]({{< ref tracing-overview.md >}}) +While you can replace `traceid`/`traceparent` and `tracestate`, doing this may interfere with tracing events and report inconsistent results in tracing tools. It's recommended to use Open Telemetry for distributed traces. 
[Learn more about distributed tracing.]({{< ref tracing-overview.md >}}) {{% /alert %}} diff --git a/daprdocs/content/en/developing-applications/building-blocks/pubsub/pubsub-overview.md b/daprdocs/content/en/developing-applications/building-blocks/pubsub/pubsub-overview.md index ed6b72cc38d..041dcec8b82 100644 --- a/daprdocs/content/en/developing-applications/building-blocks/pubsub/pubsub-overview.md +++ b/daprdocs/content/en/developing-applications/building-blocks/pubsub/pubsub-overview.md @@ -114,7 +114,7 @@ All Dapr pub/sub components support the at-least-once guarantee. ### Consumer groups and competing consumers pattern -Dapr handles the burden of dealing with consumer groups and the competing consumers pattern. In the competing consumers pattern, multiple application instances using a single consumer group compete for the message. Dapr enforces the competing consumer pattern when replicas use the same `app-id` without explict consumer group overrides. +Dapr handles the burden of dealing with consumer groups and the competing consumers pattern. In the competing consumers pattern, multiple application instances using a single consumer group compete for the message. Dapr enforces the competing consumer pattern when replicas use the same `app-id` without explicit consumer group overrides. When multiple instances of the same application (with same `app-id`) subscribe to a topic, Dapr delivers each message to *only one instance of **that** application*. This concept is illustrated in the diagram below. diff --git a/daprdocs/content/en/developing-applications/building-blocks/state-management/howto-state-query-api.md b/daprdocs/content/en/developing-applications/building-blocks/state-management/howto-state-query-api.md index 6b321d34d11..1fe01bf0efa 100644 --- a/daprdocs/content/en/developing-applications/building-blocks/state-management/howto-state-query-api.md +++ b/daprdocs/content/en/developing-applications/building-blocks/state-management/howto-state-query-api.md @@ -28,9 +28,14 @@ The `filter` specifies the query conditions in the form of a tree, where each no The following operations are supported: -| Operator | Operands | Description | -|----------|-------------|--------------| -| `EQ` | key:value | key == value | +| Operator | Operands | Description | +|----------|-------------|--------------------------------------------------------------| +| `EQ` | key:value | key == value | +| `NEQ` | key:value | key != value | +| `GT` | key:value | key > value | +| `GTE` | key:value | key >= value | +| `LT` | key:value | key < value | +| `LTE` | key:value | key <= value | | `IN` | key:[]value | key == value[0] OR key == value[1] OR ... OR key == value[n] | | `AND` | []operation | operation[0] AND operation[1] AND ... AND operation[n] | | `OR` | []operation | operation[0] OR operation[1] OR ... 
OR operation[n] | diff --git a/daprdocs/content/en/developing-applications/building-blocks/workflow/workflow-patterns.md b/daprdocs/content/en/developing-applications/building-blocks/workflow/workflow-patterns.md index 9d23a64062f..3caeddad25f 100644 --- a/daprdocs/content/en/developing-applications/building-blocks/workflow/workflow-patterns.md +++ b/daprdocs/content/en/developing-applications/building-blocks/workflow/workflow-patterns.md @@ -109,21 +109,53 @@ catch (TaskFailedException) // Task failures are surfaced as TaskFailedException ```java -public static void main(String[] args) throws InterruptedException { - DaprWorkflowClient client = new DaprWorkflowClient(); +public class ChainWorkflow extends Workflow { + @Override + public WorkflowStub create() { + return ctx -> { + StringBuilder sb = new StringBuilder(); + String wfInput = ctx.getInput(String.class); + String result1 = ctx.callActivity("Step1", wfInput, String.class).await(); + String result2 = ctx.callActivity("Step2", result1, String.class).await(); + String result3 = ctx.callActivity("Step3", result2, String.class).await(); + String result = sb.append(result1).append(',').append(result2).append(',').append(result3).toString(); + ctx.complete(result); + }; + } +} - try (client) { - client.raiseEvent(instanceId, "TestEvent", "TestEventPayload"); + class Step1 implements WorkflowActivity { - System.out.println(separatorStr); - System.out.println("** Registering parallel Events to be captured by allOf(t1,t2,t3) **"); - client.raiseEvent(instanceId, "event1", "TestEvent 1 Payload"); - client.raiseEvent(instanceId, "event2", "TestEvent 2 Payload"); - client.raiseEvent(instanceId, "event3", "TestEvent 3 Payload"); - System.out.printf("Events raised for workflow with instanceId: %s\n", instanceId); + @Override + public Object run(WorkflowActivityContext ctx) { + Logger logger = LoggerFactory.getLogger(Step1.class); + logger.info("Starting Activity: " + ctx.getName()); + // Do some work + return null; + } + } - } -} + class Step2 implements WorkflowActivity { + + @Override + public Object run(WorkflowActivityContext ctx) { + Logger logger = LoggerFactory.getLogger(Step2.class); + logger.info("Starting Activity: " + ctx.getName()); + // Do some work + return null; + } + } + + class Step3 implements WorkflowActivity { + + @Override + public Object run(WorkflowActivityContext ctx) { + Logger logger = LoggerFactory.getLogger(Step3.class); + logger.info("Starting Activity: " + ctx.getName()); + // Do some work + return null; + } + } ``` {{% /codetab %}} @@ -225,46 +257,23 @@ await context.CallActivityAsync("PostResults", sum); ```java -public static void main(String[] args) throws InterruptedException { - DaprWorkflowClient client = new DaprWorkflowClient(); - - try (client) { - - System.out.println(separatorStr); - System.out.println("**SendExternalMessage**"); - client.raiseEvent(instanceId, "TestEvent", "TestEventPayload"); - - // Get events to process in parallel - System.out.println(separatorStr); - System.out.println("** Registering parallel Events to be captured by allOf(t1,t2,t3) **"); - client.raiseEvent(instanceId, "event1", "TestEvent 1 Payload"); - client.raiseEvent(instanceId, "event2", "TestEvent 2 Payload"); - client.raiseEvent(instanceId, "event3", "TestEvent 3 Payload"); - System.out.printf("Events raised for workflow with instanceId: %s\n", instanceId); - - // Register the raised events to be captured - System.out.println(separatorStr); - System.out.println("** Registering Event to be captured by anyOf(t1,t2,t3) 
**"); - client.raiseEvent(instanceId, "e2", "event 2 Payload"); - System.out.printf("Event raised for workflow with instanceId: %s\n", instanceId); - - // Wait for all tasks to complete and aggregate results - System.out.println(separatorStr); - System.out.println("**WaitForInstanceCompletion**"); - try { - WorkflowInstanceStatus waitForInstanceCompletionResult = - client.waitForInstanceCompletion(instanceId, Duration.ofSeconds(60), true); - System.out.printf("Result: %s%n", waitForInstanceCompletionResult); - } catch (TimeoutException ex) { - System.out.printf("waitForInstanceCompletion has an exception:%s%n", ex); +public class FaninoutWorkflow extends Workflow { + @Override + public WorkflowStub create() { + return ctx -> { + // Get a list of N work items to process in parallel. + Object[] workBatch = ctx.callActivity("GetWorkBatch", Object[].class).await(); + // Schedule the parallel tasks, but don't wait for them to complete yet. + List> tasks = Arrays.stream(workBatch) + .map(workItem -> ctx.callActivity("ProcessWorkItem", workItem, int.class)) + .collect(Collectors.toList()); + // Everything is scheduled. Wait here until all parallel tasks have completed. + List results = ctx.allOf(tasks).await(); + // Aggregate all N outputs and publish the result. + int sum = results.stream().mapToInt(Integer::intValue).sum(); + ctx.complete(sum); + }; } - - System.out.println(separatorStr); - System.out.println("**purgeInstance**"); - boolean purgeResult = client.purgeInstance(instanceId); - System.out.printf("purgeResult: %s%n", purgeResult); - - } } ``` @@ -487,9 +496,8 @@ public class MonitorWorkflow extends Workflow { } // Put the workflow to sleep until the determined time - // Note: ctx.createTimer() method is not supported in the Java SDK yet try { - TimeUnit.SECONDS.sleep(nextSleepInterval.getSeconds()); + ctx.createTimer(nextSleepInterval); } catch (InterruptedException e) { throw new RuntimeException(e); } @@ -640,42 +648,34 @@ public override async Task RunAsync(WorkflowContext context, OrderP ```java -public static void main(String[] args) throws InterruptedException { - DaprWorkflowClient client = new DaprWorkflowClient(); - - try (client) { - String eventInstanceId = client.scheduleNewWorkflow(DemoWorkflow.class); - System.out.printf("Started new workflow instance with random ID: %s%n", eventInstanceId); - client.raiseEvent(eventInstanceId, "TestException", null); - System.out.printf("Event raised for workflow with instanceId: %s\n", eventInstanceId); - - System.out.println(separatorStr); - String instanceToTerminateId = "terminateMe"; - client.scheduleNewWorkflow(DemoWorkflow.class, null, instanceToTerminateId); - System.out.printf("Started new workflow instance with specified ID: %s%n", instanceToTerminateId); - - TimeUnit.SECONDS.sleep(5); - System.out.println("Terminate this workflow instance manually before the timeout is reached"); - client.terminateWorkflow(instanceToTerminateId, null); - System.out.println(separatorStr); - - String restartingInstanceId = "restarting"; - client.scheduleNewWorkflow(DemoWorkflow.class, null, restartingInstanceId); - System.out.printf("Started new workflow instance with ID: %s%n", restartingInstanceId); - System.out.println("Sleeping 30 seconds to restart the workflow"); - TimeUnit.SECONDS.sleep(30); - - System.out.println("**SendExternalMessage: RestartEvent**"); - client.raiseEvent(restartingInstanceId, "RestartEvent", "RestartEventPayload"); - - System.out.println("Sleeping 30 seconds to terminate the eternal workflow"); - 
TimeUnit.SECONDS.sleep(30); - client.terminateWorkflow(restartingInstanceId, null); - } - - System.out.println("Exiting DemoWorkflowClient."); - System.exit(0); +public class ExternalSystemInteractionWorkflow extends Workflow { + @Override + public WorkflowStub create() { + return ctx -> { + // ...other steps... + Integer orderCost = ctx.getInput(int.class); + // Require orders over a certain threshold to be approved + if (orderCost > ORDER_APPROVAL_THRESHOLD) { + try { + // Request human approval for this order + ctx.callActivity("RequestApprovalActivity", orderCost, Void.class).await(); + // Pause and wait for a human to approve the order + boolean approved = ctx.waitForExternalEvent("ManagerApproval", Duration.ofDays(3), boolean.class).await(); + if (!approved) { + // The order was rejected, end the workflow here + ctx.complete("Process reject"); + } + } catch (TaskCanceledException e) { + // An approval timeout results in automatic order cancellation + ctx.complete("Process cancel"); + } + } + // ...other steps... + // End the workflow with a success result + ctx.complete("Process approved"); + }; + } } ``` @@ -744,4 +744,4 @@ External events don't have to be directly triggered by humans. They can also be - Try out the following examples: - [Python](https://github.com/dapr/python-sdk/tree/master/examples/demo_workflow) - [.NET](https://github.com/dapr/dotnet-sdk/tree/master/examples/Workflow) - - [Java](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples/workflows) \ No newline at end of file + - [Java](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples/workflows) diff --git a/daprdocs/content/en/developing-applications/debugging/debug-k8s/debug-daprd.md b/daprdocs/content/en/developing-applications/debugging/debug-k8s/debug-daprd.md index 3b13efc5aeb..24aca6c85a4 100644 --- a/daprdocs/content/en/developing-applications/debugging/debug-k8s/debug-daprd.md +++ b/daprdocs/content/en/developing-applications/debugging/debug-k8s/debug-daprd.md @@ -11,7 +11,7 @@ description: "How to debug the Dapr sidecar (daprd) on your Kubernetes cluster" Sometimes it is necessary to understand what's going on in the Dapr sidecar (daprd), which runs as a sidecar next to your application, especially when you diagnose your Dapr application and wonder if there's something wrong in Dapr itself. Additionally, you may be developing a new feature for Dapr on Kubernetes and want to debug your code. -his guide will cover how to use built-in Dapr debugging to debug the Dapr sidecar in your Kubernetes pods. +This guide covers how to use built-in Dapr debugging to debug the Dapr sidecar in your Kubernetes pods. To learn how to view logs and troubleshoot Dapr in Kubernetes, see the [Configure and view Dapr logs guide]({{< ref "logs-troubleshooting.md#logs-in-kubernetes-mode" >}}) ## Pre-requisites @@ -87,6 +87,62 @@ Forwarding from [::1]:40000 -> 40000 All done. Now you can point to port 40000 and start a remote debug session to daprd from your favorite IDE. +## Commonly used `kubectl` commands + +Use the following common `kubectl` commands when debugging daprd and applications running on Kubernetes. 
+ +Get all pods, events, and services: + +```bash +kubectl get all +kubectl get all -n <namespace> +kubectl get all --all-namespaces +``` + +Get each specifically: + +```bash +kubectl get pods +``` + +```bash +kubectl get events -n <namespace> +kubectl get events --sort-by=.metadata.creationTimestamp -n <namespace> +``` + +```bash +kubectl get services +``` + +Check logs: + +```bash +kubectl logs <pod-name> daprd +kubectl logs <pod-name> <app-container-name> +kubectl logs deployment/<deployment-name> daprd +kubectl logs deployment/<deployment-name> <app-container-name> +``` + +```bash +kubectl describe pod <pod-name> +kubectl describe deploy <deployment-name> +kubectl describe replicaset <replicaset-name> +``` + +Restart a pod by running the following command: + +```bash +kubectl delete pod <pod-name> +``` + +This causes the `replicaset` controller to restart the pod after the delete. + +## Watch the demo + +See the presentation on troubleshooting Dapr on Kubernetes in the [Dapr Community Call #36](https://youtu.be/pniLPRbuLD8?si=bGid7oYSp9cThtiI&t=838). + + + ## Related links - [Overview of Dapr on Kubernetes]({{< ref kubernetes-overview >}}) diff --git a/daprdocs/content/en/developing-applications/debugging/debugging-docker-compose.md b/daprdocs/content/en/developing-applications/debugging/debugging-docker-compose.md new file mode 100644 index 00000000000..ad2af8cab5d --- /dev/null +++ b/daprdocs/content/en/developing-applications/debugging/debugging-docker-compose.md @@ -0,0 +1,81 @@ +--- +type: docs +title: "Debugging Dapr Apps running in Docker Compose" +linkTitle: "Debugging Docker Compose" +weight: 300 +description: "Debug Dapr apps locally which are part of a Docker Compose deployment" +--- + +The goal of this article is to demonstrate a way to debug one or more Dapr-enabled applications (via your IDE, locally) while remaining integrated with the other applications that have been deployed in the Docker Compose environment. + +Let's take the minimal example of a Docker Compose file which contains just two services: +- `nodeapp` - your app +- `nodeapp-dapr` - the dapr sidecar process to your `nodeapp` service + +#### compose.yml +```yaml +services: + nodeapp: + build: ./node + ports: + - "50001:50001" + networks: + - hello-dapr + nodeapp-dapr: + image: "daprio/daprd:edge" + command: [ + "./daprd", + "--app-id", "nodeapp", + "--app-port", "3000", + "--resources-path", "./components" + ] + volumes: + - "./components/:/components" + depends_on: + - nodeapp + network_mode: "service:nodeapp" +networks: + hello-dapr +``` + +When you run this compose file with `docker compose -f compose.yml up`, it deploys to Docker and runs as normal. + +But how do we debug the `nodeapp` while still integrated with the running dapr sidecar process, and anything else that you may have deployed via the Docker compose file? + +Let's start by introducing a *second* docker compose file called `compose.debug.yml`. This second compose file augments the first compose file when the `up` command is run. + +#### compose.debug.yml +```yaml +services: + nodeapp: # Isolate the nodeapp by removing its ports and taking it off the network + ports: !reset [] + networks: !reset + - "" + nodeapp-dapr: + command: ["./daprd", + "--app-id", "nodeapp", + "--app-port", "8080", # This must match the port that your app is exposed on when debugging in the IDE + "--resources-path", "./components", + "--app-channel-address", "host.docker.internal"] # Make the sidecar look on the host for the App Channel + network_mode: !reset "" # Reset the network_mode... + networks: # ...
so that the sidecar can go into the normal network + - hello-dapr + ports: + - "3500:3500" # Expose the HTTP port to the host + - "50001:50001" # Expose the GRPC port to the host (Dapr Workflows depends upon the GRPC channel) + +``` + +Next, ensure that your `nodeapp` is running/debugging in your IDE of choice, and is exposed on the same port that you specified above in the `compose.debug.yml`. In the example above, this is set to port `8080`. + +Next, stop any existing compose sessions you may have started, and run the following command to run both docker compose files combined together: + +`docker compose -f compose.yml -f compose.debug.yml up` + +You should now find that the dapr sidecar and your debugging app will have bi-directional communication with each other as if they were running together as normal in the Docker compose environment. + +**Note**: It's important to highlight that the `nodeapp` service in the docker compose environment is actually still running, however it has been removed from the docker network so it is effectively orphaned as nothing can communicate with it. + +**Demo**: Watch this video on how to debug local Dapr apps with Docker Compose + + \ No newline at end of file diff --git a/daprdocs/content/en/developing-applications/integrations/Azure/azure-authentication/_index.md b/daprdocs/content/en/developing-applications/integrations/Azure/azure-authentication/_index.md index 59dce6d2305..d25f63f11c8 100644 --- a/daprdocs/content/en/developing-applications/integrations/Azure/azure-authentication/_index.md +++ b/daprdocs/content/en/developing-applications/integrations/Azure/azure-authentication/_index.md @@ -3,5 +3,5 @@ type: docs title: "Authenticate to Azure" linkTitle: "Authenticate to Azure" weight: 1600 -description: "Learn about authenticating Azure components using Azure Active Directory or Managed Identities" +description: "Learn about authenticating Azure components using Microsoft Entra ID or Managed Identities" --- \ No newline at end of file diff --git a/daprdocs/content/en/developing-applications/integrations/Azure/azure-authentication/authenticating-azure.md b/daprdocs/content/en/developing-applications/integrations/Azure/azure-authentication/authenticating-azure.md index b020548eeef..6e4ffbeee3d 100644 --- a/daprdocs/content/en/developing-applications/integrations/Azure/azure-authentication/authenticating-azure.md +++ b/daprdocs/content/en/developing-applications/integrations/Azure/azure-authentication/authenticating-azure.md @@ -2,27 +2,27 @@ type: docs title: "Authenticating to Azure" linkTitle: "Overview" -description: "How to authenticate Azure components using Azure AD and/or Managed Identities" +description: "How to authenticate Azure components using Microsoft Entra ID and/or Managed Identities" aliases: - "/operations/components/setup-secret-store/supported-secret-stores/azure-keyvault-managed-identity/" - "/reference/components-reference/supported-secret-stores/azure-keyvault-managed-identity/" weight: 10000 --- -Most Azure components for Dapr support authenticating with Azure AD (Azure Active Directory). Thanks to this: +Most Azure components for Dapr support authenticating with Microsoft Entra ID. Thanks to this: - Administrators can leverage all the benefits of fine-tuned permissions with Azure Role-Based Access Control (RBAC).
- Applications running on Azure services such as Azure Container Apps, Azure Kubernetes Service, Azure VMs, or any other Azure platform services can leverage [Managed Identities (MI)](https://learn.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview) and [Workload Identity](https://learn.microsoft.com/azure/aks/workload-identity-overview). These offer the ability to authenticate your applications without having to manage sensitive credentials. -## About authentication with Azure AD +## About authentication with Microsoft Entra ID -Azure AD is Azure's identity and access management (IAM) solution, which is used to authenticate and authorize users and services. +Microsoft Entra ID is Azure's identity and access management (IAM) solution, which is used to authenticate and authorize users and services. -Azure AD is built on top of open standards such OAuth 2.0, which allows services (applications) to obtain access tokens to make requests to Azure services, including Azure Storage, Azure Service Bus, Azure Key Vault, Azure Cosmos DB, Azure Database for Postgres, Azure SQL, etc. +Microsoft Entra ID is built on top of open standards such OAuth 2.0, which allows services (applications) to obtain access tokens to make requests to Azure services, including Azure Storage, Azure Service Bus, Azure Key Vault, Azure Cosmos DB, Azure Database for Postgres, Azure SQL, etc. > In Azure terminology, an application is also called a "Service Principal". -Some Azure components offer alternative authentication methods, such as systems based on "shared keys" or "access tokens". Although these are valid and supported by Dapr, you should authenticate your Dapr components using Azure AD whenever possible to take advantage of many benefits, including: +Some Azure components offer alternative authentication methods, such as systems based on "shared keys" or "access tokens". Although these are valid and supported by Dapr, you should authenticate your Dapr components using Microsoft Entra ID whenever possible to take advantage of many benefits, including: - [Managed Identities and Workload Identity](#managed-identities-and-workload-identity) - [Role-Based Access Control](#role-based-access-control) @@ -31,7 +31,7 @@ Some Azure components offer alternative authentication methods, such as systems ### Managed Identities and Workload Identity -With Managed Identities (MI), your application can authenticate with Azure AD and obtain an access token to make requests to Azure services. When your application is running on a supported Azure service (such as Azure VMs, Azure Container Apps, Azure Web Apps, etc), an identity for your application can be assigned at the infrastructure level. +With Managed Identities (MI), your application can authenticate with Microsoft Entra ID and obtain an access token to make requests to Azure services. When your application is running on a supported Azure service (such as Azure VMs, Azure Container Apps, Azure Web Apps, etc), an identity for your application can be assigned at the infrastructure level. Once using MI, your code doesn't have to deal with credentials, which: @@ -48,11 +48,11 @@ When using Azure Role-Based Access Control (RBAC) with supported services, permi ### Auditing -Using Azure AD provides an improved auditing experience for access. Tenant administrators can consult audit logs to track authentication requests. +Using Microsoft Entra ID provides an improved auditing experience for access. 
Tenant administrators can consult audit logs to track authentication requests. ### (Optional) Authentication using certificates -While Azure AD allows you to use MI, you still have the option to authenticate using certificates. +While Microsoft Entra ID allows you to use MI, you still have the option to authenticate using certificates. ## Support for other Azure environments @@ -66,7 +66,7 @@ By default, Dapr components are configured to interact with Azure resources in t ## Credentials metadata fields -To authenticate with Azure AD, you will need to add the following credentials as values in the metadata for your [Dapr component](#example-usage-in-a-dapr-component). +To authenticate with Microsoft Entra ID, you will need to add the following credentials as values in the metadata for your [Dapr component](#example-usage-in-a-dapr-component). ### Metadata options @@ -82,7 +82,7 @@ Depending on how you've passed credentials to your Dapr services, you have multi | Field | Required | Details | Example | |---------------------|----------|--------------------------------------|----------------------------------------------| -| `azureTenantId` | Y | ID of the Azure AD tenant | `"cd4b2887-304c-47e1-b4d5-65447fdd542b"` | +| `azureTenantId` | Y | ID of the Microsoft Entra ID tenant | `"cd4b2887-304c-47e1-b4d5-65447fdd542b"` | | `azureClientId` | Y | Client ID (application ID) | `"c7dd251f-811f-4ba2-a905-acd4d3f8f08b"` | | `azureClientSecret` | Y | Client secret (application password) | `"Ecy3XG7zVZK3/vl/a2NSB+a1zXLa8RnMum/IgD0E"` | @@ -92,7 +92,7 @@ When running on Kubernetes, you can also use references to Kubernetes secrets fo | Field | Required | Details | Example | |--------|--------|--------|--------| -| `azureTenantId` | Y | ID of the Azure AD tenant | `"cd4b2887-304c-47e1-b4d5-65447fdd542b"` | +| `azureTenantId` | Y | ID of the Microsoft Entra ID tenant | `"cd4b2887-304c-47e1-b4d5-65447fdd542b"` | | `azureClientId` | Y | Client ID (application ID) | `"c7dd251f-811f-4ba2-a905-acd4d3f8f08b"` | | `azureCertificate` | One of `azureCertificate` and `azureCertificateFile` | Certificate and private key (in PFX/PKCS#12 format) | `"-----BEGIN PRIVATE KEY-----\n MIIEvgI... \n -----END PRIVATE KEY----- \n -----BEGIN CERTIFICATE----- \n MIICoTC... \n -----END CERTIFICATE-----` | | `azureCertificateFile` | One of `azureCertificate` and `azureCertificateFile` | Path to the PFX/PKCS#12 file containing the certificate and private key | `"/path/to/file.pem"` | @@ -127,7 +127,7 @@ Using this authentication method does not require setting any metadata option. ### Example usage in a Dapr component -In this example, you will set up an Azure Key Vault secret store component that uses Azure AD to authenticate. +In this example, you will set up an Azure Key Vault secret store component that uses Microsoft Entra ID to authenticate. 
{{< tabs "Self-Hosted" "Kubernetes">}} @@ -279,11 +279,11 @@ To use a **certificate**: ## Next steps -{{< button text="Generate a new Azure AD application and Service Principal >>" page="howto-aad.md" >}} +{{< button text="Generate a new Microsoft Entra ID application and Service Principal >>" page="howto-aad.md" >}} ## References -- [Azure AD app credential: Azure CLI reference](https://docs.microsoft.com/cli/azure/ad/app/credential) +- [Microsoft Entra ID app credential: Azure CLI reference](https://docs.microsoft.com/cli/azure/ad/app/credential) - [Azure Managed Service Identity (MSI) overview](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview) - [Secrets building block]({{< ref secrets >}}) - [How-To: Retrieve a secret]({{< ref "howto-secrets.md" >}}) diff --git a/daprdocs/content/en/developing-applications/integrations/Azure/azure-authentication/howto-aad.md b/daprdocs/content/en/developing-applications/integrations/Azure/azure-authentication/howto-aad.md index d1be027ca98..abb67782420 100644 --- a/daprdocs/content/en/developing-applications/integrations/Azure/azure-authentication/howto-aad.md +++ b/daprdocs/content/en/developing-applications/integrations/Azure/azure-authentication/howto-aad.md @@ -1,9 +1,9 @@ --- type: docs -title: "How to: Generate a new Azure AD application and Service Principal" -linkTitle: "How to: Generate Azure AD and Service Principal" +title: "How to: Generate a new Microsoft Entra ID application and Service Principal" +linkTitle: "How to: Generate Microsoft Entra ID and Service Principal" weight: 30000 -description: "Learn how to generate an Azure Active Directory and use it as a Service Principal" +description: "Learn how to generate an Microsoft Entra ID and use it as a Service Principal" --- ## Prerequisites @@ -23,9 +23,9 @@ az login az account set -s [your subscription id] ``` -### Create an Azure AD application +### Create an Microsoft Entra ID application -Create the Azure AD application with: +Create the Microsoft Entra ID application with: ```sh # Friendly name for the application / Service Principal @@ -107,7 +107,7 @@ When adding the returned values to your Dapr component's metadata: ### Create a Service Principal -Once you have created an Azure AD application, create a Service Principal for that application. With this Service Principal, you can grant it access to Azure resources. +Once you have created an Microsoft Entra ID application, create a Service Principal for that application. With this Service Principal, you can grant it access to Azure resources. To create the Service Principal, run the following command: @@ -124,7 +124,7 @@ Expected output: Service Principal ID: 1d0ccf05-5427-4b5e-8eb4-005ac5f9f163 ``` -The returned value above is the **Service Principal ID**, which is different from the Azure AD application ID (client ID). The Service Principal ID is defined within an Azure tenant and used to grant access to Azure resources to an application +The returned value above is the **Service Principal ID**, which is different from the Microsoft Entra ID application ID (client ID). The Service Principal ID is defined within an Azure tenant and used to grant access to Azure resources to an application You'll use the Service Principal ID to grant permissions to an application to access Azure resources. Meanwhile, **the client ID** is used by your application to authenticate. You'll use the client ID in Dapr manifests to configure authentication with Azure services. 
diff --git a/daprdocs/content/en/developing-applications/integrations/Azure/azure-authentication/howto-mi.md b/daprdocs/content/en/developing-applications/integrations/Azure/azure-authentication/howto-mi.md index 5eb6a8f8683..28aa976dc6a 100644 --- a/daprdocs/content/en/developing-applications/integrations/Azure/azure-authentication/howto-mi.md +++ b/daprdocs/content/en/developing-applications/integrations/Azure/azure-authentication/howto-mi.md @@ -10,7 +10,7 @@ description: "Learn how to use Managed Identities" Using Managed Identities (MI), authentication happens automatically by virtue of your application running on top of an Azure service that has an assigned identity. -For example, let's say you enable a managed service identity for an Azure VM, Azure Container App, or an Azure Kubernetes Service cluster. When you do, an Azure AD application is created for you and automatically assigned to the service. Your Dapr services can then leverage that identity to authenticate with Azure AD, transparently and without you having to specify any credentials. +For example, let's say you enable a managed service identity for an Azure VM, Azure Container App, or an Azure Kubernetes Service cluster. When you do, an Microsoft Entra ID application is created for you and automatically assigned to the service. Your Dapr services can then leverage that identity to authenticate with Microsoft Entra ID, transparently and without you having to specify any credentials. To get started with managed identities, you need to assign an identity to a new or existing Azure resource. The instructions depend on the service use. Check the following official documentation for the most appropriate instructions: diff --git a/daprdocs/content/en/developing-applications/local-development/ides/vscode/vscode-remote-dev-containers.md b/daprdocs/content/en/developing-applications/local-development/ides/vscode/vscode-remote-dev-containers.md index 952fdb9ed38..d39a45aeb16 100644 --- a/daprdocs/content/en/developing-applications/local-development/ides/vscode/vscode-remote-dev-containers.md +++ b/daprdocs/content/en/developing-applications/local-development/ides/vscode/vscode-remote-dev-containers.md @@ -45,7 +45,7 @@ dapr init #### Example: create a Java Dev Container for Dapr -This is an exmaple of creating a Dev Container for creating Java apps that use Dapr, based on the [official Java 17 Dev Container image](https://github.com/devcontainers/images/tree/main/src/java). +This is an example of creating a Dev Container for creating Java apps that use Dapr, based on the [official Java 17 Dev Container image](https://github.com/devcontainers/images/tree/main/src/java). 
Place this in a file called `.devcontainer/devcontainer.json` in your project: diff --git a/daprdocs/content/en/developing-applications/local-development/multi-app-dapr-run/multi-app-template.md b/daprdocs/content/en/developing-applications/local-development/multi-app-dapr-run/multi-app-template.md index 350ef0f4219..e5c526da43f 100644 --- a/daprdocs/content/en/developing-applications/local-development/multi-app-dapr-run/multi-app-template.md +++ b/daprdocs/content/en/developing-applications/local-development/multi-app-dapr-run/multi-app-template.md @@ -14,7 +14,7 @@ The Multi-App Run template file is a YAML file that you can use to run multiple - Use the multi-app template - View started applications - Stop the multi-app template -- Stucture the multi-app template file +- Structure the multi-app template file ## Use the multi-app template @@ -93,7 +93,7 @@ Stop the multi-app run template anytime with either of the following commands: ```cmd # the template file needs to be called `dapr.yaml` by default if a directory path is given -dapr stop -f +dapr stop -f ``` or: @@ -162,9 +162,9 @@ apps: The following rules apply for all the paths present in the template file: - If the path is absolute, it is used as is. - - All relative paths under command section should be provided relative to the template file path. + - All relative paths under common section should be provided relative to the template file path. - `appDirPath` under apps section should be provided relative to the template file path. - - All relative paths under app section should be provided relative to the `appDirPath`. + - All other relative paths under apps section should be provided relative to the `appDirPath`. {{% /codetab %}} diff --git a/daprdocs/content/en/getting-started/install-dapr-selfhost.md b/daprdocs/content/en/getting-started/install-dapr-selfhost.md index a14d4ff20de..68a12091852 100644 --- a/daprdocs/content/en/getting-started/install-dapr-selfhost.md +++ b/daprdocs/content/en/getting-started/install-dapr-selfhost.md @@ -25,9 +25,9 @@ Dapr initialization includes: 1. Running a **Dapr placement service container instance** for local actor support. {{% alert title="Docker" color="primary" %}} -The recommended development environment requires [Docker](https://docs.docker.com/install/). While you can [initialize Dapr without a dependency on Docker]({{}})), the next steps in this guide assume the recommended Docker development environment. +The recommended development environment requires [Docker](https://docs.docker.com/install/). While you can [initialize Dapr without a dependency on Docker]({{< ref self-hosted-no-docker.md >}})), the next steps in this guide assume the recommended Docker development environment. -You can also install [Podman](https://podman.io/) in place of Docker. Read more about [initializing Dapr using Podman]({{}}). +You can also install [Podman](https://podman.io/) in place of Docker. Read more about [initializing Dapr using Podman]({{< ref dapr-init.md >}}). {{% /alert %}} ### Step 1: Open an elevated terminal @@ -56,12 +56,35 @@ Run Windows Terminal or command prompt as administrator. ### Step 2: Run the init CLI command +{{< tabs "Linux/MacOS" "Windows">}} + +{{% codetab %}} + Install the latest Dapr runtime binaries: ```bash dapr init ``` +**If you are installing on Mac OS Silicon with Docker,** you may need to perform the following workaround to enable `dapr init` to talk to Docker without using Kubernetes. +1. Navigate to **Docker Desktop** > **Settings** > **Advanced**. +1. 
Select the **Enable default Docker socket** checkbox. + +{{% /codetab %}} + +{{% codetab %}} + +Install the latest Dapr runtime binaries: + +```bash +dapr init +``` + +{{% /codetab %}} + +{{< /tabs >}} + + ### Step 3: Verify Dapr version ```bash @@ -114,9 +137,14 @@ ls $HOME/.dapr {{% /codetab %}} {{% codetab %}} - +You can verify using either PowerShell or command line. If using PowerShell, run: ```powershell -explorer "%USERPROFILE%\.dapr\" +explorer "$env:USERPROFILE\.dapr" +``` + +If using command line, run: +```cmd +explorer "%USERPROFILE%\.dapr" ``` **Result:** diff --git a/daprdocs/content/en/getting-started/quickstarts/cryptography-quickstart.md b/daprdocs/content/en/getting-started/quickstarts/cryptography-quickstart.md index 7da6714cea1..8959672d09a 100644 --- a/daprdocs/content/en/getting-started/quickstarts/cryptography-quickstart.md +++ b/daprdocs/content/en/getting-started/quickstarts/cryptography-quickstart.md @@ -23,7 +23,209 @@ This example uses the Dapr SDK, which leverages gRPC and is **strongly** recomme Currently, you can experience the cryptography API using the Go SDK. -{{< tabs "Go" >}} +{{< tabs "JavaScript" "Go" >}} + + +{{% codetab %}} + +> This quickstart includes a JavaScript application called `crypto-quickstart`. + +### Pre-requisites + +For this example, you will need: + +- [Dapr CLI and initialized environment](https://docs.dapr.io/getting-started). +- [Latest Node.js installed](https://nodejs.org/download/). + +- [Docker Desktop](https://www.docker.com/products/docker-desktop) + +- [OpenSSL](https://www.openssl.org/source/) available on your system + +### Step 1: Set up the environment + +Clone the [sample provided in the Quickstarts repo](https://github.com/dapr/quickstarts/tree/master/cryptography) + +```bash +git clone https://github.com/dapr/quickstarts.git +``` + +In the terminal, from the root directory, navigate to the cryptography sample. + +```bash +cd cryptography/javascript/sdk +``` + +Navigate into the folder with the source code: + +```bash +cd ./crypto-quickstart +``` + +Install the dependencies: + +```bash +npm install +``` + +### Step 2: Run the application with Dapr + +The application code defines two required keys: + +- Private RSA key +- A 256-bit symmetric (AES) key + +Generate two keys, an RSA key and an AES key, using OpenSSL and write these to two files: + +```bash +mkdir -p keys +# Generate a private RSA key, 4096-bit keys +openssl genpkey -algorithm RSA -pkeyopt rsa_keygen_bits:4096 -out keys/rsa-private-key.pem +# Generate a 256-bit key for AES +openssl rand -out keys/symmetric-key-256 32 +``` + +Run the JavaScript app with Dapr: + +```bash +dapr run --app-id crypto-quickstart --resources-path ../../../components/ -- npm start +``` + +**Expected output** + +``` +== APP == 2023-10-25T14:30:50.435Z INFO [GRPCClient, GRPCClient] Opening connection to 127.0.0.1:58173 +== APP == == Encrypting message using buffers +== APP == Encrypted the message, got 856 bytes +== APP == == Decrypting message using buffers +== APP == Decrypted the message, got 24 bytes +== APP == The secret is "passw0rd" +== APP == == Encrypting message using streams +== APP == Encrypting federico-di-dio-photography-Q4g0Q-eVVEg-unsplash.jpg to encrypted.out +== APP == Encrypted the message to encrypted.out +== APP == == Decrypting message using streams +== APP == Decrypting encrypted.out to decrypted.out.jpg +== APP == Decrypted the message to decrypted.out.jpg +``` + +### What happened?
+ +#### `local-storage.yaml` + +Earlier, you created a directory inside `crypto-quickstarts` called `keys`. In [the `local-storage` component YAML](https://github.com/dapr/quickstarts/tree/master/cryptography/components/local-storage.yaml), the `path` metadata maps to the newly created `keys` directory. + +```yml +apiVersion: dapr.io/v1alpha1 +kind: Component +metadata: + name: localstorage +spec: + type: crypto.dapr.localstorage + version: v1 + metadata: + - name: path + # Path is relative to the folder where the example is located + value: ./keys +``` + +#### `index.mjs` + +[The application file](https://github.com/dapr/quickstarts/blob/master/cryptography/javascript/sdk/crypto-quickstart/index.mjs) encrypts and decrypts messages and files using the RSA and AES keys that you generated. The application creates a new Dapr SDK client: + +```javascript +async function start() { + const client = new DaprClient({ + daprHost, + daprPort, + communicationProtocol: CommunicationProtocolEnum.GRPC, + }); + + // Encrypt and decrypt a message from a buffer + await encryptDecryptBuffer(client); + + // Encrypt and decrypt a message using streams + await encryptDecryptStream(client); +} +``` + +##### Encrypting and decrypting a string using the RSA key + +Once the client is created, the application encrypts a message: + +```javascript +async function encryptDecryptBuffer(client) { + // Message to encrypt + const plaintext = `The secret is "passw0rd"` + + // First, encrypt the message + console.log("== Encrypting message using buffers"); + + const encrypted = await client.crypto.encrypt(plaintext, { + componentName: "localstorage", + keyName: "rsa-private-key.pem", + keyWrapAlgorithm: "RSA", + }); + + console.log("Encrypted the message, got", encrypted.length, "bytes"); +``` + +The application then decrypts the message: + +```javascript + // Decrypt the message + console.log("== Decrypting message using buffers"); + const decrypted = await client.crypto.decrypt(encrypted, { + componentName: "localstorage", + }); + + console.log("Decrypted the message, got", decrypted.length, "bytes"); + console.log(decrypted.toString("utf8")); + + // ... 
+} +``` + +##### Encrypt and decrpyt a large file using the AES key + +Next, the application encrypts a large image file: + +```javascript +async function encryptDecryptStream(client) { + // First, encrypt the message + console.log("== Encrypting message using streams"); + console.log("Encrypting", testFileName, "to encrypted.out"); + + await pipeline( + createReadStream(testFileName), + await client.crypto.encrypt({ + componentName: "localstorage", + keyName: "symmetric-key-256", + keyWrapAlgorithm: "A256KW", + }), + createWriteStream("encrypted.out"), + ); + + console.log("Encrypted the message to encrypted.out"); +``` + +The application then decrypts the large image file: + +```javascript + // Decrypt the message + console.log("== Decrypting message using streams"); + console.log("Decrypting encrypted.out to decrypted.out.jpg"); + await pipeline( + createReadStream("encrypted.out"), + await client.crypto.decrypt({ + componentName: "localstorage", + }), + createWriteStream("decrypted.out.jpg"), + ); + + console.log("Decrypted the message to decrypted.out.jpg"); +} +``` + +{{% /codetab %}} {{% codetab %}} diff --git a/daprdocs/content/en/getting-started/quickstarts/pubsub-quickstart.md b/daprdocs/content/en/getting-started/quickstarts/pubsub-quickstart.md index 06f6402cf2d..3b459afebfd 100644 --- a/daprdocs/content/en/getting-started/quickstarts/pubsub-quickstart.md +++ b/daprdocs/content/en/getting-started/quickstarts/pubsub-quickstart.md @@ -51,6 +51,20 @@ From the root of the Quickstarts directory, navigate into the pub/sub directory: cd pub_sub/python/sdk ``` +Install the dependencies for the `order-processor` and `checkout` apps: + +```bash +cd ./checkout +pip3 install -r requirements.txt +cd .. +cd ./order-processor +pip3 install -r requirements.txt +cd .. +cd ./order-processor-fastapi +pip3 install -r requirements.txt +cd .. +``` + ### Step 3: Run the publisher and subscriber With the following command, simultaneously run the following services alongside their own Dapr sidecars: @@ -60,6 +74,8 @@ With the following command, simultaneously run the following services alongside ```bash dapr run -f . ``` +> **Note**: Since Python3.exe is not defined in Windows, you may need to change `python3` to `python` in the [`dapr.yaml`]({{< ref "#dapryaml-multi-app-run-template-file" >}}) file before running `dapr run -f .` + **Expected output** @@ -213,6 +229,17 @@ From the root of the Quickstarts directory, navigate into the pub/sub directory: cd pub_sub/javascript/sdk ``` +Install the dependencies for the `order-processor` and `checkout` apps: + +```bash +cd ./order-processor +npm install +cd .. +cd ./checkout +npm install +cd .. +``` + ### Step 3: Run the publisher and subscriber With the following command, simultaneously run the following services alongside their own Dapr sidecars: @@ -350,6 +377,18 @@ From the root of the Quickstarts directory, navigate into the pub/sub directory: cd pub_sub/csharp/sdk ``` +Install the dependencies for the `order-processor` and `checkout` apps: + +```bash +cd ./order-processor +dotnet restore +dotnet build +cd ../checkout +dotnet restore +dotnet build +cd .. 
+``` + ### Step 3: Run the publisher and subscriber With the following command, simultaneously run the following services alongside their own Dapr sidecars: @@ -495,6 +534,17 @@ From the root of the Quickstarts directory, navigate into the pub/sub directory: cd pub_sub/java/sdk ``` +Install the dependencies for the `order-processor` and `checkout` apps: + +```bash +cd ./order-processor +mvn clean install +cd .. +cd ./checkout +mvn clean install +cd .. +``` + ### Step 3: Run the publisher and subscriber With the following command, simultaneously run the following services alongside their own Dapr sidecars: @@ -645,6 +695,16 @@ From the root of the Quickstarts directory, navigate into the pub/sub directory: cd pub_sub/go/sdk ``` +Install the dependencies for the `order-processor` and `checkout` apps: + +```bash +cd ./order-processor +go build . +cd ../checkout +go build . +cd .. +``` + ### Step 3: Run the publisher and subscriber With the following command, simultaneously run the following services alongside their own Dapr sidecars: @@ -876,7 +936,7 @@ with DaprClient() as client: ### Step 5: View the Pub/sub outputs -Notice, as specified in the code above, the publisher pushes a random number to the Dapr sidecar while the subscriber receives it. +The publisher sends orders to the Dapr sidecar while the subscriber receives them. Publisher output: diff --git a/daprdocs/content/en/getting-started/quickstarts/serviceinvocation-quickstart.md b/daprdocs/content/en/getting-started/quickstarts/serviceinvocation-quickstart.md index c33c529f952..0164e35fddf 100644 --- a/daprdocs/content/en/getting-started/quickstarts/serviceinvocation-quickstart.md +++ b/daprdocs/content/en/getting-started/quickstarts/serviceinvocation-quickstart.md @@ -48,6 +48,16 @@ From the root of the Quickstart clone directory, navigate to the quickstart dire cd service_invocation/python/http ``` +Install the dependencies for the `order-processor` and `checkout` apps: + +```bash +cd ./order-processor +pip3 install -r requirements.txt +cd ../checkout +pip3 install -r requirements.txt +cd .. +``` + ### Step 3: Run the `order-processor` and `checkout` services With the following command, simultaneously run the following services alongside their own Dapr sidecars: @@ -57,6 +67,7 @@ With the following command, simultaneously run the following services alongside ```bash dapr run -f . ``` +> **Note**: Since Python3.exe is not defined in Windows, you may need to change `python3` to `python` in the [`dapr.yaml`]({{< ref "#dapryaml-multi-app-run-template-file" >}}) file before running `dapr run -f .` **Expected output** @@ -183,6 +194,16 @@ From the root of the Quickstart clone directory, navigate to the quickstart dire cd service_invocation/javascript/http ``` +Install the dependencies for the `order-processor` and `checkout` apps: + +```bash +cd ./order-processor +npm install +cd ../checkout +npm install +cd .. +``` + ### Step 3: Run the `order-processor` and `checkout` services With the following command, simultaneously run the following services alongside their own Dapr sidecars: @@ -313,6 +334,18 @@ From the root of the Quickstart clone directory, navigate to the quickstart dire cd service_invocation/csharp/http ``` +Install the dependencies for the `order-processor` and `checkout` apps: + +```bash +cd ./order-processor +dotnet restore +dotnet build +cd ../checkout +dotnet restore +dotnet build +cd .. 
+``` + ### Step 3: Run the `order-processor` and `checkout` services With the following command, simultaneously run the following services alongside their own Dapr sidecars: @@ -447,6 +480,16 @@ From the root of the Quickstart clone directory, navigate to the quickstart dire cd service_invocation/java/http ``` +Install the dependencies for the `order-processor` and `checkout` apps: + +```bash +cd ./order-processor +mvn clean install +cd ../checkout +mvn clean install +cd .. +``` + ### Step 3: Run the `order-processor` and `checkout` services With the following command, simultaneously run the following services alongside their own Dapr sidecars: @@ -576,6 +619,16 @@ From the root of the Quickstart clone directory, navigate to the quickstart dire cd service_invocation/go/http ``` +Install the dependencies for the `order-processor` and `checkout` apps: + +```bash +cd ./order-processor +go build . +cd ../checkout +go build . +cd .. +``` + ### Step 3: Run the `order-processor` and `checkout` services With the following command, simultaneously run the following services alongside their own Dapr sidecars: diff --git a/daprdocs/content/en/getting-started/quickstarts/statemanagement-quickstart.md b/daprdocs/content/en/getting-started/quickstarts/statemanagement-quickstart.md index 4d1224c4eb7..bfa8427da8c 100644 --- a/daprdocs/content/en/getting-started/quickstarts/statemanagement-quickstart.md +++ b/daprdocs/content/en/getting-started/quickstarts/statemanagement-quickstart.md @@ -48,11 +48,18 @@ In a terminal window, navigate to the `order-processor` directory. cd state_management/python/sdk/order-processor ``` +Install the dependencies: + +```bash +pip3 install -r requirements.txt +``` + Run the `order-processor` service alongside a Dapr sidecar using [Multi-App Run]({{< ref multi-app-dapr-run >}}). ```bash -dapr run -f +dapr run -f . ``` +> **Note**: Since Python3.exe is not defined in Windows, you may need to change `python3` to `python` in the [`dapr.yaml`]({{< ref "#dapryaml-multi-app-run-template-file" >}}) file before running `dapr run -f .` The `order-processor` service writes, reads, and deletes an `orderId` key/value pair to the `statestore` instance [defined in the `statestore.yaml` component]({{< ref "#statestoreyaml-component-file" >}}). As soon as the service starts, it performs a loop. @@ -162,6 +169,14 @@ Clone the [sample provided in the Quickstarts repo](https://github.com/dapr/quic git clone https://github.com/dapr/quickstarts.git ``` +Install the dependencies for the `order-processor` app: + +```bash +cd ./order-processor +npm install +cd .. +``` + ### Step 2: Manipulate service state In a terminal window, navigate to the `order-processor` directory. @@ -170,10 +185,16 @@ In a terminal window, navigate to the `order-processor` directory. cd state_management/javascript/sdk/order-processor ``` +Install the dependencies: + +```bash +npm install +``` + Run the `order-processor` service alongside a Dapr sidecar. ```bash -dapr run -f +dapr run -f . ``` The `order-processor` service writes, reads, and deletes an `orderId` key/value pair to the `statestore` instance [defined in the `statestore.yaml` component]({{< ref "#statestoreyaml-component-file" >}}). As soon as the service starts, it performs a loop. @@ -296,10 +317,17 @@ In a terminal window, navigate to the `order-processor` directory. 
cd state_management/csharp/sdk/order-processor ``` +Install the dependencies: + +```bash +dotnet restore +dotnet build +``` + Run the `order-processor` service alongside a Dapr sidecar. ```bash -dapr run -f +dapr run -f . ``` The `order-processor` service writes, reads, and deletes an `orderId` key/value pair to the `statestore` instance [defined in the `statestore.yaml` component]({{< ref "#statestoreyaml-component-file" >}}). As soon as the service starts, it performs a loop. @@ -423,10 +451,16 @@ In a terminal window, navigate to the `order-processor` directory. cd state_management/java/sdk/order-processor ``` +Install the dependencies: + +```bash +mvn clean install +``` + Run the `order-processor` service alongside a Dapr sidecar. ```bash -dapr run -f +dapr run -f . ``` The `order-processor` service writes, reads, and deletes an `orderId` key/value pair to the `statestore` instance [defined in the `statestore.yaml` component]({{< ref "#statestoreyaml-component-file" >}}). As soon as the service starts, it performs a loop. @@ -550,10 +584,16 @@ In a terminal window, navigate to the `order-processor` directory. cd state_management/go/sdk/order-processor ``` +Install the dependencies: + +```bash +go build . +``` + Run the `order-processor` service alongside a Dapr sidecar. ```bash -dapr run -f +dapr run -f . ``` The `order-processor` service writes, reads, and deletes an `orderId` key/value pair to the `statestore` instance [defined in the `statestore.yaml` component]({{< ref "#statestoreyaml-component-file" >}}). As soon as the service starts, it performs a loop. diff --git a/daprdocs/content/en/getting-started/tutorials/get-started-component.md b/daprdocs/content/en/getting-started/tutorials/get-started-component.md index 9f460ed0e7d..7c829e5d779 100644 --- a/daprdocs/content/en/getting-started/tutorials/get-started-component.md +++ b/daprdocs/content/en/getting-started/tutorials/get-started-component.md @@ -64,6 +64,12 @@ In the above file definition: Launch a Dapr sidecar that will listen on port 3500 for a blank application named `myapp`: + +PowerShell environment: +```bash +dapr run --app-id myapp --dapr-http-port 3500 --resources-path ../ +``` +non-PowerShell environment: ```bash dapr run --app-id myapp --dapr-http-port 3500 --resources-path . ``` diff --git a/daprdocs/content/en/operations/components/pluggable-components-registration.md b/daprdocs/content/en/operations/components/pluggable-components-registration.md index dda3e302005..10f6a057715 100644 --- a/daprdocs/content/en/operations/components/pluggable-components-registration.md +++ b/daprdocs/content/en/operations/components/pluggable-components-registration.md @@ -25,7 +25,7 @@ While Dapr's built-in components come [included with the runtime](https://github 1. Pluggable components need to be started and ready to take requests _before_ Dapr itself is started. 2. The [Unix Domain Socket][uds] file used for the pluggable component communication need to be made accessible to both Dapr and pluggable component. -In standalone mode, pluggable components run as processes or containers. On Kubernetes, pluggable components run as containers and are automatically injected to the application's pod by Dapr's sidecar injector, allowing customization via the standard [Kubernets Container spec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#container-v1-core). +In standalone mode, pluggable components run as processes or containers. 
On Kubernetes, pluggable components run as containers and are automatically injected to the application's pod by Dapr's sidecar injector, allowing customization via the standard [Kubernetes Container spec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#container-v1-core). This also changes the approach to share [Unix Domain Socket][uds] files between Dapr and pluggable components. diff --git a/daprdocs/content/en/operations/configuration/control-concurrency.md b/daprdocs/content/en/operations/configuration/control-concurrency.md index 7633820d6c9..85b240c19b5 100644 --- a/daprdocs/content/en/operations/configuration/control-concurrency.md +++ b/daprdocs/content/en/operations/configuration/control-concurrency.md @@ -11,7 +11,7 @@ Using Dapr, you can control how many requests and events will invoke your applic *Note that this rate limiting is guaranteed for every event that's coming from Dapr, meaning Pub/Sub events, direct invocation from other services, bindings events etc. Dapr can't enforce the concurrency policy on requests that are coming to your app externally.* -*Note that rate limiting per second can be achieved by using the **middleware.http.ratelimit** middleware. However, there is an imporant difference between the two approaches. The rate limit middlware is time bound and limits the number of requests per second, while the `app-max-concurrency` flag specifies the number of concurrent requests (and events) at any point of time. See [Rate limit middleware]({{< ref middleware-rate-limit.md >}}). * +*Note that rate limiting per second can be achieved by using the **middleware.http.ratelimit** middleware. However, there is an important difference between the two approaches. The rate limit middleware is time bound and limits the number of requests per second, while the `app-max-concurrency` flag specifies the number of concurrent requests (and events) at any point of time. See [Rate limit middleware]({{< ref middleware-rate-limit.md >}}). * Watch this [video](https://youtu.be/yRI5g6o_jp8?t=1710) on how to control concurrency and rate limiting ". diff --git a/daprdocs/content/en/operations/configuration/secret-scope.md b/daprdocs/content/en/operations/configuration/secret-scope.md index b31d5e9fd3c..39796447268 100644 --- a/daprdocs/content/en/operations/configuration/secret-scope.md +++ b/daprdocs/content/en/operations/configuration/secret-scope.md @@ -69,7 +69,7 @@ spec: defaultAccess: deny ``` -For applications that need to be deined access to the Kubernetes secret store, follow [these instructions]({{< ref kubernetes-overview >}}), and add the following annotation to the application pod. +For applications that need to be denied access to the Kubernetes secret store, follow [these instructions]({{< ref kubernetes-overview >}}), and add the following annotation to the application pod. ```yaml dapr.io/config: appconfig diff --git a/daprdocs/content/en/operations/hosting/kubernetes/cluster/setup-kind.md b/daprdocs/content/en/operations/hosting/kubernetes/cluster/setup-kind.md index 00d34d93ac8..d77f27b3073 100644 --- a/daprdocs/content/en/operations/hosting/kubernetes/cluster/setup-kind.md +++ b/daprdocs/content/en/operations/hosting/kubernetes/cluster/setup-kind.md @@ -113,6 +113,43 @@ If you are using Docker Desktop, verify that you have [the recommended settings] 1. Navigate to `http://localhost:9999` to validate a successful setup. +## Install metrics-server on the Kind Kubernetes Cluster + +1. 
Get metrics-server manifests + + ```bash + wget https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml + ``` + +1. Add insecure TLS parameter to the components.yaml file + + ```yaml + metadata: + labels: + k8s-app: metrics-server + spec: + containers: + - args: + - --cert-dir=/tmp + - --secure-port=4443 + - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname + - --kubelet-use-node-status-port + - --kubelet-insecure-tls <==== Add this + - --metric-resolution=15s + image: k8s.gcr.io/metrics-server/metrics-server:v0.6.2 + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 3 + httpGet: + path: /livez + ``` + +1. Apply modified manifest + + ```bash + kubectl apply -f components.yaml + ``` + ## Related links - [Try out a Dapr quickstart]({{< ref quickstarts.md >}}) - Learn how to [deploy Dapr on your cluster]({{< ref kubernetes-deploy.md >}}) diff --git a/daprdocs/content/en/operations/hosting/kubernetes/kubernetes-deploy.md b/daprdocs/content/en/operations/hosting/kubernetes/kubernetes-deploy.md index 4973bfd7267..bfae8b10bdb 100644 --- a/daprdocs/content/en/operations/hosting/kubernetes/kubernetes-deploy.md +++ b/daprdocs/content/en/operations/hosting/kubernetes/kubernetes-deploy.md @@ -296,7 +296,7 @@ To use Mariner-based images for Dapr, you need to add `-mariner` to your Docker In the Dapr CLI, you can switch to using Mariner-based images with the `--image-variant` flag. ```sh -dapr init --image-variant mariner +dapr init -k --image-variant mariner ``` {{% /codetab %}} diff --git a/daprdocs/content/en/operations/performance-and-scalability/perf-service-invocation.md b/daprdocs/content/en/operations/performance-and-scalability/perf-service-invocation.md index 6246f346037..3b201e56ecd 100644 --- a/daprdocs/content/en/operations/performance-and-scalability/perf-service-invocation.md +++ b/daprdocs/content/en/operations/performance-and-scalability/perf-service-invocation.md @@ -54,7 +54,7 @@ The baseline test included direct, non-encrypted traffic, without telemetry, dir ### Control plane performance -The Dapr control plane uses a total of 0.009 vCPU and 61.6 Mb when running in non-HA mode, meaning a single replica per system compoment. +The Dapr control plane uses a total of 0.009 vCPU and 61.6 Mb when running in non-HA mode, meaning a single replica per system component. When running in a highly available production setup, the Dapr control plane consumes ~0.02 vCPU and 185 Mb. | Component | vCPU | Memory diff --git a/daprdocs/content/en/operations/security/api-token.md b/daprdocs/content/en/operations/security/api-token.md index 4435dcff463..aa30b39750e 100644 --- a/daprdocs/content/en/operations/security/api-token.md +++ b/daprdocs/content/en/operations/security/api-token.md @@ -60,7 +60,7 @@ To rotate the configured token in self-hosted, update the `DAPR_API_TOKEN` envir ### Kubernetes -To rotate the configured token in Kubernates, update the previously-created secret with the new token in each namespace. You can do that using `kubectl patch` command, but a simpler way to update these in each namespace is by using a manifest: +To rotate the configured token in Kubernetes, update the previously-created secret with the new token in each namespace. 
You can do that using `kubectl patch` command, but a simpler way to update these in each namespace is by using a manifest: ```yaml apiVersion: v1 diff --git a/daprdocs/content/en/operations/security/app-api-token.md b/daprdocs/content/en/operations/security/app-api-token.md index 3ab926a96af..d94e325139f 100644 --- a/daprdocs/content/en/operations/security/app-api-token.md +++ b/daprdocs/content/en/operations/security/app-api-token.md @@ -61,7 +61,7 @@ To rotate the configured token in self-hosted, update the `APP_API_TOKEN` enviro ### Kubernetes -To rotate the configured token in Kubernates, update the previously-created secret with the new token in each namespace. You can do that using `kubectl patch` command, but a simpler way to update these in each namespace is by using a manifest: +To rotate the configured token in Kubernetes, update the previously-created secret with the new token in each namespace. You can do that using `kubectl patch` command, but a simpler way to update these in each namespace is by using a manifest: ```yaml apiVersion: v1 diff --git a/daprdocs/content/en/operations/security/mtls.md b/daprdocs/content/en/operations/security/mtls.md index f0c3e1a6613..6868b622285 100644 --- a/daprdocs/content/en/operations/security/mtls.md +++ b/daprdocs/content/en/operations/security/mtls.md @@ -486,7 +486,7 @@ By default, system services will look for the credentials in `/var/run/dapr/cred *Note: If you signed the cert root with a different private key, restart the Dapr instances.* ## Community call video on certificate rotation -Watch this [video](https://www.youtube.com/watch?v=Hkcx9kBDrAc&feature=youtu.be&t=1400) on how to perform certificate rotation if your certicates are expiring. +Watch this [video](https://www.youtube.com/watch?v=Hkcx9kBDrAc&feature=youtu.be&t=1400) on how to perform certificate rotation if your certificates are expiring.
diff --git a/daprdocs/content/en/operations/security/oauth.md b/daprdocs/content/en/operations/security/oauth.md index 0e1213dbfcd..ab29634ceaf 100644 --- a/daprdocs/content/en/operations/security/oauth.md +++ b/daprdocs/content/en/operations/security/oauth.md @@ -16,7 +16,7 @@ The main difference between the two flows is that the `Authorization Code Grant Different authorization servers provide different application registration experiences. Here are some samples: -* [Azure AAD](https://docs.microsoft.com/azure/active-directory/develop/v1-protocols-oauth-code) +* [Microsoft Entra ID](https://docs.microsoft.com/azure/active-directory/develop/v1-protocols-oauth-code) * [Facebook](https://developers.facebook.com/apps) * [Fitbit](https://dev.fitbit.com/build/reference/web-api/oauth2/) * [GitHub](https://developer.github.com/apps/building-oauth-apps/creating-an-oauth-app/) @@ -37,7 +37,7 @@ Authorization/Token URLs of some of the popular authorization servers: | Server | Authorization URL | Token URL | |---------|-------------------|-----------| -|Azure AAD||| +|Microsoft Entra ID||| |GitHub||| |Google|| | |Twitter||| diff --git a/daprdocs/content/en/operations/support/support-release-policy.md b/daprdocs/content/en/operations/support/support-release-policy.md index 32ee3f1e7bf..47679a8baaf 100644 --- a/daprdocs/content/en/operations/support/support-release-policy.md +++ b/daprdocs/content/en/operations/support/support-release-policy.md @@ -45,11 +45,19 @@ The table below shows the versions of Dapr releases that have been tested togeth | Release date | Runtime | CLI | SDKs | Dashboard | Status | Release notes | |--------------------|:--------:|:--------|---------|---------|---------|------------| -| October 11th 2023 | 1.12.0
| 1.12.0 | Java 1.10.0
Go 1.9.0
PHP 1.1.0
Python 1.11.0
.NET 1.12.0
JS 3.1.2 | 0.14.0 | Supported (current) | [v1.12.0 release notes](https://github.com/dapr/dapr/releases/tag/v1.12.0) | +| January 17th 2024 | 1.12.4
| 1.12.0 | Java 1.10.0
Go 1.9.1
PHP 1.2.0
Python 1.12.0
.NET 1.12.0
JS 3.2.0 | 0.14.0 | Supported (current) | [v1.12.4 release notes](https://github.com/dapr/dapr/releases/tag/v1.12.4) | +| January 2nd 2024 | 1.12.3
| 1.12.0 | Java 1.10.0
Go 1.9.1
PHP 1.2.0
Python 1.12.0
.NET 1.12.0
JS 3.2.0 | 0.14.0 | Supported (current) | [v1.12.3 release notes](https://github.com/dapr/dapr/releases/tag/v1.12.3) | +| November 18th 2023 | 1.12.2
| 1.12.0 | Java 1.10.0
Go 1.9.1
PHP 1.2.0
Python 1.12.0
.NET 1.12.0
JS 3.2.0 | 0.14.0 | Supported (current) | [v1.12.2 release notes](https://github.com/dapr/dapr/releases/tag/v1.12.2) | +| November 16th 2023 | 1.12.1
| 1.12.0 | Java 1.10.0
Go 1.9.1
PHP 1.2.0
Python 1.12.0
.NET 1.12.0
JS 3.2.0 | 0.14.0 | Supported | [v1.12.1 release notes](https://github.com/dapr/dapr/releases/tag/v1.12.1) | +| October 11th 2023 | 1.12.0
| 1.12.0 | Java 1.10.0
Go 1.9.0
PHP 1.1.0
Python 1.11.0
.NET 1.12.0
JS 3.1.2 | 0.14.0 | Supported | [v1.12.0 release notes](https://github.com/dapr/dapr/releases/tag/v1.12.0) | +| November 18th 2023 | 1.11.6
| 1.11.0 | Java 1.9.0
Go 1.8.0
PHP 1.1.0
Python 1.10.0
.NET 1.11.0
JS 3.1.0 | 0.13.0 | Supported | [v1.11.6 release notes](https://github.com/dapr/dapr/releases/tag/v1.11.6) | +| November 3rd 2023 | 1.11.5
| 1.11.0 | Java 1.9.0
Go 1.8.0
PHP 1.1.0
Python 1.10.0
.NET 1.11.0
JS 3.1.0 | 0.13.0 | Supported | [v1.11.5 release notes](https://github.com/dapr/dapr/releases/tag/v1.11.5) | +| October 5th 2023 | 1.11.4
| 1.11.0 | Java 1.9.0
Go 1.8.0
PHP 1.1.0
Python 1.10.0
.NET 1.11.0
JS 3.1.0 | 0.13.0 | Supported | [v1.11.4 release notes](https://github.com/dapr/dapr/releases/tag/v1.11.4) | | August 31st 2023 | 1.11.3
| 1.11.0 | Java 1.9.0
Go 1.8.0
PHP 1.1.0
Python 1.10.0
.NET 1.11.0
JS 3.1.0 | 0.13.0 | Supported | [v1.11.3 release notes](https://github.com/dapr/dapr/releases/tag/v1.11.3) | | July 20th 2023 | 1.11.2
| 1.11.0 | Java 1.9.0
Go 1.8.0
PHP 1.1.0
Python 1.10.0
.NET 1.11.0
JS 3.1.0 | 0.13.0 | Supported | [v1.11.2 release notes](https://github.com/dapr/dapr/releases/tag/v1.11.2) | | June 22nd 2023 | 1.11.1
| 1.11.0 | Java 1.9.0
Go 1.8.0
PHP 1.1.0
Python 1.10.0
.NET 1.11.0
JS 3.1.0 | 0.13.0 | Supported | [v1.11.1 release notes](https://github.com/dapr/dapr/releases/tag/v1.11.1) | | June 12th 2023 | 1.11.0
| 1.11.0 | Java 1.9.0
Go 1.8.0
PHP 1.1.0
Python 1.10.0
.NET 1.11.0
JS 3.1.0 | 0.13.0 | Supported | [v1.11.0 release notes](https://github.com/dapr/dapr/releases/tag/v1.11.0) | +| November 18th 2023 | 1.10.10
| 1.10.0 | Java 1.8.0
Go 1.7.0
PHP 1.1.0
Python 1.9.0
.NET 1.10.0
JS 3.0.0 | 0.11.0 | Supported | [v1.10.10 release notes](https://github.com/dapr/dapr/releases/tag/v1.10.10) | | July 20th 2023 | 1.10.9
| 1.10.0 | Java 1.8.0
Go 1.7.0
PHP 1.1.0
Python 1.9.0
.NET 1.10.0
JS 3.0.0 | 0.11.0 | Supported | [v1.10.9 release notes](https://github.com/dapr/dapr/releases/tag/v1.10.9) | | June 22nd 2023 | 1.10.8
| 1.10.0 | Java 1.8.0
Go 1.7.0
PHP 1.1.0
Python 1.9.0
.NET 1.10.0
JS 3.0.0 | 0.11.0 | Supported | [v1.10.8 release notes](https://github.com/dapr/dapr/releases/tag/v1.10.8) | | May 15th 2023 | 1.10.7
| 1.10.0 | Java 1.8.0
Go 1.7.0
PHP 1.1.0
Python 1.9.0
.NET 1.10.0
JS 3.0.0 | 0.11.0 | Supported | | @@ -114,7 +122,8 @@ General guidance on upgrading can be found for [self hosted mode]({{< ref self-h | 1.9.0 | N/A | 1.9.6 | | 1.10.0 | N/A | 1.10.8 | | 1.11.0 | N/A | 1.11.4 | -| 1.12.0 | N/A | 1.12.0 | +| 1.12.0 | N/A | 1.12.4 | + ## Upgrade on Hosting platforms diff --git a/daprdocs/content/en/operations/support/support-security-issues.md b/daprdocs/content/en/operations/support/support-security-issues.md index f11b1e75679..c33ce0b167c 100644 --- a/daprdocs/content/en/operations/support/support-security-issues.md +++ b/daprdocs/content/en/operations/support/support-security-issues.md @@ -12,4 +12,4 @@ The Dapr organization and team makes security a central focus of how we operate To report a security issue, please privately email the [Dapr Maintainers (dapr@dapr.io)](mailto:dapr@dapr.io?subject=[Security%20Disclosure]:%20ISSUE%20TITLE) -The Dapr maintainers will triage and respond ASAP and then patch and send an annoucement within 30 days. +The Dapr maintainers will triage and respond ASAP and then patch and send an announcement within 30 days. diff --git a/daprdocs/content/en/operations/troubleshooting/logs-troubleshooting.md b/daprdocs/content/en/operations/troubleshooting/logs-troubleshooting.md index d2316a1fd84..14eb822c194 100644 --- a/daprdocs/content/en/operations/troubleshooting/logs-troubleshooting.md +++ b/daprdocs/content/en/operations/troubleshooting/logs-troubleshooting.md @@ -73,6 +73,8 @@ dapr run node myapp.js ## Logs in Kubernetes mode +> [Learn how to debug `daprd` on Kubernetes.]({{< ref "debug-daprd.md" >}}) + You can set the log level individually for every sidecar by providing the following annotation in your pod spec template: ```yml diff --git a/daprdocs/content/en/reference/api/cryptography_api.md b/daprdocs/content/en/reference/api/cryptography_api.md new file mode 100644 index 00000000000..336088f23e3 --- /dev/null +++ b/daprdocs/content/en/reference/api/cryptography_api.md @@ -0,0 +1,131 @@ +--- +type: docs +title: "Cryptography API reference" +linkTitle: "Cryptography API" +description: "Detailed documentation on the cryptography API" +weight: 1300 +--- + +Dapr provides cross-platform and cross-language support for encryption and decryption support via the +cryptography building block. Besides the [language specific SDKs]({{}}), a developer can invoke these capabilities using +the HTTP API endpoints below. + +> The HTTP APIs are intended for development and testing only. For production scenarios, the use of the SDKs is strongly +> recommended as they implement the gRPC APIs providing higher performance and capability than the HTTP APIs. + +## Encrypt Payload + +This endpoint lets you encrypt a value provided as a byte array using a specified key and crypto component. + +### HTTP Request + +``` +PUT http://localhost:/v1.0/crypto//encrypt +``` + +#### URL Parameters + | Parameter | Description | +|-------------------|-------------------------------------------------------------| +| daprPort | The Dapr port | +| crypto-store-name | The name of the crypto store to get the encryption key from | + +> Note, all URL parameters are case-sensitive. + +#### Headers +Additional encryption parameters are configured by setting headers with the appropriate +values. The following table details the required and optional headers to set with every +encryption request. 
+
+| Header Key | Description | Allowed Values | Required |
+|-------------------------------|-------------|----------------|----------|
+| dapr-key-name | The name of the key to use for the encryption operation | | Yes |
+| dapr-key-wrap-algorithm | The key wrap algorithm to use | `A256KW`, `A128CBC`, `A192CBC`, `RSA-OAEP-256` | Yes |
+| dapr-omit-decryption-key-name | If true, omits the decryption key name from header `dapr-decryption-key-name` from the output. If false, includes the decryption key name specified in header `dapr-decryption-key-name`. | The following values will be accepted as true: `y`, `yes`, `true`, `t`, `on`, `1` | No |
+| dapr-decryption-key-name | If `dapr-omit-decryption-key-name` is true, this contains the name of the intended decryption key to include in the output. | | Required only if `dapr-omit-decryption-key-name` is true |
+| dapr-data-encryption-cipher | The cipher to use for the encryption operation | `aes-gcm` or `chacha20-poly1305` | No |
+
+### HTTP Response
+
+#### Response Body
+The response to an encryption request will have its content type header set to `application/octet-stream` as it
+returns an array of bytes with the encrypted payload.
+
+#### Response Codes
+| Code | Description |
+|------|-------------------------------------------------------------------------|
+| 200  | OK |
+| 400  | Crypto provider not found |
+| 500  | Request formatted correctly, error in dapr code or underlying component |
+
+### Examples
+```shell
+curl http://localhost:3500/v1.0/crypto/myAzureKeyVault/encrypt \
+    -X PUT \
+    -H "dapr-key-name: myCryptoKey" \
+    -H "dapr-key-wrap-algorithm: RSA-OAEP-256" \
+    -H "Content-Type: application/octet-stream" \
+    --data-binary "\x68\x65\x6c\x6c\x6f\x20\x77\x6f\x72\x6c\x64"
+```
+
+> The above command sends an array of UTF-8 encoded bytes representing "hello world" and would return
+> a stream of 8-bit values in the response similar to the following containing the encrypted payload:
+
+```bash
+gAAAAABhZfZ0Ywz4dQX8y9J0Zl5v7w6Z7xq4jV3cW9o2l4pQ0YD1LdR0Zk7zIYi4n2Ll7t6f0Z4X7r8x9o6a8GyL0X1m9Q0Z0A==
+```
+
+## Decrypt Payload
+
+This endpoint lets you decrypt a value provided as a byte array using a specified key and crypto component.
+
+### HTTP Request
+
+```
+PUT http://localhost:<daprPort>/v1.0/crypto/<crypto-store-name>/decrypt
+```
+
+#### URL Parameters
+
+| Parameter | Description |
+|-------------------|-------------------------------------------------------------|
+| daprPort | The Dapr port |
+| crypto-store-name | The name of the crypto store to get the decryption key from |
+
+> Note, all parameters are case-sensitive.
+
+#### Headers
+Additional decryption parameters are configured by setting headers with the appropriate values. The following table
+details the required and optional headers to set with every decryption request.
+
+| Header Key    | Description                                               | Required |
+|---------------|-----------------------------------------------------------|----------|
+| dapr-key-name | The name of the key to use for the decryption operation.
 | Yes      |
+
+### HTTP Response
+
+#### Response Body
+The response to a decryption request will have its content type header set to `application/octet-stream` as it
+returns an array of bytes representing the decrypted payload.
+
+#### Response Codes
+| Code | Description |
+|------|-------------------------------------------------------------------------|
+| 200  | OK |
+| 400  | Crypto provider not found |
+| 500  | Request formatted correctly, error in dapr code or underlying component |
+
+### Examples
+```bash
+curl http://localhost:3500/v1.0/crypto/myAzureKeyVault/decrypt \
+    -X PUT \
+    -H "dapr-key-name: myCryptoKey" \
+    -H "Content-Type: application/octet-stream" \
+    --data-binary "gAAAAABhZfZ0Ywz4dQX8y9J0Zl5v7w6Z7xq4jV3cW9o2l4pQ0YD1LdR0Zk7zIYi4n2Ll7t6f0Z4X7r8x9o6a8GyL0X1m9Q0Z0A=="
+```
+
+> The above command sends a base-64 encoded string of the encrypted message payload and would return a response with
+> the content type header set to `application/octet-stream` returning the response body `hello world`.
+
+```bash
+hello world
+```
\ No newline at end of file
diff --git a/daprdocs/content/en/reference/api/error_codes.md b/daprdocs/content/en/reference/api/error_codes.md
index 7de8c0a2c3c..19d3b8cc36c 100644
--- a/daprdocs/content/en/reference/api/error_codes.md
+++ b/daprdocs/content/en/reference/api/error_codes.md
@@ -3,7 +3,7 @@ type: docs
 title: "Error codes returned by APIs"
 linkTitle: "Error codes"
 description: "Detailed reference of the Dapr API error codes"
-weight: 1300
+weight: 1400
 ---
 
 For http calls made to Dapr runtime, when an error is encountered, an error json is returned in http response body. The json contains an error code and an descriptive error message, e.g.
diff --git a/daprdocs/content/en/reference/api/workflow_api.md b/daprdocs/content/en/reference/api/workflow_api.md
index 9f9c34de81a..ca272d0faec 100644
--- a/daprdocs/content/en/reference/api/workflow_api.md
+++ b/daprdocs/content/en/reference/api/workflow_api.md
@@ -235,7 +235,7 @@ The API call will provide a JSON response similar to this:
 
 Parameter | Description
 --------- | -----------
-`runtimeStatus` | The status of the workflow instance. Values include: `RUNNING`, `TERMINATED`, `PAUSED`
+`runtimeStatus` | The status of the workflow instance. Values include: `"RUNNING"`, `"COMPLETED"`, `"CONTINUED_AS_NEW"`, `"FAILED"`, `"CANCELED"`, `"TERMINATED"`, `"PENDING"`, `"SUSPENDED"`
 
 ## Component format
diff --git a/daprdocs/content/en/reference/cli/dapr-init.md b/daprdocs/content/en/reference/cli/dapr-init.md
index bdb93bf5d15..7cc2d86ad7d 100644
--- a/daprdocs/content/en/reference/cli/dapr-init.md
+++ b/daprdocs/content/en/reference/cli/dapr-init.md
@@ -49,32 +49,44 @@ dapr init [flags]
 
 ### Examples
 
-#### Self hosted environment
+{{< tabs "Self-hosted" "Kubernetes" >}}
 
-Install Dapr by pulling container images for Placement, Redis and Zipkin. By default these images are pulled from Docker Hub. To switch to Dapr Github container registry as the default registry, set the `DAPR_DEFAULT_IMAGE_REGISTRY` environment variable value to be `GHCR`. To switch back to Docker Hub as default registry, unset this environment variable.
+{{% codetab %}}
+
+**Install**
+
+Install Dapr by pulling container images for Placement, Redis, and Zipkin. By default, these images are pulled from Docker Hub.
 
 ```bash
 dapr init
 ```
 
-You can also specify a specific runtime version. Be default, the latest version is used.
+Dapr can also run [Slim self-hosted mode]({{< ref self-hosted-no-docker.md >}}), without Docker.
 
 ```bash
-dapr init --runtime-version 1.4.0
+dapr init -s
 ```
 
-You can also install Dapr with a particular image variant, for example: [mariner]({{< ref "kubernetes-deploy.md#using-mariner-based-images" >}}).
+> To switch to Dapr Github container registry as the default registry, set the `DAPR_DEFAULT_IMAGE_REGISTRY` environment variable value to be `GHCR`. To switch back to Docker Hub as default registry, unset this environment variable.
+
+**Specify a runtime version**
+
+You can also specify a specific runtime version. By default, the latest version is used.
 
 ```bash
-dapr init --image-variant mariner
+dapr init --runtime-version 1.13.0
 ```
 
-Dapr can also run [Slim self-hosted mode]({{< ref self-hosted-no-docker.md >}}) without Docker.
+**Install with image variant**
+
+You can also install Dapr with a particular image variant, for example: [mariner]({{< ref "kubernetes-deploy.md#using-mariner-based-images" >}}).
 
 ```bash
-dapr init -s
+dapr init --image-variant mariner
 ```
 
+**Use Dapr Installer Bundle**
+
 In an offline or airgap environment, you can [download a Dapr Installer Bundle](https://github.com/dapr/installer-bundle/releases) and use this to install Dapr instead of pulling images from the network.
 
 ```bash
@@ -87,17 +99,17 @@ Dapr can also run in slim self-hosted mode without Docker in an airgap environme
 dapr init -s --from-dir 
 ```
 
-You can also specify a private registry to pull container images from. These images need to be published to private registries as shown below to enable Dapr CLI to pull them successfully via the `dapr init` command - 
+**Specify private registry**
+
+You can also specify a private registry to pull container images from. These images need to be published to private registries as shown below to enable the Dapr CLI to pull them successfully via the `dapr init` command:
 
 1. Dapr runtime container image(dapr) (Used to run Placement) - dapr/dapr:
 2. Redis container image(rejson) - dapr/3rdparty/rejson
 3. Zipkin container image(zipkin) - dapr/3rdparty/zipkin
 
-> All the required images used by Dapr needs to be under the`dapr` path.
+All the required images used by Dapr need to be under the `dapr` path. The 3rd party images have to be published under the `dapr/3rdparty` path.
 
-> The 3rd party images have to be published under `dapr/3rdparty` path.
-
-> image-registry uri follows this format - `docker.io/`
+The `image-registry` URI follows the `docker.io/<username>` format.
 
 ```bash
 dapr init --image-registry docker.io/username
@@ -114,7 +126,37 @@ You can specify a different container runtime while setting up Dapr. If you omit
 dapr init --container-runtime podman
 ```
 
-#### Kubernetes environment
+**Use Docker network**
+
+You can deploy local containers into Docker networks, which is useful for deploying into separate networks or when using Docker Compose for local development to deploy applications.
+
+Create the Docker network.
+
+```bash
+docker network create mynet
+```
+
+Initialize Dapr and specify the created Docker network.
+
+```bash
+dapr init --network mynet
+```
+
+Verify all containers are running in the specified network.
+
+```bash
+docker ps
+```
+
+Uninstall Dapr from that Docker network.
+ +```bash +dapr uninstall --all --network mynet +``` + +{{% /codetab %}} + +{{% codetab %}} ```bash dapr init -k @@ -149,3 +191,7 @@ Scenario 2 : dapr image hosted under a new/different directory in private regist ```bash dapr init -k --image-registry docker.io/username/ ``` + +{{% /codetab %}} + +{{< /tabs >}} diff --git a/daprdocs/content/en/reference/cli/dapr-mtls/dapr-mtls-renew-certificate.md b/daprdocs/content/en/reference/cli/dapr-mtls/dapr-mtls-renew-certificate.md index 71b154f7b45..8941c3ea3a7 100644 --- a/daprdocs/content/en/reference/cli/dapr-mtls/dapr-mtls-renew-certificate.md +++ b/daprdocs/content/en/reference/cli/dapr-mtls/dapr-mtls-renew-certificate.md @@ -24,7 +24,7 @@ dapr mtls renew-certificate [flags] | Name | Environment Variable | Default | Description | | -------------- | -------------------- | ----------------- | ------------------------------------------- | | `--help`, `-h` | | | help for renew-certificate -| `--kubernetes`, `-k` | | `false` | supprted platform| | +| `--kubernetes`, `-k` | | `false` | supported platform| | | `--valid-until` | | 365 days | Validity for newly created certificates | | `--restart` | | false | Restarts Dapr control plane services (Sentry service, Operator service and Placement server) | | `--timeout` | | 300 sec | The timeout for the certificate renewal process | diff --git a/daprdocs/content/en/reference/cli/dapr-stop.md b/daprdocs/content/en/reference/cli/dapr-stop.md index 0bb20213fa6..ef7acd50fb2 100644 --- a/daprdocs/content/en/reference/cli/dapr-stop.md +++ b/daprdocs/content/en/reference/cli/dapr-stop.md @@ -25,7 +25,7 @@ dapr stop [flags] | -------------------- | -------------------- | ------- | -------------------------------- | | `--app-id`, `-a` | `APP_ID` | | The application id to be stopped | | `--help`, `-h` | | | Print this help message | -| `--run-file`, `-f` | | | Stop running multiple applications at once using a Multi-App Run template file. Currently in [alpha]({{< ref "support-preview-features.md" >}}) and only availale in Linux/MacOS | +| `--run-file`, `-f` | | | Stop running multiple applications at once using a Multi-App Run template file. Currently in [alpha]({{< ref "support-preview-features.md" >}}) and only available in Linux/MacOS | ### Examples diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/blobstorage.md b/daprdocs/content/en/reference/components-reference/supported-bindings/blobstorage.md index 3df3e28048b..4baea225cf1 100644 --- a/daprdocs/content/en/reference/components-reference/supported-bindings/blobstorage.md +++ b/daprdocs/content/en/reference/components-reference/supported-bindings/blobstorage.md @@ -43,16 +43,16 @@ The above example uses secrets as plain strings. It is recommended to use a secr | Field | Required | Binding support | Details | Example | |--------------------|:--------:|--------|---------|---------| | `accountName` | Y | Input/Output | The name of the Azure Storage account | `"myexmapleaccount"` | -| `accountKey` | Y* | Input/Output | The access key of the Azure Storage account. Only required when not using Azure AD authentication. | `"access-key"` | +| `accountKey` | Y* | Input/Output | The access key of the Azure Storage account. Only required when not using Microsoft Entra ID authentication. | `"access-key"` | | `containerName` | Y | Output | The name of the Blob Storage container to write to | `myexamplecontainer` | | `endpoint` | N | Input/Output | Optional custom endpoint URL. 
This is useful when using the [Azurite emulator](https://github.com/Azure/azurite) or when using custom domains for Azure Storage (although this is not officially supported). The endpoint must be the full base URL, including the protocol (`http://` or `https://`), the IP or FQDN, and optional port. | `"http://127.0.0.1:10000"` | `decodeBase64` | N | Output | Configuration to decode base64 file content before saving to Blob Storage. (In case of saving a file with binary content). Defaults to `false` | `true`, `false` | | `getBlobRetryCount` | N | Output | Specifies the maximum number of HTTP GET requests that will be made while reading from a RetryReader Defaults to `10` | `1`, `2` | `publicAccessLevel` | N | Output | Specifies whether data in the container may be accessed publicly and the level of access (only used if the container is created by Dapr). Defaults to `none` | `blob`, `container`, `none` -### Azure Active Directory (AAD) authentication +### Microsoft Entra ID authentication -The Azure Blob Storage binding component supports authentication using all Azure Active Directory mechanisms. For further information and the relevant component metadata fields to provide depending on the choice of AAD authentication mechanism, see the [docs for authenticating to Azure]({{< ref authenticating-azure.md >}}). +The Azure Blob Storage binding component supports authentication using all Microsoft Entra ID mechanisms. For further information and the relevant component metadata fields to provide depending on the choice of Microsoft Entra ID authentication mechanism, see the [docs for authenticating to Azure]({{< ref authenticating-azure.md >}}). ## Binding support diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/cosmosdb.md b/daprdocs/content/en/reference/components-reference/supported-bindings/cosmosdb.md index 661c75e8e3f..813166f0265 100644 --- a/daprdocs/content/en/reference/components-reference/supported-bindings/cosmosdb.md +++ b/daprdocs/content/en/reference/components-reference/supported-bindings/cosmosdb.md @@ -48,9 +48,9 @@ The above example uses secrets as plain strings. It is recommended to use a secr For more information see [Azure Cosmos DB resource model](https://docs.microsoft.com/azure/cosmos-db/account-databases-containers-items). -### Azure Active Directory (Azure AD) authentication +### Microsoft Entra ID authentication -The Azure Cosmos DB binding component supports authentication using all Azure Active Directory mechanisms. For further information and the relevant component metadata fields to provide depending on the choice of AAD authentication mechanism, see the [docs for authenticating to Azure]({{< ref authenticating-azure.md >}}). +The Azure Cosmos DB binding component supports authentication using all Microsoft Entra ID mechanisms. For further information and the relevant component metadata fields to provide depending on the choice of Microsoft Entra ID authentication mechanism, see the [docs for authenticating to Azure]({{< ref authenticating-azure.md >}}). You can read additional information for setting up Cosmos DB with Azure AD authentication in the [section below](#setting-up-cosmos-db-for-authenticating-with-azure-ad). 
diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/eventgrid.md b/daprdocs/content/en/reference/components-reference/supported-bindings/eventgrid.md index 6288baee4b7..9e66107b591 100644 --- a/daprdocs/content/en/reference/components-reference/supported-bindings/eventgrid.md +++ b/daprdocs/content/en/reference/components-reference/supported-bindings/eventgrid.md @@ -90,9 +90,9 @@ This component supports **output binding** with the following operations: - `create`: publishes a message on the Event Grid topic -## Azure AD credentials +## Microsoft Entra ID credentials -The Azure Event Grid binding requires an Azure AD application and service principal for two reasons: +The Azure Event Grid binding requires an Microsoft Entra ID application and service principal for two reasons: - Creating an [event subscription](https://docs.microsoft.com/azure/event-grid/concepts#event-subscriptions) when Dapr is started (and updating it if the Dapr configuration changes) - Authenticating messages delivered by Event Hubs to your application. @@ -106,7 +106,7 @@ Requirements: - [Microsoft.Graph module for PowerShell](https://learn.microsoft.com/powershell/microsoftgraph/installation) for PowerShell installed: `Install-Module Microsoft.Graph -Scope CurrentUser -Repository PSGallery -Force` -For the first purpose, you will need to [create an Azure Service Principal](https://learn.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal). After creating it, take note of the Azure AD application's **clientID** (a UUID), and run the following script with the Azure CLI: +For the first purpose, you will need to [create an Azure Service Principal](https://learn.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal). After creating it, take note of the Microsoft Entra ID application's **clientID** (a UUID), and run the following script with the Azure CLI: ```bash # Set the client ID of the app you created @@ -140,7 +140,7 @@ Connect-MgGraph -Scopes "Application.Read.All","Application.ReadWrite.All" ./setup-eventgrid-sp.ps1 $clientId ``` -> Note: if your directory does not have a Service Principal for the application "Microsoft.EventGrid", you may need to run the command `Connect-MgGraph` and sign in as an admin for the Azure AD tenant (this is related to permissions on the Azure AD directory, and not the Azure subscription). Otherwise, please ask your tenant's admin to sign in and run this PowerShell command: `New-MgServicePrincipal -AppId "4962773b-9cdb-44cf-a8bf-237846a00ab7"` (the UUID is a constant) +> Note: if your directory does not have a Service Principal for the application "Microsoft.EventGrid", you may need to run the command `Connect-MgGraph` and sign in as an admin for the Microsoft Entra ID tenant (this is related to permissions on the Microsoft Entra ID directory, and not the Azure subscription). 
Otherwise, please ask your tenant's admin to sign in and run this PowerShell command: `New-MgServicePrincipal -AppId "4962773b-9cdb-44cf-a8bf-237846a00ab7"` (the UUID is a constant) ### Testing locally diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/eventhubs.md b/daprdocs/content/en/reference/components-reference/supported-bindings/eventhubs.md index a4dc7701369..ee005b4dda4 100644 --- a/daprdocs/content/en/reference/components-reference/supported-bindings/eventhubs.md +++ b/daprdocs/content/en/reference/components-reference/supported-bindings/eventhubs.md @@ -28,10 +28,10 @@ spec: - name: consumerGroup value: "myapp" # Either connectionString or eventHubNamespace is required - # Use connectionString when *not* using Azure AD + # Use connectionString when *not* using Microsoft Entra ID - name: connectionString value: "Endpoint=sb://{EventHubNamespace}.servicebus.windows.net/;SharedAccessKeyName={PolicyName};SharedAccessKey={Key};EntityPath={EventHub}" - # Use eventHubNamespace when using Azure AD + # Use eventHubNamespace when using Microsoft Entra ID - name: eventHubNamespace value: "namespace" - name: enableEntityManagement @@ -68,9 +68,9 @@ The above example uses secrets as plain strings. It is recommended to use a secr | Field | Required | Binding support | Details | Example | |--------------------|:--------:|------------|-----|---------| -| `eventHub` | Y* | Input/Output | The name of the Event Hubs hub ("topic"). Required if using Azure AD authentication or if the connection string doesn't contain an `EntityPath` value | `mytopic` | -| `connectionString` | Y* | Input/Output | Connection string for the Event Hub or the Event Hub namespace.
* Mutally exclusive with `eventHubNamespace` field.
* Required when not using [Azure AD Authentication]({{< ref "authenticating-azure.md" >}}) | `"Endpoint=sb://{EventHubNamespace}.servicebus.windows.net/;SharedAccessKeyName={PolicyName};SharedAccessKey={Key};EntityPath={EventHub}"` or `"Endpoint=sb://{EventHubNamespace}.servicebus.windows.net/;SharedAccessKeyName={PolicyName};SharedAccessKey={Key}"` -| `eventHubNamespace` | Y* | Input/Output | The Event Hub Namespace name.
* Mutally exclusive with `connectionString` field.
* Required when using [Azure AD Authentication]({{< ref "authenticating-azure.md" >}}) | `"namespace"` +| `eventHub` | Y* | Input/Output | The name of the Event Hubs hub ("topic"). Required if using Microsoft Entra ID authentication or if the connection string doesn't contain an `EntityPath` value | `mytopic` | +| `connectionString` | Y* | Input/Output | Connection string for the Event Hub or the Event Hub namespace.
* Mutually exclusive with `eventHubNamespace` field.
* Required when not using [Microsoft Entra ID Authentication]({{< ref "authenticating-azure.md" >}}) | `"Endpoint=sb://{EventHubNamespace}.servicebus.windows.net/;SharedAccessKeyName={PolicyName};SharedAccessKey={Key};EntityPath={EventHub}"` or `"Endpoint=sb://{EventHubNamespace}.servicebus.windows.net/;SharedAccessKeyName={PolicyName};SharedAccessKey={Key}"` +| `eventHubNamespace` | Y* | Input/Output | The Event Hub Namespace name.
* Mutually exclusive with `connectionString` field.
* Required when using [Microsoft Entra ID Authentication]({{< ref "authenticating-azure.md" >}}) | `"namespace"` | `enableEntityManagement` | N | Input/Output | Boolean value to allow management of the EventHub namespace and storage account. Default: `false` | `"true", "false"` | `resourceGroupName` | N | Input/Output | Name of the resource group the Event Hub namespace is part of. Required when entity management is enabled | `"test-rg"` | `subscriptionID` | N | Input/Output | Azure subscription ID value. Required when entity management is enabled | `"azure subscription id"` @@ -78,14 +78,14 @@ The above example uses secrets as plain strings. It is recommended to use a secr | `messageRetentionInDays` | N | Input/Output | Number of days to retain messages for in the newly created Event Hub namespace. Used only when entity management is enabled. Default: `"1"` | `"90"` | `consumerGroup` | Y | Input | The name of the [Event Hubs Consumer Group](https://docs.microsoft.com/azure/event-hubs/event-hubs-features#consumer-groups) to listen on | `"group1"` | | `storageAccountName` | Y | Input | Storage account name to use for the checkpoint store. |`"myeventhubstorage"` -| `storageAccountKey` | Y* | Input | Storage account key for the checkpoint store account.
* When using Azure AD, it's possible to omit this if the service principal has access to the storage account too. | `"112233445566778899"` +| `storageAccountKey` | Y* | Input | Storage account key for the checkpoint store account.
* When using Microsoft Entra ID, it's possible to omit this if the service principal has access to the storage account too. | `"112233445566778899"` | `storageConnectionString` | Y* | Input | Connection string for the checkpoint store, alternative to specifying `storageAccountKey` | `"DefaultEndpointsProtocol=https;AccountName=myeventhubstorage;AccountKey="` | `storageContainerName` | Y | Input | Storage container name for the storage account name. | `"myeventhubstoragecontainer"` | `direction` | N | Input/Output | The direction of the binding. | `"input"`, `"output"`, `"input, output"` -### Azure Active Directory (AAD) authentication +### Microsoft Entra ID authentication -The Azure Event Hubs pub/sub component supports authentication using all Azure Active Directory mechanisms. For further information and the relevant component metadata fields to provide depending on the choice of AAD authentication mechanism, see the [docs for authenticating to Azure]({{< ref authenticating-azure.md >}}). +The Azure Event Hubs pub/sub component supports authentication using all Microsoft Entra ID mechanisms. For further information and the relevant component metadata fields to provide depending on the choice of Microsoft Entra ID authentication mechanism, see the [docs for authenticating to Azure]({{< ref authenticating-azure.md >}}). ## Binding support diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/kubemq.md b/daprdocs/content/en/reference/components-reference/supported-bindings/kubemq.md index 5cf333ea213..e5112d2b29a 100644 --- a/daprdocs/content/en/reference/components-reference/supported-bindings/kubemq.md +++ b/daprdocs/content/en/reference/components-reference/supported-bindings/kubemq.md @@ -51,7 +51,7 @@ This component supports both **input and output** binding interfaces. {{< tabs "Self-Hosted" "Kubernetes">}} {{% codetab %}} -1. Obtain KubeMQ Key by visiting [https://account.kubemq.io/login/register](https://account.kubemq.io/login/register) and register for a key. +1. [Obtain KubeMQ Key](https://docs.kubemq.io/getting-started/quick-start#obtain-kubemq-license-key). 2. Wait for an email confirmation with your Key You can run a KubeMQ broker with Docker: @@ -64,7 +64,7 @@ You can then interact with the server using the client port: `localhost:50000` {{% /codetab %}} {{% codetab %}} -1. Obtain KubeMQ Key by visiting [https://account.kubemq.io/login/register](https://account.kubemq.io/login/register) and register for a key. +1. [Obtain KubeMQ Key](https://docs.kubemq.io/getting-started/quick-start#obtain-kubemq-license-key). 2. Wait for an email confirmation with your Key Then Run the following kubectl commands: diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/mysql.md b/daprdocs/content/en/reference/components-reference/supported-bindings/mysql.md index f2c11899630..881e8eeb405 100644 --- a/daprdocs/content/en/reference/components-reference/supported-bindings/mysql.md +++ b/daprdocs/content/en/reference/components-reference/supported-bindings/mysql.md @@ -52,7 +52,7 @@ Note that you can not use secret just for username/password. If you use secret, | `maxIdleConns` | N | Output | The max idle connections. Integer greater than 0 | `"10"` | | `maxOpenConns` | N | Output | The max open connections. Integer greater than 0 | `"10"` | | `connMaxLifetime` | N | Output | The max connection lifetime. Duration string | `"12s"` | -| `connMaxIdleTime` | N | Output | The max connection idel time. 
Duration string | `"12s"` | +| `connMaxIdleTime` | N | Output | The max connection idle time. Duration string | `"12s"` | ### SSL connection diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/openai.md b/daprdocs/content/en/reference/components-reference/supported-bindings/openai.md index f62950c04b6..34bbeb151fc 100644 --- a/daprdocs/content/en/reference/components-reference/supported-bindings/openai.md +++ b/daprdocs/content/en/reference/components-reference/supported-bindings/openai.md @@ -36,14 +36,14 @@ The above example uses `apiKey` as a plain string. It is recommended to use a s | Field | Required | Binding support | Details | Example | |--------------------|:--------:|--------|---------|---------| | `endpoint` | Y | Output | Azure OpenAI service endpoint URL. | `"https://myopenai.openai.azure.com"` | -| `apiKey` | Y* | Output | The access key of the Azure OpenAI service. Only required when not using Azure AD authentication. | `"1234567890abcdef"` | +| `apiKey` | Y* | Output | The access key of the Azure OpenAI service. Only required when not using Microsoft Entra ID authentication. | `"1234567890abcdef"` | | `azureTenantId` | Y* | Input | The tenant ID of the Azure OpenAI resource. Only required when `apiKey` is not provided. | `"tenentID"` | | `azureClientId` | Y* | Input | The client ID that should be used by the binding to create or update the Azure OpenAI Subscription and to authenticate incoming messages. Only required when `apiKey` is not provided.| `"clientId"` | | `azureClientSecret` | Y* | Input | The client secret that should be used by the binding to create or update the Azure OpenAI Subscription and to authenticate incoming messages. Only required when `apiKey` is not provided. | `"clientSecret"` | -### Azure Active Directory (AAD) authentication +### Microsoft Entra ID authentication -The Azure OpenAI binding component supports authentication using all Azure Active Directory mechanisms. For further information and the relevant component metadata fields to provide depending on the choice of AAD authentication mechanism, see the [docs for authenticating to Azure]({{< ref authenticating-azure.md >}}). +The Azure OpenAI binding component supports authentication using all Microsoft Entra ID mechanisms. For further information and the relevant component metadata fields to provide depending on the choice of Microsoft Entra ID authentication mechanism, see the [docs for authenticating to Azure]({{< ref authenticating-azure.md >}}). #### Example Configuration diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/postgresql.md b/daprdocs/content/en/reference/components-reference/supported-bindings/postgresql.md index 0a21d93b663..235cebabaa2 100644 --- a/daprdocs/content/en/reference/components-reference/supported-bindings/postgresql.md +++ b/daprdocs/content/en/reference/components-reference/supported-bindings/postgresql.md @@ -41,15 +41,15 @@ The following metadata options are **required** to authenticate using a PostgreS |--------|:--------:|---------|---------| | `connectionString` | Y | The connection string for the PostgreSQL database. See the PostgreSQL [documentation on database connections](https://www.postgresql.org/docs/current/libpq-connect.html) for information on how to define a connection string. 
| `"host=localhost user=postgres password=example port=5432 connect_timeout=10 database=my_db"` -### Authenticate using Azure AD +### Authenticate using Microsoft Entra ID -Authenticating with Azure AD is supported with Azure Database for PostgreSQL. All authentication methods supported by Dapr can be used, including client credentials ("service principal") and Managed Identity. +Authenticating with Microsoft Entra ID is supported with Azure Database for PostgreSQL. All authentication methods supported by Dapr can be used, including client credentials ("service principal") and Managed Identity. | Field | Required | Details | Example | |--------|:--------:|---------|---------| -| `useAzureAD` | Y | Must be set to `true` to enable the component to retrieve access tokens from Azure AD. | `"true"` | -| `connectionString` | Y | The connection string for the PostgreSQL database.
This must contain the user, which corresponds to the name of the user created inside PostgreSQL that maps to the Azure AD identity; this is often the name of the corresponding principal (e.g. the name of the Azure AD application). This connection string should not contain any password. | `"host=mydb.postgres.database.azure.com user=myapplication port=5432 database=my_db sslmode=require"` | -| `azureTenantId` | N | ID of the Azure AD tenant | `"cd4b2887-304c-…"` | +| `useAzureAD` | Y | Must be set to `true` to enable the component to retrieve access tokens from Microsoft Entra ID. | `"true"` | +| `connectionString` | Y | The connection string for the PostgreSQL database.
This must contain the user, which corresponds to the name of the user created inside PostgreSQL that maps to the Microsoft Entra ID identity; this is often the name of the corresponding principal (e.g. the name of the Microsoft Entra ID application). This connection string should not contain any password. | `"host=mydb.postgres.database.azure.com user=myapplication port=5432 database=my_db sslmode=require"` | +| `azureTenantId` | N | ID of the Microsoft Entra ID tenant | `"cd4b2887-304c-…"` | | `azureClientId` | N | Client ID (application ID) | `"c7dd251f-811f-…"` | | `azureClientSecret` | N | Client secret (application password) | `"Ecy3X…"` | diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/redis.md b/daprdocs/content/en/reference/components-reference/supported-bindings/redis.md index 0f74473d38c..e147d101c27 100644 --- a/daprdocs/content/en/reference/components-reference/supported-bindings/redis.md +++ b/daprdocs/content/en/reference/components-reference/supported-bindings/redis.md @@ -11,7 +11,6 @@ aliases: To setup Redis binding create a component of type `bindings.redis`. See [this guide]({{< ref "howto-bindings.md#1-create-a-binding" >}}) on how to create and apply a binding configuration. - ```yaml apiVersion: dapr.io/v1alpha1 kind: Component diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/s3.md b/daprdocs/content/en/reference/components-reference/supported-bindings/s3.md index 7e7b93a8ecd..2de9b95a727 100644 --- a/daprdocs/content/en/reference/components-reference/supported-bindings/s3.md +++ b/daprdocs/content/en/reference/components-reference/supported-bindings/s3.md @@ -151,12 +151,12 @@ To use the S3 component, you need to use an existing bucket. Follow the [AWS doc This component supports **output binding** with the following operations: -- `create` : [Create file](#create-file) -- `get` : [Get file](#get-file) -- `delete` : [Delete file](#delete-file) -- `list`: [List file](#list-files) +- `create` : [Create object](#create-object) +- `get` : [Get object](#get-object) +- `delete` : [Delete object](#delete-object) +- `list`: [List objects](#list-objects) -### Create file +### Create object To perform a create operation, invoke the AWS S3 binding with a `POST` method and the following JSON body: diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/servicebusqueues.md b/daprdocs/content/en/reference/components-reference/supported-bindings/servicebusqueues.md index e2c74a4ba4b..c836626edd6 100644 --- a/daprdocs/content/en/reference/components-reference/supported-bindings/servicebusqueues.md +++ b/daprdocs/content/en/reference/components-reference/supported-bindings/servicebusqueues.md @@ -67,10 +67,10 @@ The above example uses secrets as plain strings. It is recommended to use a secr | Field | Required | Binding support | Details | Example | |--------------------|:--------:|-----------------|----------|---------| -| `connectionString` | Y | Input/Output | The Service Bus connection string. Required unless using Azure AD authentication. | `"Endpoint=sb://************"` | +| `connectionString` | Y | Input/Output | The Service Bus connection string. Required unless using Microsoft Entra ID authentication. | `"Endpoint=sb://************"` | | `queueName` | Y | Input/Output | The Service Bus queue name. Queue names are case-insensitive and will always be forced to lowercase. 
| `"queuename"` | | `timeoutInSec` | N | Input/Output | Timeout for all invocations to the Azure Service Bus endpoint, in seconds. *Note that this option impacts network calls and it's unrelated to the TTL applies to messages*. Default: `"60"` | `"60"` | -| `namespaceName`| N | Input/Output | Parameter to set the address of the Service Bus namespace, as a fully-qualified domain name. Required if using Azure AD authentication. | `"namespace.servicebus.windows.net"` | +| `namespaceName`| N | Input/Output | Parameter to set the address of the Service Bus namespace, as a fully-qualified domain name. Required if using Microsoft Entra ID authentication. | `"namespace.servicebus.windows.net"` | | `disableEntityManagement` | N | Input/Output | When set to true, queues and subscriptions do not get created automatically. Default: `"false"` | `"true"`, `"false"` | `lockDurationInSec` | N | Input/Output | Defines the length in seconds that a message will be locked for before expiring. Used during subscription creation only. Default set by server. | `"30"` | `autoDeleteOnIdleInSec` | N | Input/Output | Time in seconds to wait before auto deleting idle subscriptions. Used during subscription creation only. Default: `"0"` (disabled) | `"3600"` @@ -90,9 +90,9 @@ The above example uses secrets as plain strings. It is recommended to use a secr | `publishInitialRetryIntervalInMs` | N | Output | Time in milliseconds for the initial exponential backoff when Azure Service Bus throttle messages. Defaults: `"500"` | `"500"` | `direction` | N | Input/Output | The direction of the binding | `"input"`, `"output"`, `"input, output"` -### Azure Active Directory (AAD) authentication +### Microsoft Entra ID authentication -The Azure Service Bus Queues binding component supports authentication using all Azure Active Directory mechanisms, including Managed Identities. For further information and the relevant component metadata fields to provide depending on the choice of AAD authentication mechanism, see the [docs for authenticating to Azure]({{< ref authenticating-azure.md >}}). +The Azure Service Bus Queues binding component supports authentication using all Microsoft Entra ID mechanisms, including Managed Identities. For further information and the relevant component metadata fields to provide depending on the choice of Microsoft Entra ID authentication mechanism, see the [docs for authenticating to Azure]({{< ref authenticating-azure.md >}}). #### Example Configuration diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/signalr.md b/daprdocs/content/en/reference/components-reference/supported-bindings/signalr.md index 560ed30fcc9..ea29d744a0d 100644 --- a/daprdocs/content/en/reference/components-reference/supported-bindings/signalr.md +++ b/daprdocs/content/en/reference/components-reference/supported-bindings/signalr.md @@ -37,14 +37,14 @@ The above example uses secrets as plain strings. It is recommended to use a secr |--------------------|:--------:|------------|-----|---------| | `connectionString` | Y | Output | The Azure SignalR connection string | `"Endpoint=https://.service.signalr.net;AccessKey=;Version=1.0;"` | | `hub` | N | Output | Defines the hub in which the message will be send. 
The hub can be dynamically defined as a metadata value when publishing to an output binding (key is "hub") | `"myhub"` | -| `endpoint` | N | Output | Endpoint of Azure SignalR; required if not included in the `connectionString` or if using Azure AD | `"https://.service.signalr.net"` +| `endpoint` | N | Output | Endpoint of Azure SignalR; required if not included in the `connectionString` or if using Microsoft Entra ID | `"https://.service.signalr.net"` | `accessKey` | N | Output | Access key | `"your-access-key"` -### Azure Active Directory (Azure AD) authentication +### Microsoft Entra ID authentication -The Azure SignalR binding component supports authentication using all Azure Active Directory mechanisms. See the [docs for authenticating to Azure]({{< ref authenticating-azure.md >}}) to learn more about the relevant component metadata fields based on your choice of Azure AD authentication mechanism. +The Azure SignalR binding component supports authentication using all Microsoft Entra ID mechanisms. See the [docs for authenticating to Azure]({{< ref authenticating-azure.md >}}) to learn more about the relevant component metadata fields based on your choice of Microsoft Entra ID authentication mechanism. -You have two options to authenticate this component with Azure AD: +You have two options to authenticate this component with Microsoft Entra ID: - Pass individual metadata keys: - `endpoint` for the endpoint @@ -52,7 +52,7 @@ You have two options to authenticate this component with Azure AD: - Pass a connection string with `AuthType=aad` specified: - System-assigned managed identity: `Endpoint=https://.service.signalr.net;AuthType=aad;Version=1.0;` - User-assigned managed identity: `Endpoint=https://.service.signalr.net;AuthType=aad;ClientId=;Version=1.0;` - - Azure AD application: `Endpoint=https://.service.signalr.net;AuthType=aad;ClientId=;ClientSecret=;TenantId=;Version=1.0;` + - Microsoft Entra ID application: `Endpoint=https://.service.signalr.net;AuthType=aad;ClientId=;ClientSecret=;TenantId=;Version=1.0;` Note that you cannot use a connection string if your application's ClientSecret contains a `;` character. ## Binding support diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/storagequeues.md b/daprdocs/content/en/reference/components-reference/supported-bindings/storagequeues.md index e29e29932b7..6562364a38b 100644 --- a/daprdocs/content/en/reference/components-reference/supported-bindings/storagequeues.md +++ b/daprdocs/content/en/reference/components-reference/supported-bindings/storagequeues.md @@ -52,7 +52,7 @@ The above example uses secrets as plain strings. It is recommended to use a secr | Field | Required | Binding support | Details | Example | |--------------------|:--------:|------------|-----|---------| | `accountName` | Y | Input/Output | The name of the Azure Storage account | `"account1"` | -| `accountKey` | Y* | Input/Output | The access key of the Azure Storage account. Only required when not using Azure AD authentication. | `"access-key"` | +| `accountKey` | Y* | Input/Output | The access key of the Azure Storage account. Only required when not using Microsoft Entra ID authentication. | `"access-key"` | | `queueName` | Y | Input/Output | The name of the Azure Storage queue | `"myqueue"` | | `pollingInterval` | N | Output | Set the interval to poll Azure Storage Queues for new messages, as a Go duration value. Default: `"10s"` | `"30s"` | | `ttlInSeconds` | N | Output | Parameter to set the default message time to live. 
If this parameter is omitted, messages will expire after 10 minutes. See [also](#specifying-a-ttl-per-message) | `"60"` | @@ -62,9 +62,9 @@ The above example uses secrets as plain strings. It is recommended to use a secr | `visibilityTimeout` | N | Input | Allows setting a custom queue visibility timeout to avoid immediate retrying of recently failed messages. Defaults to 30 seconds. | `"100s"` | | `direction` | N | Input/Output | Direction of the binding. | `"input"`, `"output"`, `"input, output"` | -### Azure Active Directory (Azure AD) authentication +### Microsoft Entra ID authentication -The Azure Storage Queue binding component supports authentication using all Azure Active Directory mechanisms. See the [docs for authenticating to Azure]({{< ref authenticating-azure.md >}}) to learn more about the relevant component metadata fields based on your choice of Azure AD authentication mechanism. +The Azure Storage Queue binding component supports authentication using all Microsoft Entra ID mechanisms. See the [docs for authenticating to Azure]({{< ref authenticating-azure.md >}}) to learn more about the relevant component metadata fields based on your choice of Microsoft Entra ID authentication mechanism. ## Binding support diff --git a/daprdocs/content/en/reference/components-reference/supported-configuration-stores/azure-appconfig-configuration-store.md b/daprdocs/content/en/reference/components-reference/supported-configuration-stores/azure-appconfig-configuration-store.md index 11c1848cc13..c9f26f2a26c 100644 --- a/daprdocs/content/en/reference/components-reference/supported-configuration-stores/azure-appconfig-configuration-store.md +++ b/daprdocs/content/en/reference/components-reference/supported-configuration-stores/azure-appconfig-configuration-store.md @@ -65,11 +65,11 @@ The above example uses secrets as plain strings. It is recommended to use a secr Access an App Configuration instance using its connection string, which is available in the Azure portal. Since connection strings contain credential information, you should treat them as secrets and [use a secret store]({{< ref component-secrets.md >}}). -## Authenticating with Azure AD +## Authenticating with Microsoft Entra ID -The Azure App Configuration configuration store component also supports authentication with Azure AD. Before you enable this component: +The Azure App Configuration configuration store component also supports authentication with Microsoft Entra ID. Before you enable this component: - Read the [Authenticating to Azure]({{< ref authenticating-azure.md >}}) document. -- Create an Azure AD application (also called Service Principal). +- Create an Microsoft Entra ID application (also called Service Principal). - Alternatively, create a managed identity for your application platform. 
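For orientation, a minimal sketch of an App Configuration component that authenticates with a service principal is shown below. The endpoint, tenant, and client values are placeholders, and the metadata field names are assumed to follow the shared Azure authentication conventions described in the linked document, so verify them against the component's spec metadata table.

```yaml
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: configstore
spec:
  type: configuration.azure.appconfig
  version: v1
  metadata:
  # Use the instance endpoint instead of a connection string when authenticating with Microsoft Entra ID
  - name: host
    value: "https://[your_store_name].azconfig.io"
  # Service principal credentials; omit these when relying on a managed identity
  - name: azureTenantId
    value: "[your_tenant_id]"
  - name: azureClientId
    value: "[your_client_id]"
  - name: azureClientSecret
    value: "[your_client_secret]"
```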
## Set up Azure App Configuration diff --git a/daprdocs/content/en/reference/components-reference/supported-configuration-stores/postgresql-configuration-store.md b/daprdocs/content/en/reference/components-reference/supported-configuration-stores/postgresql-configuration-store.md index b9bc3de8328..a846b6a2344 100644 --- a/daprdocs/content/en/reference/components-reference/supported-configuration-stores/postgresql-configuration-store.md +++ b/daprdocs/content/en/reference/components-reference/supported-configuration-stores/postgresql-configuration-store.md @@ -67,15 +67,15 @@ The following metadata options are **required** to authenticate using a PostgreS |--------|:--------:|---------|---------| | `connectionString` | Y | The connection string for the PostgreSQL database. See the PostgreSQL [documentation on database connections](https://www.postgresql.org/docs/current/libpq-connect.html) for information on how to define a connection string. | `"host=localhost user=postgres password=example port=5432 connect_timeout=10 database=my_db"` -### Authenticate using Azure AD +### Authenticate using Microsoft Entra ID -Authenticating with Azure AD is supported with Azure Database for PostgreSQL. All authentication methods supported by Dapr can be used, including client credentials ("service principal") and Managed Identity. +Authenticating with Microsoft Entra ID is supported with Azure Database for PostgreSQL. All authentication methods supported by Dapr can be used, including client credentials ("service principal") and Managed Identity. | Field | Required | Details | Example | |--------|:--------:|---------|---------| -| `useAzureAD` | Y | Must be set to `true` to enable the component to retrieve access tokens from Azure AD. | `"true"` | -| `connectionString` | Y | The connection string for the PostgreSQL database.
This must contain the user, which corresponds to the name of the user created inside PostgreSQL that maps to the Azure AD identity; this is often the name of the corresponding principal (e.g. the name of the Azure AD application). This connection string should not contain any password. | `"host=mydb.postgres.database.azure.com user=myapplication port=5432 database=my_db sslmode=require"` | -| `azureTenantId` | N | ID of the Azure AD tenant | `"cd4b2887-304c-…"` | +| `useAzureAD` | Y | Must be set to `true` to enable the component to retrieve access tokens from Microsoft Entra ID. | `"true"` | +| `connectionString` | Y | The connection string for the PostgreSQL database.
This must contain the user, which corresponds to the name of the user created inside PostgreSQL that maps to the Microsoft Entra ID identity; this is often the name of the corresponding principal (e.g. the name of the Microsoft Entra ID application). This connection string should not contain any password. | `"host=mydb.postgres.database.azure.com user=myapplication port=5432 database=my_db sslmode=require"` | +| `azureTenantId` | N | ID of the Microsoft Entra ID tenant | `"cd4b2887-304c-…"` | | `azureClientId` | N | Client ID (application ID) | `"c7dd251f-811f-…"` | | `azureClientSecret` | N | Client secret (application password) | `"Ecy3X…"` | diff --git a/daprdocs/content/en/reference/components-reference/supported-configuration-stores/redis-configuration-store.md b/daprdocs/content/en/reference/components-reference/supported-configuration-stores/redis-configuration-store.md index d4649c19502..205cc98ad70 100644 --- a/daprdocs/content/en/reference/components-reference/supported-configuration-stores/redis-configuration-store.md +++ b/daprdocs/content/en/reference/components-reference/supported-configuration-stores/redis-configuration-store.md @@ -10,6 +10,7 @@ aliases: ## Component format To setup Redis configuration store create a component of type `configuration.redis`. See [this guide]({{< ref "howto-manage-configuration.md#configure-a-dapr-configuration-store" >}}) on how to create and apply a configuration store configuration. + ```yaml apiVersion: dapr.io/v1alpha1 kind: Component diff --git a/daprdocs/content/en/reference/components-reference/supported-cryptography/azure-key-vault.md b/daprdocs/content/en/reference/components-reference/supported-cryptography/azure-key-vault.md index 6ec9ba6a456..18f650a07b5 100644 --- a/daprdocs/content/en/reference/components-reference/supported-cryptography/azure-key-vault.md +++ b/daprdocs/content/en/reference/components-reference/supported-cryptography/azure-key-vault.md @@ -32,12 +32,12 @@ spec: The above example uses secrets as plain strings. It is recommended to use a secret store for the secrets, as described [here]({{< ref component-secrets.md >}}). {{% /alert %}} -## Authenticating with Azure AD +## Authenticating with Microsoft Entra ID -The Azure Key Vault cryptography component supports authentication with Azure AD only. Before you enable this component: +The Azure Key Vault cryptography component supports authentication with Microsoft Entra ID only. Before you enable this component: 1. Read the [Authenticating to Azure]({{< ref "authenticating-azure.md" >}}) document. -1. Create an [Azure AD application]({{< ref "howto-aad.md" >}}) (also called a Service Principal). +1. Create an [Microsoft Entra ID application]({{< ref "howto-aad.md" >}}) (also called a Service Principal). 1. Alternatively, create a [managed identity]({{< ref "howto-mi.md" >}}) for your application platform. 
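As a rough sketch only, assuming the `crypto.azure.keyvault` component type and the shared Azure authentication metadata fields (confirm both against the spec metadata table that follows), a component that authenticates with a service principal could look like this:

```yaml
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: azurekeyvault-crypto
spec:
  type: crypto.azure.keyvault
  version: v1
  metadata:
  - name: vaultName
    value: "[your_keyvault_name]"
  # Service principal credentials; omit these when using a managed identity
  - name: azureTenantId
    value: "[your_tenant_id]"
  - name: azureClientId
    value: "[your_client_id]"
  - name: azureClientSecret
    value: "[your_client_secret]"
```

As with the other examples on this page, prefer referencing the client secret from a secret store rather than using a plain string.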
## Spec metadata fields diff --git a/daprdocs/content/en/reference/components-reference/supported-middleware/middleware-bearer.md b/daprdocs/content/en/reference/components-reference/supported-middleware/middleware-bearer.md index a075548854f..d47c769a93b 100644 --- a/daprdocs/content/en/reference/components-reference/supported-middleware/middleware-bearer.md +++ b/daprdocs/content/en/reference/components-reference/supported-middleware/middleware-bearer.md @@ -42,7 +42,7 @@ spec: Common values for `issuer` include: - Auth0: `https://{domain}`, where `{domain}` is the domain of your Auth0 application -- Azure AD: `https://login.microsoftonline.com/{tenant}/v2.0`, where `{tenant}` should be replaced with the tenant ID of your application, as a UUID +- Microsoft Entra ID: `https://login.microsoftonline.com/{tenant}/v2.0`, where `{tenant}` should be replaced with the tenant ID of your application, as a UUID - Google: `https://accounts.google.com` - Salesforce (Force.com): `https://login.salesforce.com` diff --git a/daprdocs/content/en/reference/components-reference/supported-middleware/middleware-opa.md b/daprdocs/content/en/reference/components-reference/supported-middleware/middleware-opa.md index 62bf7692277..a4e6a47bbde 100644 --- a/daprdocs/content/en/reference/components-reference/supported-middleware/middleware-opa.md +++ b/daprdocs/content/en/reference/components-reference/supported-middleware/middleware-opa.md @@ -31,7 +31,7 @@ spec: value: 403 # `readBody` controls whether the middleware reads the entire request body in-memory and make it - # availble for policy decisions. + # available for policy decisions. - name: readBody value: "false" diff --git a/daprdocs/content/en/reference/components-reference/supported-middleware/middleware-wasm.md b/daprdocs/content/en/reference/components-reference/supported-middleware/middleware-wasm.md index d83bda22fb2..e1167ad0299 100644 --- a/daprdocs/content/en/reference/components-reference/supported-middleware/middleware-wasm.md +++ b/daprdocs/content/en/reference/components-reference/supported-middleware/middleware-wasm.md @@ -51,7 +51,7 @@ How to compile this is described later. | Field | Details | Required | Example | |-------|----------------------------------------------------------------|----------|----------------| | url | The URL of the resource including the Wasm binary to instantiate. The supported schemes include `file://`, `http://`, and `https://`. The path of a `file://` URL is relative to the Dapr process unless it begins with `/`. | true | `file://hello.wasm`, `https://example.com/hello.wasm` | -| guestConfig | An optional configuration passed to Wasm guests. Users can pass an arbitrary string to be parsed by the guest code. | false | `enviroment=production`,`{"environment":"production"}` | +| guestConfig | An optional configuration passed to Wasm guests. Users can pass an arbitrary string to be parsed by the guest code. 
| false | `environment=production`,`{"environment":"production"}` | ## Dapr configuration diff --git a/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-apache-kafka.md b/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-apache-kafka.md index 05ff835208e..b05e6d01d1c 100644 --- a/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-apache-kafka.md +++ b/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-apache-kafka.md @@ -58,8 +58,8 @@ spec: | Field | Required | Details | Example | |--------------------|:--------:|---------|---------| | brokers | Y | A comma-separated list of Kafka brokers. | `"localhost:9092,dapr-kafka.myapp.svc.cluster.local:9093"` -| consumerGroup | N | A kafka consumer group to listen on. Each record published to a topic is delivered to one consumer within each consumer group subscribed to the topic. | `"group1"` -| consumerID | N | Consumer ID (consumer tag) organizes one or more consumers into a group. Consumers with the same consumer ID work as one virtual consumer; for example, a message is processed only once by one of the consumers in the group. If the `consumerID` is not provided, the Dapr runtime set it to the Dapr application ID (`appID`) value. | `"channel1"` +| consumerGroup | N | A kafka consumer group to listen on. Each record published to a topic is delivered to one consumer within each consumer group subscribed to the topic. If a value for `consumerGroup` is provided, any value for `consumerID` is ignored - a combination of the consumer group and a random unique identifier will be set for the `consumerID` instead. | `"group1"` +| consumerID | N | Consumer ID (consumer tag) organizes one or more consumers into a group. Consumers with the same consumer ID work as one virtual consumer; for example, a message is processed only once by one of the consumers in the group. If the `consumerID` is not provided, the Dapr runtime set it to the Dapr application ID (`appID`) value. If a value for `consumerGroup` is provided, any value for `consumerID` is ignored - a combination of the consumer group and a random unique identifier will be set for the `consumerID` instead. | `"channel1"` | clientID | N | A user-provided string sent with every request to the Kafka brokers for logging, debugging, and auditing purposes. Defaults to `"namespace.appID"` for Kubernetes mode or `"appID"` for Self-Hosted mode. | `"my-namespace.my-dapr-app"`, `"my-dapr-app"` | authRequired | N | *Deprecated* Enable [SASL](https://en.wikipedia.org/wiki/Simple_Authentication_and_Security_Layer) authentication with the Kafka brokers. | `"true"`, `"false"` | authType | Y | Configure or disable authentication. Supported values: `none`, `password`, `mtls`, or `oidc` | `"password"`, `"none"` @@ -299,6 +299,44 @@ auth: secretStore: ``` +## Consuming from multiple topics + +When consuming from multiple topics using a single pub/sub component, there is no guarantee about how the consumers in your consumer group are balanced across the topic partitions. + +For instance, let's say you are subscribing to two topics with 10 partitions per topic and you have 20 replicas of your service consuming from the two topics. There is no guarantee that 10 will be assigned to the first topic and 10 to the second topic. Instead, the partitions could be divided unequally, with more than 10 assigned to the first topic and the rest assigned to the second topic. 
+ +This can result in idle consumers listening to the first topic and over-extended consumers on the second topic, or vice versa. This same behavior can be observed when using auto-scalers such as HPA or KEDA. + +If you run into this particular issue, it is recommended that you configure a single pub/sub component per topic with uniquely defined consumer groups per component. This guarantees that all replicas of your service are fully allocated to the unique consumer group, where each consumer group targets one specific topic. + +For example, you may define two Dapr components with the following configuration: + +```yaml +apiVersion: dapr.io/v1alpha1 +kind: Component +metadata: + name: kafka-pubsub-topic-one +spec: + type: pubsub.kafka + version: v1 + metadata: + - name: consumerGroup + value: "{appID}-topic-one" +``` + +```yaml +apiVersion: dapr.io/v1alpha1 +kind: Component +metadata: + name: kafka-pubsub-topic-two +spec: + type: pubsub.kafka + version: v1 + metadata: + - name: consumerGroup + value: "{appID}-topic-two" +``` + ## Sending and receiving multiple messages Apache Kafka component supports sending and receiving multiple messages in a single operation using the bulk Pub/sub API. diff --git a/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-aws-snssqs.md b/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-aws-snssqs.md index 61b68290196..d4f8618fce3 100644 --- a/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-aws-snssqs.md +++ b/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-aws-snssqs.md @@ -83,7 +83,7 @@ The above example uses secrets as plain strings. It is recommended to use [a sec | secretKey | Y | Secret for the AWS user/role. If using an `AssumeRole` access, you will also need to provide a `sessionToken` |`"wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"` | region | Y | The AWS region where the SNS/SQS assets are located or be created in. See [this page](https://aws.amazon.com/about-aws/global-infrastructure/regional-product-services/?p=ugi&l=na) for valid regions. Ensure that SNS and SQS are available in that region | `"us-east-1"` | consumerID | N | Consumer ID (consumer tag) organizes one or more consumers into a group. Consumers with the same consumer ID work as one virtual consumer; for example, a message is processed only once by one of the consumers in the group. If the `consumerID` is not provided, the Dapr runtime set it to the Dapr application ID (`appID`) value. See the [pub/sub broker component file]({{< ref setup-pubsub.md >}}) to learn how ConsumerID is automatically generated. | `"channel1"` -| endpoint | N | AWS endpoint for the component to use. Only used for local development with, for example, [localstack](https://github.com/localstack/localstack). The `endpoint` is unncessary when running against production AWS | `"http://localhost:4566"` +| endpoint | N | AWS endpoint for the component to use. Only used for local development with, for example, [localstack](https://github.com/localstack/localstack). The `endpoint` is unnecessary when running against production AWS | `"http://localhost:4566"` | sessionToken | N | AWS session token to use. A session token is only required if you are using temporary security credentials | `"TOKEN"` | messageReceiveLimit | N | Number of times a message is received, after processing of that message fails, that once reached, results in removing of that message from the queue. 
If `sqsDeadLettersQueueName` is specified, `messageReceiveLimit` is the number of times a message is received, after processing of that message fails, that once reached, results in moving of the message to the SQS dead-letters queue. Default: `10` | `10` | sqsDeadLettersQueueName | N | Name of the dead letters queue for this application | `"myapp-dlq"` diff --git a/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-azure-eventhubs.md b/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-azure-eventhubs.md index 40d63bdfe75..215d93bf44e 100644 --- a/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-azure-eventhubs.md +++ b/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-azure-eventhubs.md @@ -23,10 +23,10 @@ spec: version: v1 metadata: # Either connectionString or eventHubNamespace is required - # Use connectionString when *not* using Azure AD + # Use connectionString when *not* using Microsoft Entra ID - name: connectionString value: "Endpoint=sb://{EventHubNamespace}.servicebus.windows.net/;SharedAccessKeyName={PolicyName};SharedAccessKey={Key};EntityPath={EventHub}" - # Use eventHubNamespace when using Azure AD + # Use eventHubNamespace when using Microsoft Entra ID - name: eventHubNamespace value: "namespace" - name: consumerID # Optional. If not supplied, the runtime will create one. @@ -62,11 +62,11 @@ The above example uses secrets as plain strings. It is recommended to use a secr | Field | Required | Details | Example | |--------------------|:--------:|---------|---------| -| `connectionString` | Y* | Connection string for the Event Hub or the Event Hub namespace.
* Mutally exclusive with `eventHubNamespace` field.
* Required when not using [Azure AD Authentication]({{< ref "authenticating-azure.md" >}}) | `"Endpoint=sb://{EventHubNamespace}.servicebus.windows.net/;SharedAccessKeyName={PolicyName};SharedAccessKey={Key};EntityPath={EventHub}"` or `"Endpoint=sb://{EventHubNamespace}.servicebus.windows.net/;SharedAccessKeyName={PolicyName};SharedAccessKey={Key}"` -| `eventHubNamespace` | Y* | The Event Hub Namespace name.
* Mutally exclusive with `connectionString` field.
* Required when using [Azure AD Authentication]({{< ref "authenticating-azure.md" >}}) | `"namespace"` +| `connectionString` | Y* | Connection string for the Event Hub or the Event Hub namespace.
* Mutually exclusive with `eventHubNamespace` field.
* Required when not using [Microsoft Entra ID Authentication]({{< ref "authenticating-azure.md" >}}) | `"Endpoint=sb://{EventHubNamespace}.servicebus.windows.net/;SharedAccessKeyName={PolicyName};SharedAccessKey={Key};EntityPath={EventHub}"` or `"Endpoint=sb://{EventHubNamespace}.servicebus.windows.net/;SharedAccessKeyName={PolicyName};SharedAccessKey={Key}"` +| `eventHubNamespace` | Y* | The Event Hub Namespace name.
* Mutually exclusive with `connectionString` field.
* Required when using [Microsoft Entra ID Authentication]({{< ref "authenticating-azure.md" >}}) | `"namespace"` | `consumerID` | N | Consumer ID (consumer tag) organizes one or more consumers into a group. Consumers with the same consumer ID work as one virtual consumer; for example, a message is processed only once by one of the consumers in the group. If the `consumerID` is not provided, the Dapr runtime set it to the Dapr application ID (`appID`) value. | `"channel1"` | `storageAccountName` | Y | Storage account name to use for the checkpoint store. |`"myeventhubstorage"` -| `storageAccountKey` | Y* | Storage account key for the checkpoint store account.
* When using Azure AD, it's possible to omit this if the service principal has access to the storage account too. | `"112233445566778899"` +| `storageAccountKey` | Y* | Storage account key for the checkpoint store account.
* When using Microsoft Entra ID, it's possible to omit this if the service principal has access to the storage account too. | `"112233445566778899"` | `storageConnectionString` | Y* | Connection string for the checkpoint store, alternative to specifying `storageAccountKey` | `"DefaultEndpointsProtocol=https;AccountName=myeventhubstorage;AccountKey="` | `storageContainerName` | Y | Storage container name for the storage account name. | `"myeventhubstoragecontainer"` | `enableEntityManagement` | N | Boolean value to allow management of the EventHub namespace and storage account. Default: `false` | `"true", "false"` @@ -75,9 +75,9 @@ The above example uses secrets as plain strings. It is recommended to use a secr | `partitionCount` | N | Number of partitions for the new Event Hub namespace. Used only when entity management is enabled. Default: `"1"` | `"2"` | `messageRetentionInDays` | N | Number of days to retain messages for in the newly created Event Hub namespace. Used only when entity management is enabled. Default: `"1"` | `"90"` -### Azure Active Directory (AAD) authentication +### Microsoft Entra ID authentication -The Azure Event Hubs pub/sub component supports authentication using all Azure Active Directory mechanisms. For further information and the relevant component metadata fields to provide depending on the choice of AAD authentication mechanism, see the [docs for authenticating to Azure]({{< ref authenticating-azure.md >}}). +The Azure Event Hubs pub/sub component supports authentication using all Microsoft Entra ID mechanisms. For further information and the relevant component metadata fields to provide depending on the choice of Microsoft Entra ID authentication mechanism, see the [docs for authenticating to Azure]({{< ref authenticating-azure.md >}}). #### Example Configuration @@ -110,7 +110,7 @@ spec: value: "1" - name: messageRetentionInDays # Checkpoint store attributes - # In this case, we're using Azure AD to access the storage account too + # In this case, we're using Microsoft Entra ID to access the storage account too - name: storageAccountName value: "myeventhubstorage" - name: storageContainerName @@ -191,7 +191,7 @@ When entity management is enabled in the metadata, as long as the application ha The Evet Hub name is the `topic` field in the incoming request to publish or subscribe to, while the consumer group name is the name of the Dapr app which subscribes to a given Event Hub. For example, a Dapr app running on Kubernetes with name `dapr.io/app-id: "myapp"` requires an Event Hubs consumer group named `myapp`. -Entity management is only possible when using [Azure AD Authentication]({{< ref "authenticating-azure.md" >}}) and not using a connection string. +Entity management is only possible when using [Microsoft Entra ID Authentication]({{< ref "authenticating-azure.md" >}}) and not using a connection string. > Dapr passes the name of the consumer group to the Event Hub, so this is not supplied in the metadata. 
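To make the naming rules concrete, the hedged sketch below shows a declarative subscription: the `topic` value corresponds to the Event Hub name, and the subscribing app's ID (the hypothetical `checkout` in this example) is what Dapr uses as the consumer group. The pub/sub component name `eventhubs-pubsub` is also an invented placeholder.

```yaml
apiVersion: dapr.io/v2alpha1
kind: Subscription
metadata:
  name: order-subscription
spec:
  pubsubname: eventhubs-pubsub  # the Azure Event Hubs pub/sub component
  topic: orders                 # maps to an Event Hub named "orders"
  routes:
    default: /orders            # endpoint on the subscribing app
scopes:
- checkout                      # app ID; "checkout" is used as the consumer group name
```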
diff --git a/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-azure-servicebus-queues.md b/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-azure-servicebus-queues.md index e98df4814f3..57e3b92868d 100644 --- a/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-azure-servicebus-queues.md +++ b/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-azure-servicebus-queues.md @@ -25,7 +25,7 @@ spec: type: pubsub.azure.servicebus.queues version: v1 metadata: - # Required when not using Azure AD Authentication + # Required when not using Microsoft Entra ID Authentication - name: connectionString value: "Endpoint=sb://{ServiceBusNamespace}.servicebus.windows.net/;SharedAccessKeyName={PolicyName};SharedAccessKey={Key};EntityPath={ServiceBus}" # - name: consumerID # Optional @@ -70,9 +70,9 @@ The above example uses secrets as plain strings. It is recommended to use a secr | Field | Required | Details | Example | |--------------------|:--------:|---------|---------| -| `connectionString` | Y | Shared access policy connection string for the Service Bus. Required unless using Azure AD authentication. | See example above +| `connectionString` | Y | Shared access policy connection string for the Service Bus. Required unless using Microsoft Entra ID authentication. | See example above | `consumerID` | N | Consumer ID (consumer tag) organizes one or more consumers into a group. Consumers with the same consumer ID work as one virtual consumer; for example, a message is processed only once by one of the consumers in the group. If the `consumerID` is not provided, the Dapr runtime set it to the Dapr application ID (`appID`) value. | `"channel1"` -| `namespaceName`| N | Parameter to set the address of the Service Bus namespace, as a fully-qualified domain name. Required if using Azure AD authentication. | `"namespace.servicebus.windows.net"` | +| `namespaceName`| N | Parameter to set the address of the Service Bus namespace, as a fully-qualified domain name. Required if using Microsoft Entra ID authentication. | `"namespace.servicebus.windows.net"` | | `timeoutInSec` | N | Timeout for sending messages and for management operations. Default: `60` |`30` | `handlerTimeoutInSec`| N | Timeout for invoking the app's handler. Default: `60` | `30` | `lockRenewalInSec` | N | Defines the frequency at which buffered message locks will be renewed. Default: `20`. | `20` @@ -89,9 +89,9 @@ The above example uses secrets as plain strings. It is recommended to use a secr | `publishMaxRetries` | N | The max number of retries for when Azure Service Bus responds with "too busy" in order to throttle messages. Defaults: `5` | `5` | `publishInitialRetryIntervalInMs` | N | Time in milliseconds for the initial exponential backoff when Azure Service Bus throttle messages. Defaults: `500` | `500` -### Azure Active Directory (AAD) authentication +### Microsoft Entra ID authentication -The Azure Service Bus Queues pubsub component supports authentication using all Azure Active Directory mechanisms, including Managed Identities. For further information and the relevant component metadata fields to provide depending on the choice of AAD authentication mechanism, see the [docs for authenticating to Azure]({{< ref authenticating-azure.md >}}). +The Azure Service Bus Queues pubsub component supports authentication using all Microsoft Entra ID mechanisms, including Managed Identities. 
For further information and the relevant component metadata fields to provide depending on the choice of Microsoft Entra ID authentication mechanism, see the [docs for authenticating to Azure]({{< ref authenticating-azure.md >}}). #### Example Configuration diff --git a/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-azure-servicebus-topics.md b/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-azure-servicebus-topics.md index 7d9ab5b1672..157f960da3e 100644 --- a/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-azure-servicebus-topics.md +++ b/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-azure-servicebus-topics.md @@ -26,7 +26,7 @@ spec: type: pubsub.azure.servicebus.topics version: v1 metadata: - # Required when not using Azure AD Authentication + # Required when not using Microsoft Entra ID Authentication - name: connectionString value: "Endpoint=sb://{ServiceBusNamespace}.servicebus.windows.net/;SharedAccessKeyName={PolicyName};SharedAccessKey={Key};EntityPath={ServiceBus}" # - name: consumerID # Optional: defaults to the app's own ID @@ -73,8 +73,8 @@ The above example uses secrets as plain strings. It is recommended to use a secr | Field | Required | Details | Example | |--------------------|:--------:|---------|---------| -| `connectionString` | Y | Shared access policy connection string for the Service Bus. Required unless using Azure AD authentication. | See example above -| `namespaceName`| N | Parameter to set the address of the Service Bus namespace, as a fully-qualified domain name. Required if using Azure AD authentication. | `"namespace.servicebus.windows.net"` | +| `connectionString` | Y | Shared access policy connection string for the Service Bus. Required unless using Microsoft Entra ID authentication. | See example above +| `namespaceName`| N | Parameter to set the address of the Service Bus namespace, as a fully-qualified domain name. Required if using Microsoft Entra ID authentication. | `"namespace.servicebus.windows.net"` | | `consumerID` | N | Consumer ID (consumer tag) organizes one or more consumers into a group. Consumers with the same consumer ID work as one virtual consumer; for example, a message is processed only once by one of the consumers in the group. If the `consumerID` is not provided, the Dapr runtime set it to the Dapr application ID (`appID`) value. (`appID`) value. | | `timeoutInSec` | N | Timeout for sending messages and for management operations. Default: `60` |`30` | `handlerTimeoutInSec`| N | Timeout for invoking the app's handler. Default: `60` | `30` @@ -92,9 +92,9 @@ The above example uses secrets as plain strings. It is recommended to use a secr | `publishMaxRetries` | N | The max number of retries for when Azure Service Bus responds with "too busy" in order to throttle messages. Defaults: `5` | `5` | `publishInitialRetryIntervalInMs` | N | Time in milliseconds for the initial exponential backoff when Azure Service Bus throttle messages. Defaults: `500` | `500` -### Azure Active Directory (AAD) authentication +### Microsoft Entra ID authentication -The Azure Service Bus Topics pubsub component supports authentication using all Azure Active Directory mechanisms, including Managed Identities. For further information and the relevant component metadata fields to provide depending on the choice of AAD authentication mechanism, see the [docs for authenticating to Azure]({{< ref authenticating-azure.md >}}). 
+The Azure Service Bus Topics pubsub component supports authentication using all Microsoft Entra ID mechanisms, including Managed Identities. For further information and the relevant component metadata fields to provide depending on the choice of Microsoft Entra ID authentication mechanism, see the [docs for authenticating to Azure]({{< ref authenticating-azure.md >}}). #### Example Configuration diff --git a/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-kubemq.md b/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-kubemq.md index 28080ac150c..d6c22d5b552 100644 --- a/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-kubemq.md +++ b/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-kubemq.md @@ -45,7 +45,7 @@ spec: {{< tabs "Self-Hosted" "Kubernetes">}} {{% codetab %}} -1. Obtain KubeMQ Key by visiting [https://account.kubemq.io/login/register](https://account.kubemq.io/login/register) and register for a key. +1. [Obtain KubeMQ Key](https://docs.kubemq.io/getting-started/quick-start#obtain-kubemq-license-key). 2. Wait for an email confirmation with your Key You can run a KubeMQ broker with Docker: @@ -58,7 +58,7 @@ You can then interact with the server using the client port: `localhost:50000` {{% /codetab %}} {{% codetab %}} -1. Obtain KubeMQ Key by visiting [https://account.kubemq.io/login/register](https://account.kubemq.io/login/register) and register for a key. +1. [Obtain KubeMQ Key](https://docs.kubemq.io/getting-started/quick-start#obtain-kubemq-license-key). 2. Wait for an email confirmation with your Key Then Run the following kubectl commands: diff --git a/daprdocs/content/en/reference/components-reference/supported-secret-stores/aws-parameter-store.md b/daprdocs/content/en/reference/components-reference/supported-secret-stores/aws-parameter-store.md index eab410fe11c..bc951b50b84 100644 --- a/daprdocs/content/en/reference/components-reference/supported-secret-stores/aws-parameter-store.md +++ b/daprdocs/content/en/reference/components-reference/supported-secret-stores/aws-parameter-store.md @@ -30,6 +30,8 @@ spec: value: "[aws_secret_key]" - name: sessionToken value: "[aws_session_token]" + - name: prefix + value: "[secret_name]" ``` {{% alert title="Warning" color="warning" %}} The above example uses secrets as plain strings. It is recommended to use a local secret store such as [Kubernetes secret store]({{< ref kubernetes-secret-store.md >}}) or a [local file]({{< ref file-secret-store.md >}}) to bootstrap secure key storage. @@ -43,6 +45,7 @@ The above example uses secrets as plain strings. It is recommended to use a loca | accessKey | Y | The AWS Access Key to access this resource | `"key"` | | secretKey | Y | The AWS Secret Access Key to access this resource | `"secretAccessKey"` | | sessionToken | N | The AWS session token to use | `"sessionToken"` | +| prefix | N | Allows you to specify more than one SSM parameter store secret store component. | `"prefix"` | {{% alert title="Important" color="warning" %}} When running the Dapr sidecar (daprd) with your application on EKS (AWS Kubernetes), if you're using a node/pod that has already been attached to an IAM policy defining access to AWS resources, you **must not** provide AWS access-key, secret-key, and tokens in the definition of the component spec you're using. 
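To illustrate how the new `prefix` field lets more than one Parameter Store component coexist, here is a hedged sketch with two components scoped to different prefixes. The component names and prefix paths are invented for the example, and the AWS credential fields are omitted on the assumption that an attached IAM role is used, as the warning above describes.

```yaml
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: team-a-parameterstore
spec:
  type: secretstores.aws.parameterstore
  version: v1
  metadata:
  - name: region
    value: "us-east-1"
  - name: prefix
    value: "/team-a"
---
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: team-b-parameterstore
spec:
  type: secretstores.aws.parameterstore
  version: v1
  metadata:
  - name: region
    value: "us-east-1"
  - name: prefix
    value: "/team-b"
```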
diff --git a/daprdocs/content/en/reference/components-reference/supported-secret-stores/azure-keyvault.md b/daprdocs/content/en/reference/components-reference/supported-secret-stores/azure-keyvault.md index 57286c1b3bd..b5860fe9941 100644 --- a/daprdocs/content/en/reference/components-reference/supported-secret-stores/azure-keyvault.md +++ b/daprdocs/content/en/reference/components-reference/supported-secret-stores/azure-keyvault.md @@ -36,11 +36,11 @@ spec: value : "[pfx_certificate_file_fully_qualified_local_path]" ``` -## Authenticating with Azure AD +## Authenticating with Microsoft Entra ID -The Azure Key Vault secret store component supports authentication with Azure AD only. Before you enable this component: +The Azure Key Vault secret store component supports authentication with Microsoft Entra ID only. Before you enable this component: 1. Read the [Authenticating to Azure]({{< ref authenticating-azure.md >}}) document. -1. Create an Azure AD application (also called Service Principal). +1. Create an Microsoft Entra ID application (also called Service Principal). 1. Alternatively, create a managed identity for your application platform. ## Spec metadata fields @@ -70,7 +70,7 @@ Query Parameter | Description - [Azure CLI](https://docs.microsoft.com/cli/azure/install-azure-cli) - [jq](https://stedolan.github.io/jq/download/) - You are using bash or zsh shell -- You've created an Azure AD application (Service Principal) per the instructions in [Authenticating to Azure]({{< ref authenticating-azure.md >}}). You will need the following values: +- You've created an Microsoft Entra ID application (Service Principal) per the instructions in [Authenticating to Azure]({{< ref authenticating-azure.md >}}). You will need the following values: | Value | Description | | ----- | ----------- | @@ -113,7 +113,7 @@ Query Parameter | Description --location "${LOCATION}" ``` -1. Using RBAC, assign a role to the Azure AD application so it can access the Key Vault. +1. Using RBAC, assign a role to the Microsoft Entra ID application so it can access the Key Vault. In this case, assign the "Key Vault Secrets User" role, which has the "Get secrets" permission over Azure Key Vault. ```sh @@ -133,7 +133,7 @@ Other less restrictive roles, like "Key Vault Secrets Officer" and "Key Vault Ad #### Using a client secret -To use a **client secret**, create a file called `azurekeyvault.yaml` in the components directory. Use the following template, filling in [the Azure AD application you created]({{< ref authenticating-azure.md >}}): +To use a **client secret**, create a file called `azurekeyvault.yaml` in the components directory. Use the following template, filling in [the Microsoft Entra ID application you created]({{< ref authenticating-azure.md >}}): ```yaml apiVersion: dapr.io/v1alpha1 @@ -156,7 +156,7 @@ spec: #### Using a certificate -If you want to use a **certificate** saved on the local disk instead, use the following template. Fill in the details of [the Azure AD application you created]({{< ref authenticating-azure.md >}}): +If you want to use a **certificate** saved on the local disk instead, use the following template. Fill in the details of [the Microsoft Entra ID application you created]({{< ref authenticating-azure.md >}}): ```yaml apiVersion: dapr.io/v1alpha1 @@ -179,7 +179,7 @@ spec: {{% /codetab %}} {{% codetab %}} -In Kubernetes, you store the client secret or the certificate into the Kubernetes Secret Store and then refer to those in the YAML file. 
Before you start, you need the details of [the Azure AD application you created]({{< ref authenticating-azure.md >}}). +In Kubernetes, you store the client secret or the certificate into the Kubernetes Secret Store and then refer to those in the YAML file. Before you start, you need the details of [the Microsoft Entra ID application you created]({{< ref authenticating-azure.md >}}). #### Using a client secret @@ -298,11 +298,11 @@ In Kubernetes, you store the client secret or the certificate into the Kubernete kubectl apply -f azurekeyvault.yaml ``` 1. Create and assign a managed identity at the pod-level via either: - - [Azure AD workload identity](https://learn.microsoft.com/azure/aks/workload-identity-overview) (preferred method) - - [Azure AD pod identity](https://docs.microsoft.com/azure/aks/use-azure-ad-pod-identity#create-a-pod-identity) + - [Microsoft Entra ID workload identity](https://learn.microsoft.com/azure/aks/workload-identity-overview) (preferred method) + - [Microsoft Entra ID pod identity](https://docs.microsoft.com/azure/aks/use-azure-ad-pod-identity#create-a-pod-identity) - **Important**: While both Azure AD pod identity and workload identity are in preview, currently Azure AD Workload Identity is planned for general availability (stable state). + **Important**: While both Microsoft Entra ID pod identity and workload identity are in preview, currently Microsoft Entra ID Workload Identity is planned for general availability (stable state). 1. After creating a workload identity, give it `read` permissions: - [On your desired KeyVault instance](https://docs.microsoft.com/azure/key-vault/general/assign-access-policy?tabs=azure-cli#assign-the-access-policy) @@ -319,11 +319,11 @@ In Kubernetes, you store the client secret or the certificate into the Kubernete aadpodidbinding: $POD_IDENTITY_NAME ``` -#### Using Azure managed identity directly vs. via Azure AD workload identity +#### Using Azure managed identity directly vs. via Microsoft Entra ID workload identity When using **managed identity directly**, you can have multiple identities associated with an app, requiring `azureClientId` to specify which identity should be used. -However, when using **managed identity via Azure AD workload identity**, `azureClientId` is not necessary and has no effect. The Azure identity to be used is inferred from the service account tied to an Azure identity via the Azure federated identity. +However, when using **managed identity via Microsoft Entra ID workload identity**, `azureClientId` is not necessary and has no effect. The Azure identity to be used is inferred from the service account tied to an Azure identity via the Azure federated identity. 
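For instance, a hedged sketch of a component that pins a specific user-assigned managed identity through `azureClientId` (all values are placeholders) might look like this:

```yaml
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: azurekeyvault
spec:
  type: secretstores.azure.keyvault
  version: v1
  metadata:
  - name: vaultName
    value: "[your_keyvault_name]"
  # Only needed when using a user-assigned managed identity directly;
  # when using workload identity this value is ignored and can be omitted.
  - name: azureClientId
    value: "[your_managed_identity_client_id]"
```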
{{% /codetab %}} diff --git a/daprdocs/content/en/reference/components-reference/supported-secret-stores/envvar-secret-store.md b/daprdocs/content/en/reference/components-reference/supported-secret-stores/envvar-secret-store.md index b4e67318946..9ef8198aac0 100644 --- a/daprdocs/content/en/reference/components-reference/supported-secret-stores/envvar-secret-store.md +++ b/daprdocs/content/en/reference/components-reference/supported-secret-stores/envvar-secret-store.md @@ -41,7 +41,7 @@ spec: For security reasons, this component cannot be used to access these environment variables: - `APP_API_TOKEN` -- Any variable whose name begines with the `DAPR_` prefix +- Any variable whose name begins with the `DAPR_` prefix ## Related Links - [Secrets building block]({{< ref secrets >}}) diff --git a/daprdocs/content/en/reference/components-reference/supported-state-stores/setup-azure-blobstorage.md b/daprdocs/content/en/reference/components-reference/supported-state-stores/setup-azure-blobstorage.md index f4922097cb4..61846c3beff 100644 --- a/daprdocs/content/en/reference/components-reference/supported-state-stores/setup-azure-blobstorage.md +++ b/daprdocs/content/en/reference/components-reference/supported-state-stores/setup-azure-blobstorage.md @@ -37,7 +37,7 @@ The above example uses secrets as plain strings. It is recommended to use a secr | Field | Required | Details | Example | |--------------------|:--------:|---------|---------| | `accountName` | Y | The storage account name | `"mystorageaccount"`. -| `accountKey` | Y (unless using Azure AD) | Primary or secondary storage key | `"key"` +| `accountKey` | Y (unless using Microsoft Entra ID) | Primary or secondary storage key | `"key"` | `containerName` | Y | The name of the container to be used for Dapr state. The container will be created for you if it doesn't exist | `"container"` | `azureEnvironment` | N | Optional name for the Azure environment if using a different Azure cloud | `"AZUREPUBLICCLOUD"` (default value), `"AZURECHINACLOUD"`, `"AZUREUSGOVERNMENTCLOUD"`, `"AZUREGERMANCLOUD"` | `endpoint` | N | Optional custom endpoint URL. This is useful when using the [Azurite emulator](https://github.com/Azure/azurite) or when using custom domains for Azure Storage (although this is not officially supported). The endpoint must be the full base URL, including the protocol (`http://` or `https://`), the IP or FQDN, and optional port. | `"http://127.0.0.1:10000"` @@ -60,9 +60,9 @@ In order to setup Azure Blob Storage as a state store, you will need the followi - **accountKey**: Primary or secondary storage account key. - **containerName**: The name of the container to be used for Dapr state. The container will be created for you if it doesn't exist. -### Authenticating with Azure AD +### Authenticating with Microsoft Entra ID -This component supports authentication with Azure AD as an alternative to use account keys. Whenever possible, it is recommended that you use Azure AD for authentication in production systems, to take advantage of better security, fine-tuned access control, and the ability to use managed identities for apps running on Azure. +This component supports authentication with Microsoft Entra ID as an alternative to use account keys. Whenever possible, it is recommended that you use Microsoft Entra ID for authentication in production systems, to take advantage of better security, fine-tuned access control, and the ability to use managed identities for apps running on Azure. 
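As a quick, non-authoritative sketch of what this looks like (placeholder values throughout; the example shown later in this section remains the reference), a component that omits `accountKey` and supplies service principal credentials instead could be defined as follows:

```yaml
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: statestore
spec:
  type: state.azure.blobstorage
  version: v1
  metadata:
  - name: accountName
    value: "[your_storage_account_name]"
  - name: containerName
    value: "[your_container_name]"
  # No accountKey: credentials are taken from Microsoft Entra ID instead
  - name: azureTenantId
    value: "[your_tenant_id]"
  - name: azureClientId
    value: "[your_client_id]"
  - name: azureClientSecret
    value: "[your_client_secret]"
```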
> The following scripts are optimized for a bash or zsh shell and require the following apps installed: > @@ -71,7 +71,7 @@ This component supports authentication with Azure AD as an alternative to use ac > > You must also be authenticated with Azure in your Azure CLI. -1. To get started with using Azure AD for authenticating the Blob Storage state store component, make sure you've created an Azure AD application and a Service Principal as explained in the [Authenticating to Azure]({{< ref authenticating-azure.md >}}) document. +1. To get started with using Microsoft Entra ID for authenticating the Blob Storage state store component, make sure you've created a Microsoft Entra ID application and a Service Principal as explained in the [Authenticating to Azure]({{< ref authenticating-azure.md >}}) document. Once done, set a variable with the ID of the Service Principal that you created: ```sh @@ -96,7 +96,7 @@ This component supports authentication with Azure AD as an alternative to use ac --scope "${RG_ID}/providers/Microsoft.Storage/storageAccounts/${STORAGE_ACCOUNT_NAME}" ``` -When authenticating your component using Azure AD, the `accountKey` field is not required. Instead, please specify the required credentials in the component's metadata (if any) according to the [Authenticating to Azure]({{< ref authenticating-azure.md >}}) document. +When authenticating your component using Microsoft Entra ID, the `accountKey` field is not required. Instead, please specify the required credentials in the component's metadata (if any) according to the [Authenticating to Azure]({{< ref authenticating-azure.md >}}) document. For example: diff --git a/daprdocs/content/en/reference/components-reference/supported-state-stores/setup-azure-cosmosdb.md b/daprdocs/content/en/reference/components-reference/supported-state-stores/setup-azure-cosmosdb.md index a1f4f59b935..d7ee723eaa0 100644 --- a/daprdocs/content/en/reference/components-reference/supported-state-stores/setup-azure-cosmosdb.md +++ b/daprdocs/content/en/reference/components-reference/supported-state-stores/setup-azure-cosmosdb.md @@ -46,14 +46,14 @@ If you wish to use Cosmos DB as an actor store, append the following to the yam | Field | Required | Details | Example | |--------------------|:--------:|---------|---------| | url | Y | The Cosmos DB url | `"https://******.documents.azure.com:443/"`. -| masterKey | Y* | The key to authenticate to the Cosmos DB account. Only required when not using Azure AD authentication. | `"key"` +| masterKey | Y* | The key to authenticate to the Cosmos DB account. Only required when not using Microsoft Entra ID authentication. | `"key"` | database | Y | The name of the database | `"db"` | collection | Y | The name of the collection (container) | `"collection"` | actorStateStore | N | Consider this state store for actors. Defaults to `"false"` | `"true"`, `"false"` -### Azure Active Directory (Azure AD) authentication +### Microsoft Entra ID authentication -The Azure Cosmos DB state store component supports authentication using all Azure Active Directory mechanisms. For further information and the relevant component metadata fields to provide depending on the choice of Azure AD authentication mechanism, see the [docs for authenticating to Azure]({{< ref authenticating-azure.md >}}). +The Azure Cosmos DB state store component supports authentication using all Microsoft Entra ID mechanisms.
For further information and the relevant component metadata fields to provide depending on the choice of Microsoft Entra ID authentication mechanism, see the [docs for authenticating to Azure]({{< ref authenticating-azure.md >}}). You can read additional information for setting up Cosmos DB with Azure AD authentication in the [section below](#setting-up-cosmos-db-for-authenticating-with-azure-ad). @@ -66,7 +66,7 @@ You can read additional information for setting up Cosmos DB with Azure AD aut In order to setup Cosmos DB as a state store, you need the following properties: - **URL**: the Cosmos DB url. for example: `https://******.documents.azure.com:443/` -- **Master Key**: The key to authenticate to the Cosmos DB account. Skip this if using Azure AD authentication. +- **Master Key**: The key to authenticate to the Cosmos DB account. Skip this if using Microsoft Entra ID authentication. - **Database**: The name of the database - **Collection**: The name of the collection (or container) @@ -136,9 +136,9 @@ curl -X POST http://localhost:3500/v1.0/state/ \ For **actor** state operations, the partition key is generated by Dapr using the `appId`, the actor type, and the actor id, such that data for the same actor always ends up under the same partition (you do not need to specify it). This is because actor state operations must use transactions, and in Cosmos DB the items in a transaction must be on the same partition. -## Setting up Cosmos DB for authenticating with Azure AD +## Setting up Cosmos DB for authenticating with Microsoft Entra ID -When using the Dapr Cosmos DB state store and authenticating with Azure AD, you need to perform a few additional steps to set up your environment. +When using the Dapr Cosmos DB state store and authenticating with Microsoft Entra ID, you need to perform a few additional steps to set up your environment. Prerequisites: @@ -147,7 +147,7 @@ Prerequisites: - [jq](https://stedolan.github.io/jq/download/) - The scripts below are optimized for a bash or zsh shell -### Granting your Azure AD application access to Cosmos DB +### Granting your Microsoft Entra ID application access to Cosmos DB > You can find more information on the [official documentation](https://docs.microsoft.com/azure/cosmos-db/how-to-setup-rbac), including instructions to assign more granular permissions. @@ -172,6 +172,42 @@ az cosmosdb sql role assignment create \ --role-definition-id "$ROLE_ID" ``` +## Optimizing Cosmos DB for bulk operation write performance + +If you are building a system that only ever reads data from Cosmos DB via key (`id`), which is the default Dapr behavior when using the state management API or actors, there are ways you can optimize Cosmos DB for improved write speeds. This is done by excluding all paths from indexing. By default, Cosmos DB indexes all fields inside of a document. On systems that are write-heavy and run little-to-no queries on values within a document, this indexing policy slows down the time it takes to write or update a document in Cosmos DB. This is exacerbated in high-volume systems. + +For example, the default Terraform definition for a Cosmos SQL container indexing reads as follows: + +```tf +indexing_policy { + indexing_mode = "consistent" + + included_path { + path = "/*" + } +} +``` + +It is possible to force Cosmos DB to only index the `id` and `partitionKey` fields by excluding all other fields from indexing. 
This can be done by updating the above to read as follows: + +```tf +indexing_policy { + # This could also be set to "none" if you are using the container purely as a key-value store. This may be applicable if your container is only going to be used as a distributed cache. + indexing_mode = "consistent" + + # Note that included_path has been replaced with excluded_path + excluded_path { + path = "/*" + } +} +``` + +{{% alert title="Note" color="primary" %}} + +This optimization comes at the cost of queries against fields inside of documents within the state store. This would likely impact any stored procedures or SQL queries defined and executed. It is only recommended that this optimization be applied only if you are using the Dapr State Management API or Dapr Actors to interact with Cosmos DB. + +{{% /alert %}} + ## Related links - [Basic schema for a Dapr component]({{< ref component-schema >}}) diff --git a/daprdocs/content/en/reference/components-reference/supported-state-stores/setup-azure-tablestorage.md b/daprdocs/content/en/reference/components-reference/supported-state-stores/setup-azure-tablestorage.md index 64fa12c828c..5d8e8cfe672 100644 --- a/daprdocs/content/en/reference/components-reference/supported-state-stores/setup-azure-tablestorage.md +++ b/daprdocs/content/en/reference/components-reference/supported-state-stores/setup-azure-tablestorage.md @@ -45,11 +45,11 @@ The above example uses secrets as plain strings. It is recommended to use a secr | `serviceURL` | N | The full storage service endpoint URL. Useful for Azure environments other than public cloud. | `"https://mystorageaccount.table.core.windows.net/"` | `skipCreateTable` | N | Skips the check for and, if necessary, creation of the specified storage table. This is useful when using active directory authentication with minimal privileges. Defaults to `false`. | `"true"` -### Azure Active Directory (Azure AD) authentication +### Microsoft Entra ID authentication -The Azure Cosmos DB state store component supports authentication using all Azure Active Directory mechanisms. For further information and the relevant component metadata fields to provide depending on the choice of Azure AD authentication mechanism, see the [docs for authenticating to Azure]({{< ref authenticating-azure.md >}}). +The Azure Cosmos DB state store component supports authentication using all Microsoft Entra ID mechanisms. For further information and the relevant component metadata fields to provide depending on the choice of Microsoft Entra ID authentication mechanism, see the [docs for authenticating to Azure]({{< ref authenticating-azure.md >}}). -You can read additional information for setting up Cosmos DB with Azure AD authentication in the [section below](#setting-up-cosmos-db-for-authenticating-with-azure-ad). +You can read additional information for setting up Cosmos DB with Microsoft Entra ID authentication in the [section below](#setting-up-cosmos-db-for-authenticating-with-azure-ad). ## Option 1: Setup Azure Table Storage @@ -59,7 +59,7 @@ If you wish to create a table for Dapr to use, you can do so beforehand. However In order to setup Azure Table Storage as a state store, you will need the following properties: - **AccountName**: The storage account name. For example: **mystorageaccount**. -- **AccountKey**: Primary or secondary storage key. Skip this if using Azure AD authentication. +- **AccountKey**: Primary or secondary storage key. Skip this if using Microsoft Entra ID authentication. 
- **TableName**: The name of the table to be used for Dapr state. The table will be created for you if it doesn't exist, unless the `skipCreateTable` option is enabled. - **cosmosDbMode**: Set this to `false` to connect to Azure Tables. @@ -71,7 +71,7 @@ If you wish to create a table for Dapr to use, you can do so beforehand. However In order to setup Azure Cosmos DB Table API as a state store, you will need the following properties: - **AccountName**: The Cosmos DB account name. For example: **mycosmosaccount**. -- **AccountKey**: The Cosmos DB master key. Skip this if using Azure AD authentication. +- **AccountKey**: The Cosmos DB master key. Skip this if using Microsoft Entra ID authentication. - **TableName**: The name of the table to be used for Dapr state. The table will be created for you if it doesn't exist, unless the `skipCreateTable` option is enabled. - **cosmosDbMode**: Set this to `true` to connect to Azure Tables. diff --git a/daprdocs/content/en/reference/components-reference/supported-state-stores/setup-memcached.md b/daprdocs/content/en/reference/components-reference/supported-state-stores/setup-memcached.md index fab220a6618..2d00042c199 100644 --- a/daprdocs/content/en/reference/components-reference/supported-state-stores/setup-memcached.md +++ b/daprdocs/content/en/reference/components-reference/supported-state-stores/setup-memcached.md @@ -25,7 +25,7 @@ spec: - name: maxIdleConnections value: # Optional. default: "2" - name: timeout - value: # Optional. default: "1000ms" + value: # Optional. default: "1000" ``` {{% alert title="Warning" color="warning" %}} @@ -38,7 +38,7 @@ The above example uses secrets as plain strings. It is recommended to use a secr |--------------------|:--------:|---------|---------| | hosts | Y | Comma delimited endpoints | `"memcached.default.svc.cluster.local:11211"` | maxIdleConnections | N | The max number of idle connections. Defaults to `"2"` | `"3"` -| timeout | N | The timeout for the calls. Defaults to `"1000ms"` | `"1000ms"` +| timeout | N | The timeout for the calls in milliseconds. Defaults to `"1000"` | `"1000"` ## Setup Memcached diff --git a/daprdocs/content/en/reference/components-reference/supported-state-stores/setup-mongodb.md b/daprdocs/content/en/reference/components-reference/supported-state-stores/setup-mongodb.md index 428953dd306..db3fec2ed77 100644 --- a/daprdocs/content/en/reference/components-reference/supported-state-stores/setup-mongodb.md +++ b/daprdocs/content/en/reference/components-reference/supported-state-stores/setup-mongodb.md @@ -47,20 +47,23 @@ spec: The above example uses secrets as plain strings. It is recommended to use a secret store for the secrets as described [here]({{< ref component-secrets.md >}}). {{% /alert %}} -If you wish to use MongoDB as an actor store, append the following to the yaml. +### Actor state store and transactions support + +When using as an actor state store or to leverage transactions, MongoDB must be running in a [Replica Set](https://www.mongodb.com/docs/manual/replication/). 
+ +If you wish to use MongoDB as an actor store, add this metadata option to your Component YAML: ```yaml - name: actorStateStore value: "true" ``` - ## Spec metadata fields | Field | Required | Details | Example | |--------------------|:--------:|---------|---------| -| server | Y* | The server to connect to, when using DNS SRV record | `"server.example.com"` -| host | Y* | The host to connect to | `"mongo-mongodb.default.svc.cluster.local:27017"` +| server | Y1 | The server to connect to, when using DNS SRV record | `"server.example.com"` +| host | Y1 | The host to connect to | `"mongo-mongodb.default.svc.cluster.local:27017"` | username | N | The username of the user to connect with (applicable in conjunction with `host`) | `"admin"` | password | N | The password of the user (applicable in conjunction with `host`) | `"password"` | databaseName | N | The name of the database to use. Defaults to `"daprStore"` | `"daprStore"` @@ -68,46 +71,36 @@ If you wish to use MongoDB as an actor store, append the following to the yaml. | writeConcern | N | The write concern to use | `"majority"` | readConcern | N | The read concern to use | `"majority"`, `"local"`,`"available"`, `"linearizable"`, `"snapshot"` | operationTimeout | N | The timeout for the operation. Defaults to `"5s"` | `"5s"` -| params | N** | Additional parameters to use | `"?authSource=daprStore&ssl=true"` +| params | N2 | Additional parameters to use | `"?authSource=daprStore&ssl=true"` -> [*] The `server` and `host` fields are mutually exclusive. If neither or both are set, Dapr will return an error. +> [1] The `server` and `host` fields are mutually exclusive. If neither or both are set, Dapr returns an error. -> [**] The `params` field accepts a query string that specifies connection specific options as `=` pairs, separated by `"&"` and prefixed with `"?"`. e.g. to use "daprStore" db as authentication database and enabling SSL/TLS in connection, specify params as `"?authSource=daprStore&ssl=true"`. See [the mongodb manual](https://docs.mongodb.com/manual/reference/connection-string/#std-label-connections-connection-options) for the list of available options and their use cases. +> [2] The `params` field accepts a query string that specifies connection specific options as `=` pairs, separated by `&` and prefixed with `?`. e.g. to use "daprStore" db as authentication database and enabling SSL/TLS in connection, specify params as `?authSource=daprStore&ssl=true`. See [the mongodb manual](https://docs.mongodb.com/manual/reference/connection-string/#std-label-connections-connection-options) for the list of available options and their use cases. ## Setup MongoDB {{< tabs "Self-Hosted" "Kubernetes" >}} {{% codetab %}} -You can run MongoDB locally using Docker: +You can run a single MongoDB instance locally using Docker: -``` +```sh docker run --name some-mongo -d mongo ``` -You can then interact with the server using `localhost:27017`. - -If you do not specify a `databaseName` value in your component definition, make sure to create a database named `daprStore`. +You can then interact with the server at `localhost:27017`. If you do not specify a `databaseName` value in your component definition, make sure to create a database named `daprStore`. +In order to use the MongoDB state store for transactions and as an actor state store, you need to run MongoDB as a Replica Set. Refer to [the official documentation](https://www.mongodb.com/compatibility/deploying-a-mongodb-cluster-with-docker) for how to create a 3-node Replica Set using Docker. 
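Assuming a local replica set named `rs0` (a placeholder name), a component definition matching this setup could look like the following sketch:

```yaml
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: statestore
spec:
  type: state.mongodb
  version: v1
  metadata:
  - name: host
    value: "localhost:27017"
  - name: databaseName
    value: "daprStore"
  - name: params
    value: "?replicaSet=rs0"      # placeholder replica set name
  - name: actorStateStore
    value: "true"                 # requires the replica set above
```

Without the `params` entry pointing at a replica set, plain key/value state operations still work against a standalone server, but transactions and the actor state store do not.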
{{% /codetab %}} {{% codetab %}} -The easiest way to install MongoDB on Kubernetes is by using the [Helm chart](https://github.com/helm/charts/tree/master/stable/mongodb): - -``` -helm install mongo stable/mongodb -``` - +You can conveniently install MongoDB on Kubernetes using the [Helm chart packaged by Bitnami](https://github.com/bitnami/charts/tree/main/bitnami/mongodb/). Refer to the documentation for the Helm chart for deploying MongoDB, both as a standalone server, and with a Replica Set (required for using transactions and actors). This installs MongoDB into the `default` namespace. To interact with MongoDB, find the service with: `kubectl get svc mongo-mongodb`. - -For example, if installing using the example above, the MongoDB host address would be: - +For example, if installing using the Helm defaults above, the MongoDB host address would be: `mongo-mongodb.default.svc.cluster.local:27017` - - Follow the on-screen instructions to get the root password for MongoDB. -The username is `admin` by default. +The username is typically `admin` by default. {{% /codetab %}} {{< /tabs >}} @@ -117,6 +110,7 @@ The username is `admin` by default. This state store supports [Time-To-Live (TTL)]({{< ref state-store-ttl.md >}}) for records stored with Dapr. When storing data using Dapr, you can set the `ttlInSeconds` metadata property to indicate when the data should be considered "expired". ## Related links + - [Basic schema for a Dapr component]({{< ref component-schema >}}) - Read [this guide]({{< ref "howto-get-save-state.md#step-2-save-and-retrieve-a-single-state" >}}) for instructions on configuring state store components - [State management building block]({{< ref state-management >}}) diff --git a/daprdocs/content/en/reference/components-reference/supported-state-stores/setup-postgresql.md b/daprdocs/content/en/reference/components-reference/supported-state-stores/setup-postgresql.md index 0d5c682422e..5035d8fae03 100644 --- a/daprdocs/content/en/reference/components-reference/supported-state-stores/setup-postgresql.md +++ b/daprdocs/content/en/reference/components-reference/supported-state-stores/setup-postgresql.md @@ -61,15 +61,15 @@ The following metadata options are **required** to authenticate using a PostgreS |--------|:--------:|---------|---------| | `connectionString` | Y | The connection string for the PostgreSQL database. See the PostgreSQL [documentation on database connections](https://www.postgresql.org/docs/current/libpq-connect.html) for information on how to define a connection string. | `"host=localhost user=postgres password=example port=5432 connect_timeout=10 database=my_db"` -### Authenticate using Azure AD +### Authenticate using Microsoft Entra ID -Authenticating with Azure AD is supported with Azure Database for PostgreSQL. All authentication methods supported by Dapr can be used, including client credentials ("service principal") and Managed Identity. +Authenticating with Microsoft Entra ID is supported with Azure Database for PostgreSQL. All authentication methods supported by Dapr can be used, including client credentials ("service principal") and Managed Identity. | Field | Required | Details | Example | |--------|:--------:|---------|---------| -| `useAzureAD` | Y | Must be set to `true` to enable the component to retrieve access tokens from Azure AD. | `"true"` | -| `connectionString` | Y | The connection string for the PostgreSQL database.
This must contain the user, which corresponds to the name of the user created inside PostgreSQL that maps to the Azure AD identity; this is often the name of the corresponding principal (e.g. the name of the Azure AD application). This connection string should not contain any password. | `"host=mydb.postgres.database.azure.com user=myapplication port=5432 database=my_db sslmode=require"` | -| `azureTenantId` | N | ID of the Azure AD tenant | `"cd4b2887-304c-…"` | +| `useAzureAD` | Y | Must be set to `true` to enable the component to retrieve access tokens from Microsoft Entra ID. | `"true"` | +| `connectionString` | Y | The connection string for the PostgreSQL database.
This must contain the user, which corresponds to the name of the user created inside PostgreSQL that maps to the Microsoft Entra ID identity; this is often the name of the corresponding principal (e.g. the name of the Microsoft Entra ID application). This connection string should not contain any password. | `"host=mydb.postgres.database.azure.com user=myapplication port=5432 database=my_db sslmode=require"` | +| `azureTenantId` | N | ID of the Microsoft Entra ID tenant | `"cd4b2887-304c-…"` | | `azureClientId` | N | Client ID (application ID) | `"c7dd251f-811f-…"` | | `azureClientSecret` | N | Client secret (application password) | `"Ecy3X…"` | diff --git a/daprdocs/content/en/reference/components-reference/supported-state-stores/setup-redis.md b/daprdocs/content/en/reference/components-reference/supported-state-stores/setup-redis.md index 3237b109284..366bbde0d44 100644 --- a/daprdocs/content/en/reference/components-reference/supported-state-stores/setup-redis.md +++ b/daprdocs/content/en/reference/components-reference/supported-state-stores/setup-redis.md @@ -11,6 +11,10 @@ aliases: To setup Redis state store create a component of type `state.redis`. See [this guide]({{< ref "howto-get-save-state.md#step-1-setup-a-state-store" >}}) on how to create and apply a state store configuration. +{{% alert title="Limitations" color="warning" %}} +Before using Redis and the Transactions API, make sure you're familiar with [Redis limitations regarding transactions](https://redis.io/docs/interact/transactions/#what-about-rollbacks). +{{% /alert %}} + ```yaml apiVersion: dapr.io/v1alpha1 kind: Component @@ -26,18 +30,14 @@ spec: value: - name: enableTLS value: # Optional. Allowed: true, false. - - name: failover - value: # Optional. Allowed: true, false. - - name: sentinelMasterName - value: # Optional - name: maxRetries value: # Optional - name: maxRetryBackoff value: # Optional - name: failover - value: # Optional + value: # Optional. Allowed: true, false. - name: sentinelMasterName - value: # Optional + value: # Optional - name: redeliverInterval value: # Optional - name: processingTimeout diff --git a/daprdocs/content/en/reference/components-reference/supported-state-stores/setup-sqlserver.md b/daprdocs/content/en/reference/components-reference/supported-state-stores/setup-sqlserver.md index e4f48d547b6..96d79ac9d64 100644 --- a/daprdocs/content/en/reference/components-reference/supported-state-stores/setup-sqlserver.md +++ b/daprdocs/content/en/reference/components-reference/supported-state-stores/setup-sqlserver.md @@ -28,7 +28,7 @@ spec: value: | Server=myServerName\myInstanceName;Database=myDataBase;User Id=myUsername;Password=myPassword; - # Authenticate with Azure AD (Azure SQL only) + # Authenticate with Microsoft Entra ID (Azure SQL only) # "useAzureAD" be set to "true" - name: useAzureAD value: true @@ -75,15 +75,15 @@ The following metadata options are **required** to authenticate using SQL Server |--------|:--------:|---------|---------| | `connectionString` | Y | The connection string used to connect.
If the connection string contains the database, it must already exist. Otherwise, if the database is omitted, a default database named "Dapr" is created. | `"Server=myServerName\myInstanceName;Database=myDataBase;User Id=myUsername;Password=myPassword;"` | -### Authenticate using Azure AD +### Authenticate using Microsoft Entra ID -Authenticating with Azure AD is supported with Azure SQL only. All authentication methods supported by Dapr can be used, including client credentials ("service principal") and Managed Identity. +Authenticating with Microsoft Entra ID is supported with Azure SQL only. All authentication methods supported by Dapr can be used, including client credentials ("service principal") and Managed Identity. | Field | Required | Details | Example | |--------|:--------:|---------|---------| -| `useAzureAD` | Y | Must be set to `true` to enable the component to retrieve access tokens from Azure AD. | `"true"` | +| `useAzureAD` | Y | Must be set to `true` to enable the component to retrieve access tokens from Microsoft Entra ID. | `"true"` | | `connectionString` | Y | The connection string or URL of the Azure SQL database, **without credentials**.
If the connection string contains the database, it must already exist. Otherwise, if the database is omitted, a default database named "Dapr" is created. | `"sqlserver://myServerName.database.windows.net:1433?database=myDataBase"` | -| `azureTenantId` | N | ID of the Azure AD tenant | `"cd4b2887-304c-47e1-b4d5-65447fdd542b"` | +| `azureTenantId` | N | ID of the Microsoft Entra ID tenant | `"cd4b2887-304c-47e1-b4d5-65447fdd542b"` | | `azureClientId` | N | Client ID (application ID) | `"c7dd251f-811f-4ba2-a905-acd4d3f8f08b"` | | `azureClientSecret` | N | Client secret (application password) | `"Ecy3XG7zVZK3/vl/a2NSB+a1zXLa8RnMum/IgD0E"` | diff --git a/daprdocs/content/en/reference/resource-specs/subscription-schema.md b/daprdocs/content/en/reference/resource-specs/subscription-schema.md index 55b8bc76f5f..bd5fc8263a8 100644 --- a/daprdocs/content/en/reference/resource-specs/subscription-schema.md +++ b/daprdocs/content/en/reference/resource-specs/subscription-schema.md @@ -21,18 +21,17 @@ kind: Subscription metadata: name: spec: - version: v2alpha1 topic: # Required routes: # Required - rules: - - match: + - match: path: pubsubname: # Required - deadlettertopic: # Optional - bulksubscribe: # Optional - - enabled: - - maxmessages: - - maxawaitduration: + deadLetterTopic: # Optional + bulkSubscribe: # Optional + - enabled: + - maxMessagesCount: + - maxAwaitDurationMs: scopes: - ``` @@ -42,10 +41,10 @@ scopes: | Field | Required | Details | Example | |--------------------|:--------:|---------|---------| | topic | Y | The name of the topic to which your component subscribes. | `orders` | -| routes | Y | The routes configuration for this topic, including specifying the condition for sending a message to a specific path. Includes the following fields:
  • match: _Optional._ The CEL expression used to match the event. If not specified, the route is considered the default.
  • path: The path for events that match this rule.
The endpoint to which all topic messages are sent. | `match: event.type == "widget"`
`path: /widgets` | +| routes | Y | The routes configuration for this topic, including specifying the condition for sending a message to a specific path. Includes the following fields:
  • match: The CEL expression used to match the event. If not specified, the route is considered the default.
  • path: The path for events that match this rule.
The endpoint to which all topic messages are sent. | `match: event.type == "widget"`
`path: /widgets` | | pubsubname | N | The name of your pub/sub component. | `pubsub` | -| deadlettertopic | N | The name of the dead letter topic that forwards undeliverable messages. | `poisonMessages` | -| bulksubscribe | N | Enable bulk subscribe properties. | `true`, `false` | +| deadLetterTopic | N | The name of the dead letter topic that forwards undeliverable messages. | `poisonMessages` | +| bulkSubscribe | N | Enable bulk subscribe properties. | `true`, `false` | ## `v1alpha1` format @@ -58,15 +57,14 @@ kind: Subscription metadata: name: spec: - version: v1alpha1 topic: # Required route: # Required pubsubname: # Required deadLetterTopic: # Optional bulkSubscribe: # Optional - enabled: - - maxmessages: - - maxawaitduration: + - maxMessagesCount: + - maxAwaitDurationMs: scopes: - ``` diff --git a/daprdocs/data/components/bindings/azure.yaml b/daprdocs/data/components/bindings/azure.yaml index 54d89da3ef1..af2d796b312 100644 --- a/daprdocs/data/components/bindings/azure.yaml +++ b/daprdocs/data/components/bindings/azure.yaml @@ -14,6 +14,14 @@ features: input: true output: true +- component: Azure OpenAI + link: openai + state: Alpha + version: v1 + since: "1.11" + features: + input: true + output: true - component: Azure SignalR link: signalr state: Alpha diff --git a/daprdocs/layouts/shortcodes/dapr-latest-version.html b/daprdocs/layouts/shortcodes/dapr-latest-version.html index 109d34c73d2..d3c43ab7178 100644 --- a/daprdocs/layouts/shortcodes/dapr-latest-version.html +++ b/daprdocs/layouts/shortcodes/dapr-latest-version.html @@ -1 +1 @@ -{{- if .Get "short" }}1.12{{ else if .Get "long" }}1.12.0{{ else if .Get "cli" }}1.12.0{{ else }}1.12.0{{ end -}} +{{- if .Get "short" }}1.12{{ else if .Get "long" }}1.12.4{{ else if .Get "cli" }}1.12.0{{ else }}1.12.4{{ end -}} \ No newline at end of file diff --git a/scripts/init-container.sh b/scripts/init-container.sh new file mode 100644 index 00000000000..61f6a8dccc3 --- /dev/null +++ b/scripts/init-container.sh @@ -0,0 +1,4 @@ +git config --global --add safe.directory '*' +cd ./daprdocs +git submodule update --init --recursive +npm install diff --git a/sdkdocs/dotnet b/sdkdocs/dotnet index 99d874a2b13..d023a43ba4f 160000 --- a/sdkdocs/dotnet +++ b/sdkdocs/dotnet @@ -1 +1 @@ -Subproject commit 99d874a2b138af020df099a0fc0a09a7d0597fae +Subproject commit d023a43ba4fd4cddb7aa2c0962cf786f01f58c24 diff --git a/sdkdocs/go b/sdkdocs/go index e16e0350a52..a65eddaa4e9 160000 --- a/sdkdocs/go +++ b/sdkdocs/go @@ -1 +1 @@ -Subproject commit e16e0350a52349b5a05138edc0b58e3be78ee753 +Subproject commit a65eddaa4e9217ed5cdf436b3438d2ffd837ba55 diff --git a/sdkdocs/java b/sdkdocs/java index 5e45aa86b81..a9a09ba2acc 160000 --- a/sdkdocs/java +++ b/sdkdocs/java @@ -1 +1 @@ -Subproject commit 5e45aa86b81748bf1e6efdbf7f52c20645a12435 +Subproject commit a9a09ba2acc39bc7e54a5a7092e1c5820818e23c diff --git a/sdkdocs/js b/sdkdocs/js index df7eff281a5..5c2b40ac94b 160000 --- a/sdkdocs/js +++ b/sdkdocs/js @@ -1 +1 @@ -Subproject commit df7eff281a5a1395a7967c658a5707e8dfb2b99e +Subproject commit 5c2b40ac94b50f6a5bdb32008f6a47da69946d95 diff --git a/sdkdocs/python b/sdkdocs/python index 6171b67db60..ef732090e8e 160000 --- a/sdkdocs/python +++ b/sdkdocs/python @@ -1 +1 @@ -Subproject commit 6171b67db60d51704ed8425ae71dda9226bf1255 +Subproject commit ef732090e8e04629ca573d127c5ee187a505aba4 diff --git a/translations/docs-zh b/translations/docs-zh index 794330f6cab..7938567259e 160000 --- a/translations/docs-zh +++ b/translations/docs-zh @@ -1 +1 @@ -Subproject commit 
794330f6cab2db8e09053bb7bf19233eb3237538 +Subproject commit 7938567259e1dcaba7bb3fbfca88ed9db92cefaa