diff --git a/.cargo/audit.toml b/.cargo/audit.toml index 7620ba284..ec635098b 100644 --- a/.cargo/audit.toml +++ b/.cargo/audit.toml @@ -1,13 +1,4 @@ [advisories] ignore = [ - "RUSTSEC-2020-0071", # Bound by Rusoto 0.42, reqwest 0.9, hyper 0.12, chrono 0.4. - "RUSTSEC-2021-0078", # Bound by Rusoto 0.42, Reqwest 0.9 requiring Hyper 0.12 - "RUSTSEC-2021-0079", # Bound by Rusoto 0.42, Reqwest 0.9, a2 0.5 requiring Hyper 0.12 - "RUSTSEC-2021-0124", # Bound by tokio restrictions, rusoto, reqwest, hyper... - "RUSTSEC-2023-0034", # Bound by Rusoto 0.42, Reqwest 0.9, a2 0.5 requiring Hyper 0.12 - "RUSTSEC-2023-0052", # Bound by Rusoto 0.47, Rustls 0.20, hyper-rustls 0.22, a2 0.8 - "RUSTSEC-2023-0065", # Bound by tokio-tungstenite - "RUSTSEC-2024-0006", # Bound by Rusoto 0.42 - "RUSTSEC-2024-0336", # Bound by hyper-alpn 0.4.1, hyper-rustls 0.22.0/0.23.4, - # tokio-rustls 0.22.0/0.23.4 (not affected) + "RUSTSEC-2021-0124", # Bound by tokio restrictions, cargo, reqwest, hyper... ] diff --git a/.circleci/config.yml b/.circleci/config.yml index 4a47dd3c0..7cd0f4b44 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -44,12 +44,76 @@ commands: else echo "${PASS}" | docker login -u="${USER}" --password-stdin fi + setup_rust: + steps: + - run: + name: Set up Rust + command: | + apt update + apt install build-essential curl libstdc++6 libstdc++-12-dev libssl-dev pkg-config -y + apt install cmake -y + # RUST_VER + curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain 1.81 -y + export PATH=$PATH:$HOME/.cargo/bin + echo 'export PATH=$PATH:$HOME/.cargo/bin' >> $BASH_ENV + rustc --version + build_applications: + steps: + - run: + name: Build Applicatins + command: cargo build --features=emulator + setup_bigtable: + steps: + - run: + name: Setup Bigtable + command: scripts/setup_bt.sh + setup_cbt: + steps: + - run: + name: Set up cbt + command: | + echo "deb [signed-by=/usr/share/keyrings/cloud.google.gpg] https://packages.cloud.google.com/apt cloud-sdk main" | tee -a 
/etc/apt/sources.list.d/google-cloud-sdk.list + curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | gpg --dearmor -o /usr/share/keyrings/cloud.google.gpg + apt-get update -y + apt install google-cloud-cli-cbt -y + create_test_result_workspace: + steps: + - run: + name: Create Workspace + command: mkdir -p workspace/test-results + restore_test_cache: + parameters: + cache_key: + type: string + steps: + - restore_cache: + name: Restoring Rust cache + key: <> + save_test_cache: + parameters: + cache_key: + type: string + steps: + - save_cache: + name: Save Rust cache + key: <> + paths: + - target + - ~/.cargo/registry + - ~/.cargo/git + setup_python: + steps: + - run: + name: Set up Python + command: | + pip install --upgrade pip + pip install poetry jobs: audit: docker: # NOTE: update version for all # RUST_VER - - image: rust:1.76 + - image: rust:1.81 auth: username: $DOCKER_USER password: $DOCKER_PASS @@ -67,7 +131,7 @@ jobs: python-checks: docker: - - image: python:3.12-slim-bullseye + - image: python:3.12-slim-bookworm auth: username: $DOCKER_USER password: $DOCKER_PASS @@ -88,112 +152,138 @@ jobs: name: isort, black, flake8, pydocstyle and mypy command: make lint - test: + test-integration: docker: - - image: python:3.12-slim-bullseye + - image: python:3.12-slim-bookworm auth: username: $DOCKER_USER password: $DOCKER_PASS environment: RUST_BACKTRACE: 1 - - image: amazon/dynamodb-local:latest + - image: google/cloud-sdk:latest auth: username: $DOCKER_USER password: $DOCKER_PASS - command: -jar DynamoDBLocal.jar -sharedDb + command: gcloud beta emulators bigtable start --host-port=localhost:8086 + resource_class: large + environment: + BIGTABLE_EMULATOR_HOST: localhost:8086 + steps: + - checkout + - restore_test_cache: + cache_key: rust-v1-integration-test-{{ checksum "Cargo.lock" }} + - create_test_result_workspace + - setup_rust + - build_applications + - run: + name: Set up system + command: | + apt update + apt install libssl-dev apt-transport-https 
ca-certificates gnupg curl cmake -y + - setup_cbt + - setup_python + - setup_bigtable + - run: + name: Integration tests (Bigtable) + command: make integration-test + environment: + RUST_LOG: autopush=debug,autopush_common=debug,autoendpoint=debug,autoconnect=debug,slog_mozlog_json=info,warn + # PYTEST_ARGS: -sv + DB_DSN: grpc://localhost:8086 + TEST_RESULTS_DIR: workspace/test-results + - store_artifacts: + path: ~/project/workspace/test-results/integration_test_results.xml + destination: integration_test_results.xml + - store_test_results: + path: workspace/test-results + - save_test_cache: + cache_key: rust-v1-integration-test-cache-{{ checksum "Cargo.lock" }} + + test-unit: + docker: + - image: python:3.12-slim-bookworm + auth: + username: $DOCKER_USER + password: $DOCKER_PASS + environment: + RUST_BACKTRACE: 1 + RUST_TEST_THREADS: 1 - image: google/cloud-sdk:latest auth: username: $DOCKER_USER password: $DOCKER_PASS command: gcloud beta emulators bigtable start --host-port=localhost:8086 - resource_class: xlarge + resource_class: 2xlarge environment: BIGTABLE_EMULATOR_HOST: localhost:8086 - AWS_LOCAL_DYNAMODB: http://localhost:8000 steps: - checkout # Need to download the poetry.lock files so we can use their # checksums in restore_cache. 
- - restore_cache: - name: Restoring Rust cache - key: rust-v1-{{ .Environment.CACHE_VERSION }}-{{ .Branch }}-{{ checksum "Cargo.lock" }} + - restore_test_cache: + cache_key: rust-v2-cache-{{ checksum "Cargo.lock" }} + - create_test_result_workspace + - setup_rust + - setup_cbt + - setup_bigtable - run: - name: Create Workspace - command: mkdir -p workspace + name: Install cargo-nextest + command: curl -LsSf https://get.nexte.st/latest/linux | tar zxf - -C ${CARGO_HOME:-~/.cargo}/bin - run: - name: Set up system + name: Echo Rust version command: | - apt update - apt install libssl-dev apt-transport-https ca-certificates gnupg curl -y + rustc --version - run: - name: Set up cbt - command: | - echo "deb [signed-by=/usr/share/keyrings/cloud.google.gpg] https://packages.cloud.google.com/apt cloud-sdk main" | tee -a /etc/apt/sources.list.d/google-cloud-sdk.list - curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | gpg --dearmor -o /usr/share/keyrings/cloud.google.gpg - apt-get update -y - apt install google-cloud-cli-cbt -y + name: Install cargo-llvm-cov + command: cargo install cargo-llvm-cov + # Note: This build can potentially exceed the amount of memory availble to the CircleCI instance. + # We've seen that limiting the number of jobs helps reduce the frequency of this. (Note that + # when doing discovery, we found that the docker image `meminfo` and `cpuinfo` often report + # the machine level memory and CPU which are far higher than the memory allocated to the docker + # instance. This may be causing rust to be overly greedy triggering the VM to OOM the process.) 
- run: - name: Set up Python + name: Report tests and coverage command: | - pip install --upgrade pip - pip install poetry + cargo llvm-cov --summary-only --json --output-path workspace/test-results/cov.json nextest --features=emulator --features=bigtable --jobs=2 --profile=ci + - store_artifacts: + path: ~/project/workspace/test-results/cov.json + destination: cov.json + - store_artifacts: + path: ~/project/target/nextest/ci/junit.xml + destination: junit.xml + - store_test_results: + path: target/nextest/ci + - save_test_cache: + cache_key: rust-v2-cache-{{ checksum "Cargo.lock" }} + + rust-checks: + docker: + - image: python:3.12-slim-bookworm + auth: + username: $DOCKER_USER + password: $DOCKER_PASS + environment: + RUST_BACKTRACE: 1 + RUST_TEST_THREADS: 1 + resource_class: large + environment: + BIGTABLE_EMULATOR_HOST: localhost:8086 + steps: + - checkout + # Need to download the poetry.lock files so we can use their + # checksums in restore_cache. + - restore_test_cache: + cache_key: rust-v2-cache-{{ checksum "Cargo.lock" }} + - setup_rust - run: - name: Set up Rust + name: Echo Rust Version command: | - apt update - apt install build-essential curl libstdc++6 libstdc++-10-dev libssl-dev pkg-config -y - apt install cmake -y - # RUST_VER - curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain 1.76 -y - export PATH=$PATH:$HOME/.cargo/bin - echo 'export PATH=$PATH:$HOME/.cargo/bin' >> $BASH_ENV rustc --version - cargo build --features=emulator --features=dual - run: name: Check formatting command: | cargo fmt -- --check cargo clippy --all --all-targets --all-features -- -D warnings --deny=clippy::dbg_macro - - run: - name: Setup Bigtable - command: scripts/setup_bt.sh - - run: - name: Rust tests - # Note: This build can potentially exceed the amount of memory availble to the CircleCI instance. - # We've seen that limiting the number of jobs helps reduce the frequency of this. 
(Note that - # when doing discovery, we found that the docker image `meminfo` and `cpuinfo` often report - # the machine level memory and CPU which are far higher than the memory allocated to the docker - # instance. This may be causing rust to be overly greedy triggering the VM to OOM the process.) - command: cargo test --features=emulator --features=dual --jobs=2 - - run: - name: Integration tests (Bigtable) - command: make integration-test - environment: - DB_DSN: grpc://localhost:8086 - TEST_RESULTS_DIR: workspace/test-results - - run: - name: Integration tests (DynamoDB) - command: make integration-test - environment: - TEST_RESULTS_DIR: workspace/test-results - - run: - name: Integration tests (Dual Bigtable/DynamoDB) - command: make integration-test - environment: - RUST_LOG: autopush=debug,autopush_common=debug,autoendpoint=debug,autoconnect=debug,slog_mozlog_json=info,warn - # PYTEST_ARGS: -sv - DB_DSN: dual - DB_SETTINGS: tests/integration/dual_test.json - TEST_RESULTS_DIR: workspace/test-results - - store_test_results: - path: workspace/test-results - - save_cache: - name: Save Rust cache - key: rust-v1-{{ .Environment.CACHE_VERSION }}-{{ .Branch }}-{{ checksum "Cargo.lock" }} - paths: - - target - - ~/.cargo/registry - - ~/.cargo/git build: docker: @@ -246,7 +336,7 @@ jobs: build-load-test: docker: - - image: cimg/base:2023.12 + - image: cimg/base:2024.06 steps: - checkout - setup_remote_docker: @@ -265,65 +355,6 @@ jobs: - autopush-locust.tar deploy: - docker: - - image: docker:18.03.0-ce - auth: - username: $DOCKER_USER - password: $DOCKER_PASS - parameters: - image: - type: string - repo: - type: string - steps: - - setup_remote_docker - - docker_login - - attach_workspace: - at: /cache - - run: - name: Restore Docker image cache - command: docker load -i /cache/docker.tar - - run: - name: Deploy to Dockerhub - command: | - if [ "${CIRCLE_BRANCH}" == "master" ]; then - # deploy master - docker login -u $DOCKER_USER -p $DOCKER_PASS - docker tag <> 
<>:latest - docker push <>:latest - elif [ ! -z "${CIRCLE_TAG}" ]; then - # deploy a release tag - docker login -u $DOCKER_USER -p $DOCKER_PASS - echo "<>:${CIRCLE_TAG}" - docker tag <> "<>:${CIRCLE_TAG}" - docker images - docker push "<>:${CIRCLE_TAG}" - fi - - deploy-load-test: - docker: - - image: cimg/base:2023.12 - steps: - - checkout - - attach_workspace: - at: /tmp/workspace - - setup_remote_docker - - run: - name: Load Docker Image From Workspace - command: docker load -i /tmp/workspace/autopush-locust.tar - - docker_login: - load: True - - run: - name: Push to Docker Hub - command: | - echo ${DOCKERHUB_LOAD_TEST_REPO}:${CIRCLE_SHA1} - docker tag autopush-locust ${DOCKERHUB_LOAD_TEST_REPO}:${CIRCLE_SHA1} - docker tag autopush-locust ${DOCKERHUB_LOAD_TEST_REPO}:latest - docker images - docker push "${DOCKERHUB_LOAD_TEST_REPO}:${CIRCLE_SHA1}" - docker push "${DOCKERHUB_LOAD_TEST_REPO}:latest" - - deploy-to-gar: executor: gcp-gcr/default parameters: build_tag: @@ -351,13 +382,19 @@ jobs: - run: name: Restore Docker image cache command: docker load -i /tmp/cache/docker.tar + # This is the easiest way to tag multiple images using different + # conditions for the GAR_TAG variable in the smallest amount of code. + # + # You can find other jobs and commands you can use with this orb that + # include tagging here: + # https://circleci.com/developer/orbs/orb/circleci/gcp-gcr - run: name: Tag image command: | - if [ "${CIRCLE_BRANCH}" == "master" ]; then - echo 'export GAR_TAG=master' >> $BASH_ENV - elif [ ! -z "${CIRCLE_TAG}" ]; then - echo "export GAR_TAG=$CIRCLE_TAG" >> $BASH_ENV + if [ ! 
-z "${CIRCLE_TAG}" ]; then + echo "export GAR_TAG=${CIRCLE_TAG}" >> $BASH_ENV + else + echo "export GAR_TAG=${CIRCLE_BRANCH}" >> $BASH_ENV fi echo "export GAR_IMAGE=\"<>/${GCP_GAR_PROJECT_ID}/${GCP_GAR_REPO}/<>\"" >> $BASH_ENV source $BASH_ENV @@ -371,9 +408,30 @@ jobs: registry-url: <> tag: $GAR_TAG,latest -workflows: - version: 2 + deploy-load-test: + docker: + - image: cimg/base:2024.06 + steps: + - checkout + - attach_workspace: + at: /tmp/workspace + - setup_remote_docker + - run: + name: Load Docker Image From Workspace + command: docker load -i /tmp/workspace/autopush-locust.tar + - docker_login: + load: True + - run: + name: Push to Docker Hub + command: | + echo ${DOCKERHUB_LOAD_TEST_REPO}:${CIRCLE_SHA1} + docker tag autopush-locust ${DOCKERHUB_LOAD_TEST_REPO}:${CIRCLE_SHA1} + docker tag autopush-locust ${DOCKERHUB_LOAD_TEST_REPO}:latest + docker images + docker push "${DOCKERHUB_LOAD_TEST_REPO}:${CIRCLE_SHA1}" + docker push "${DOCKERHUB_LOAD_TEST_REPO}:latest" +workflows: build-test-deploy: jobs: - audit: @@ -384,7 +442,18 @@ workflows: filters: tags: only: /.*/ - - test: + - test-integration: + name: Integration Tests + filters: + tags: + only: /.*/ + - test-unit: + name: Rust Unit Tests + filters: + tags: + only: /.*/ + - rust-checks: + name: Rust Formatting Check filters: tags: only: /.*/ @@ -412,14 +481,15 @@ workflows: tags: only: /.*/ - # Comment out the following four sections for local CircleCI testing. + # Comment out the following two sections for local CircleCI testing. 
- deploy: name: deploy-autoconnect - image: autoconnect:build - repo: ${DOCKERHUB_CONNECT_REPO} + image: autoconnect requires: - build-autoconnect - - test + - Integration Tests + - Rust Unit Tests + - Rust Formatting Check filters: tags: only: /.*/ @@ -428,35 +498,12 @@ workflows: - deploy: name: deploy-autoendpoint - image: autoendpoint:build - repo: ${DOCKERHUB_ENDPOINT_REPO} - requires: - - build-autoendpoint - - test - filters: - tags: - only: /.*/ - branches: - only: master - - - deploy-to-gar: - name: deploy-autoconnect-gar - image: autoconnect - requires: - - build-autoconnect - - test - filters: - tags: - only: /.*/ - branches: - only: master - - - deploy-to-gar: - name: deploy-autoendpoint-gar image: autoendpoint requires: - build-autoendpoint - - test + - Integration Tests + - Rust Unit Tests + - Rust Formatting Check filters: tags: only: /.*/ diff --git a/.config/nextest.toml b/.config/nextest.toml new file mode 100644 index 000000000..a239907e6 --- /dev/null +++ b/.config/nextest.toml @@ -0,0 +1,23 @@ +[store] +dir = "target/nextest" + +[profile.default] +retries = 0 +test-threads = "num-cpus" +threads-required = 1 +status-level = "pass" +final-status-level = "flaky" +failure-output = "immediate" +success-output = "never" +fail-fast = false +slow-timeout = { period = "300s" } + +[profile.ci] +fail-fast = false + +[profile.ci.junit] +path = "junit.xml" + +report-name = "autopush-unit-tests" +store-success-output = false +store-failure-output = true diff --git a/.dockerignore b/.dockerignore index 0b95bd847..d44f5ead1 100644 --- a/.dockerignore +++ b/.dockerignore @@ -18,7 +18,6 @@ html_coverage site-packages/* lib-python/* bin/* -ddb include/* lib_pypy/* pypy diff --git a/.github/workflows/publish_docs.yml b/.github/workflows/publish_docs.yml index c334ec326..54da0116d 100644 --- a/.github/workflows/publish_docs.yml +++ b/.github/workflows/publish_docs.yml @@ -1,57 +1,37 @@ name: Publish docs to pages run-name: ${{ github.actor }} is publishing docs on: 
- # on a successful push + workflow_dispatch: # Allow manual triggering push: - # only run on pushes to these branches - branches: - - 'master' - - 'main' - - 'docs' - # or run if pushes to these files. - # paths: - # - 'docs/**.md' - check_run: - types: - # Only run if we have a successfully completed branch - - completed - # if desired, we can also use cron like rules to trigger these - # schedule: - # - cron: 0 0 * * * - # Allow for manual triggering - workflow_dispatch: + branches: [master,main] + # paths: ['docs/**.md'] # API docs need building always -# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages -permissions: - contents: read - pages: write - id-token: write - -# Allow only one concurrent deployment. Cancel any in-progress. concurrency: - group: "pages" - cancel-in-progress: true + group: github-pages + cancel-in-progress: false # Skip any intermediate builds but finish deploying jobs: build: runs-on: ubuntu-latest env: - MDBOOK_ENV: 0.4.24 - MERMAID_ENV: 0.12.6 + MDBOOK_ENV: 0.4.40 + MERMAID_ENV: 0.13.0 DEST_DIR: /home/runner/.cargo/bin steps: - # these are other job descriptions to call. - - uses: actions/checkout@v3 - - name: Configure rust # TODO: can we export building rust and installing mdbook as an artifact and reuse it? 
- run: | - curl --proto '=https' --tlsv1.2 https://sh.rustup.rs -sSf -y | sh - rustup update - export PATH=$PATH:$DEST_DIR + - uses: actions/checkout@v4 + - uses: actions/cache@v4 + with: + path: | + ~/.cargo/bin/ + ~/.cargo/registry/ + ~/.cargo/git/ + target/ + key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} - name: Install mdBook run: | + export PATH=$PATH:$DEST_DIR curl -sSL "https://github.com/rust-lang/mdBook/releases/download/v$MDBOOK_ENV/mdbook-v$MDBOOK_ENV-x86_64-unknown-linux-gnu.tar.gz" | tar -xz --directory $DEST_DIR curl -sSL "https://github.com/badboy/mdBook-mermaid/releases/download/v$MERMAID_ENV/mdbook-mermaid-v$MERMAID_ENV-x86_64-unknown-linux-gnu.tar.gz" | tar -xz --directory $DEST_DIR - # actually build the book - name: Build the main book run: cd docs && mdbook build - name: Build API docs @@ -60,11 +40,10 @@ jobs: - name: Copy cargo docs to API dir run: mkdir -p docs/output/api && cp -r target/doc/* docs/output/api - name: Upload artifact - uses: actions/upload-pages-artifact@v1 + uses: actions/upload-pages-artifact@v3 with: path: ./docs/output - # write the pages deploy: needs: build permissions: @@ -75,6 +54,6 @@ jobs: url: ${{ steps.deployment.outputs.page_url }} runs-on: ubuntu-latest steps: - - name: Deploy to Github Pages + - name: Deploy to GitHub Pages id: depolyment - uses: actions/deploy-pages@v2 + uses: actions/deploy-pages@v4 diff --git a/.gitignore b/.gitignore index 3cee32869..f72d9d619 100644 --- a/.gitignore +++ b/.gitignore @@ -7,7 +7,6 @@ syntax:glob *.egg *~ build -ddb dist docs/_build *.xml @@ -42,7 +41,6 @@ docs/old *.local.toml .cargo/config .vscode/* -dynamodb-local-metadata.json # circleCI workspace diff --git a/Cargo.lock b/Cargo.lock index f646edfe9..ce70111b9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,32 +4,38 @@ version = 3 [[package]] name = "a2" -version = "0.8.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"da60b150c4846f18f007bbdf912cefe83aaa88fe6725b8bfc3529e5fd3caa895" +checksum = "f279fc8b1f1a64138f0f4b9cda9be488ae35bc2f8556c7ffe60730f1c07d005a" dependencies = [ - "base64 0.20.0", + "base64 0.21.7", "erased-serde", - "http 0.2.12", - "hyper 0.14.28", - "hyper-alpn", + "http 1.1.0", + "http-body-util", + "hyper 1.4.1", + "hyper-rustls 0.26.0", + "hyper-util", "openssl", + "parking_lot 0.12.3", + "rustls 0.22.4", + "rustls-pemfile 2.2.0", "serde", "serde_json", "thiserror", + "tokio", ] [[package]] name = "actix" -version = "0.13.3" +version = "0.13.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb72882332b6d6282f428b77ba0358cb2687e61a6f6df6a6d3871e8a177c2d4f" +checksum = "de7fa236829ba0841304542f7614c42b80fca007455315c45c785ccfa873a85b" dependencies = [ "actix-macros", "actix-rt", "actix_derive", - "bitflags 2.5.0", - "bytes 1.6.0", + "bitflags 2.6.0", + "bytes", "crossbeam-channel", "futures-core", "futures-sink", @@ -37,10 +43,10 @@ dependencies = [ "futures-util", "log", "once_cell", - "parking_lot 0.12.1", - "pin-project-lite 0.2.14", - "smallvec 1.13.2", - "tokio 1.37.0", + "parking_lot 0.12.3", + "pin-project-lite", + "smallvec", + "tokio", "tokio-util", ] @@ -50,13 +56,13 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f7b0a21988c1bf877cf4759ef5ddaac04c1c9fe808c9142ecb78ba97d97a28a" dependencies = [ - "bitflags 2.5.0", - "bytes 1.6.0", + "bitflags 2.6.0", + "bytes", "futures-core", "futures-sink", "memchr", - "pin-project-lite 0.2.14", - "tokio 1.37.0", + "pin-project-lite", + "tokio", "tokio-util", "tracing", ] @@ -73,24 +79,24 @@ dependencies = [ "futures-util", "log", "once_cell", - "smallvec 1.13.2", + "smallvec", ] [[package]] name = "actix-http" -version = "3.6.0" +version = "3.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d223b13fd481fc0d1f83bb12659ae774d9e3601814c68a0bc539731698cca743" +checksum = 
"d48f96fc3003717aeb9856ca3d02a8c7de502667ad76eeacd830b48d2e91fac4" dependencies = [ "actix-codec", "actix-rt", "actix-service", "actix-utils", "ahash", - "base64 0.21.7", - "bitflags 2.5.0", + "base64 0.22.1", + "bitflags 2.6.0", "brotli", - "bytes 1.6.0", + "bytes", "bytestring", "derive_more", "encoding_rs", @@ -100,16 +106,16 @@ dependencies = [ "http 0.2.12", "httparse", "httpdate", - "itoa 1.0.11", + "itoa", "language-tags", "local-channel", "mime", "percent-encoding", - "pin-project-lite 0.2.14", + "pin-project-lite", "rand 0.8.5", "sha1", - "smallvec 1.13.2", - "tokio 1.37.0", + "smallvec", + "tokio", "tokio-util", "tracing", "zstd", @@ -128,7 +134,7 @@ dependencies = [ "actix-tls", "actix-utils", "awc", - "bytes 1.6.0", + "bytes", "futures-core", "http 0.2.12", "log", @@ -137,7 +143,7 @@ dependencies = [ "serde_urlencoded", "slab", "socket2", - "tokio 1.37.0", + "tokio", ] [[package]] @@ -147,47 +153,49 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e01ed3140b2f8d422c68afa1ed2e85d996ea619c988ac834d255db32138655cb" dependencies = [ "quote", - "syn 2.0.58", + "syn 2.0.79", ] [[package]] name = "actix-router" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d22475596539443685426b6bdadb926ad0ecaefdfc5fb05e5e3441f15463c511" +checksum = "13d324164c51f63867b57e73ba5936ea151b8a41a1d23d1031eeb9f70d0236f8" dependencies = [ "bytestring", + "cfg-if", "http 0.2.12", "regex", + "regex-lite", "serde", "tracing", ] [[package]] name = "actix-rt" -version = "2.9.0" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28f32d40287d3f402ae0028a9d54bef51af15c8769492826a69d28f81893151d" +checksum = "24eda4e2a6e042aa4e55ac438a2ae052d3b5da0ecf83d7411e1a368946925208" dependencies = [ "actix-macros", "futures-core", - "tokio 1.37.0", + "tokio", ] [[package]] name = "actix-server" -version = "2.3.0" +version = "2.5.0" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3eb13e7eef0423ea6eab0e59f6c72e7cb46d33691ad56a726b3cd07ddec2c2d4" +checksum = "7ca2549781d8dd6d75c40cf6b6051260a2cc2f3c62343d761a969a0640646894" dependencies = [ "actix-rt", "actix-service", "actix-utils", "futures-core", "futures-util", - "mio 0.8.11", + "mio", "socket2", - "tokio 1.37.0", + "tokio", "tracing", ] @@ -199,14 +207,14 @@ checksum = "3b894941f818cfdc7ccc4b9e60fa7e53b5042a2e8567270f9147d5591893373a" dependencies = [ "futures-core", "paste", - "pin-project-lite 0.2.14", + "pin-project-lite", ] [[package]] name = "actix-test" -version = "0.1.3" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c0bc744219177d18f6244d035e1a0b4e36defb6707be2388348ba7e3f071772" +checksum = "439022b5a7b5dac10798465029a9566e8e0cca7a6014541ed277b695691fac5f" dependencies = [ "actix-codec", "actix-http", @@ -222,14 +230,14 @@ dependencies = [ "serde", "serde_json", "serde_urlencoded", - "tokio 1.37.0", + "tokio", ] [[package]] name = "actix-tls" -version = "3.3.0" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4cce60a2f2b477bc72e5cde0af1812a6e82d8fd85b5570a5dcf2a5bf2c5be5f" +checksum = "ac453898d866cdbecdbc2334fe1738c747b4eba14a677261f2b768ba05329389" dependencies = [ "actix-rt", "actix-service", @@ -238,8 +246,8 @@ dependencies = [ "http 0.2.12", "http 1.1.0", "impl-more", - "pin-project-lite 0.2.14", - "tokio 1.37.0", + "pin-project-lite", + "tokio", "tokio-util", "tracing", ] @@ -251,14 +259,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "88a1dcdff1466e3c2488e1cb5c36a71822750ad43839937f85d2f4d9f8b705d8" dependencies = [ "local-waker", - "pin-project-lite 0.2.14", + "pin-project-lite", ] [[package]] name = "actix-web" -version = "4.5.1" +version = "4.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"43a6556ddebb638c2358714d853257ed226ece6023ef9364f23f0c70737ea984" +checksum = "9180d76e5cc7ccbc4d60a506f2c727730b154010262df5b910eb17dbe4b8cb38" dependencies = [ "actix-codec", "actix-http", @@ -270,25 +278,27 @@ dependencies = [ "actix-utils", "actix-web-codegen", "ahash", - "bytes 1.6.0", + "bytes", "bytestring", - "cfg-if 1.0.0", + "cfg-if", "cookie", "derive_more", "encoding_rs", "futures-core", "futures-util", - "itoa 1.0.11", + "impl-more", + "itoa", "language-tags", "log", "mime", "once_cell", - "pin-project-lite 0.2.14", + "pin-project-lite", "regex", + "regex-lite", "serde", "serde_json", "serde_urlencoded", - "smallvec 1.13.2", + "smallvec", "socket2", "time", "url", @@ -296,14 +306,14 @@ dependencies = [ [[package]] name = "actix-web-codegen" -version = "4.2.2" +version = "4.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb1f50ebbb30eca122b188319a4398b3f7bb4a8cdf50ecfb73bfc6a3c3ce54f5" +checksum = "f591380e2e68490b5dfaf1dd1aa0ebe78d84ba7067078512b4ea6e4492d622b8" dependencies = [ "actix-router", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.79", ] [[package]] @@ -316,34 +326,34 @@ dependencies = [ "actix-http", "actix-web", "futures-core", - "tokio 1.37.0", + "tokio", ] [[package]] name = "actix_derive" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c7db3d5a9718568e4cf4a537cfd7070e6e6ff7481510d0237fb529ac850f6d3" +checksum = "b6ac1e58cded18cb28ddc17143c4dea5345b3ad575e14f32f66e4054a56eb271" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.79", ] [[package]] name = "addr2line" -version = "0.21.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" +checksum = "f5fb1d8e4442bd405fdfd1dacb42792696b0cf9cb15882e5d097b742a676d375" dependencies = [ "gimli", ] [[package]] -name = "adler" -version = 
"1.0.2" +name = "adler2" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" +checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" [[package]] name = "again" @@ -362,8 +372,8 @@ version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ - "cfg-if 1.0.0", - "getrandom 0.2.12", + "cfg-if", + "getrandom 0.2.15", "once_cell", "version_check", "zerocopy", @@ -414,52 +424,53 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" dependencies = [ - "winapi 0.3.9", + "winapi", ] [[package]] name = "anstream" -version = "0.6.13" +version = "0.6.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d96bd03f33fe50a863e394ee9718a706f988b9079b20c3784fb726e7678b62fb" +checksum = "64e15c1ab1f89faffbf04a634d5e1962e9074f2741eef6d97f3c4e322426d526" dependencies = [ "anstyle", "anstyle-parse", "anstyle-query", "anstyle-wincon", "colorchoice", + "is_terminal_polyfill", "utf8parse", ] [[package]] name = "anstyle" -version = "1.0.6" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8901269c6307e8d93993578286ac0edf7f195079ffff5ebdeea6a59ffb7e36bc" +checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1" [[package]] name = "anstyle-parse" -version = "0.2.3" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c75ac65da39e5fe5ab759307499ddad880d724eed2f6ce5b5e8a26f4f387928c" +checksum = "eb47de1e80c2b463c735db5b217a0ddc39d612e7ac9e2e96a5aed1f57616c1cb" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.0.2" +version = 
"1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e28923312444cdd728e4738b3f9c9cac739500909bb3d3c94b43551b16517648" +checksum = "6d36fc52c7f6c869915e99412912f22093507da8d9e942ceaf66fe4b7c14422a" dependencies = [ "windows-sys 0.52.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.2" +version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cd54b81ec8d6180e24654d0b371ad22fc3dd083b6ff8ba325b72e00c87660a7" +checksum = "5bf74e1b6e971609db8ca7a9ce79fd5768ab6ae46441c572e46cf596f59e57f8" dependencies = [ "anstyle", "windows-sys 0.52.0", @@ -467,9 +478,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.81" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0952808a6c2afd1aa8947271f3a60f1a6763c7b912d210184c5149b5cf147247" +checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" [[package]] name = "arc-swap" @@ -489,37 +500,43 @@ dependencies = [ [[package]] name = "async-stream" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" +checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" dependencies = [ "async-stream-impl", "futures-core", - "pin-project-lite 0.2.14", + "pin-project-lite", ] [[package]] name = "async-stream-impl" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" +checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.79", ] [[package]] name = "async-trait" -version = "0.1.79" +version = "0.1.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a507401cad91ec6a857ed5513a2073c82a9b9048762b885bb98655b306964681" +checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.79", ] +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + [[package]] name = "atty" version = "0.2.14" @@ -528,14 +545,14 @@ checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" dependencies = [ "hermit-abi 0.1.19", "libc", - "winapi 0.3.9", + "winapi", ] [[package]] name = "autocfg" -version = "1.2.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1fdabc7756949593fe60f30ec81974b613357de856987752631dea1e3394c80" +checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "autoconnect" @@ -559,7 +576,7 @@ dependencies = [ "cadence", "config", "docopt", - "env_logger 0.11.3", + "env_logger 0.11.5", "fernet", "futures 0.3.30", "futures-locks", @@ -568,7 +585,7 @@ dependencies = [ "lazy_static", "log", "mozsvc-common", - "reqwest 0.12.2", + "reqwest 0.12.8", "sentry", "sentry-actix", "sentry-core", @@ -593,15 +610,15 @@ dependencies = [ "cadence", "futures 0.3.30", "futures-locks", - "hyper 1.2.0", - "reqwest 0.12.2", + "hyper 1.4.1", + "reqwest 0.12.8", "sentry", "serde", "serde_derive", "serde_json", "slog", "slog-scope", - "tokio 1.37.0", + "tokio", "uuid", ] @@ -616,13 +633,13 @@ dependencies = [ "fernet", "lazy_static", "mozsvc-common", - "reqwest 0.12.2", + "reqwest 0.12.8", "serde", "serde_derive", "serde_json", "slog", "slog-scope", - "tokio 1.37.0", + "tokio", ] [[package]] @@ -640,16 +657,16 @@ dependencies = [ "autoconnect_ws", "autopush_common", "backtrace", - "bytes 1.6.0", + "bytes", "bytestring", "cadence", "ctor", "futures-util", - "reqwest 0.12.2", + "reqwest 0.12.8", 
"serde_json", "slog-scope", "thiserror", - "tokio 1.37.0", + "tokio", "uuid", ] @@ -676,7 +693,7 @@ dependencies = [ "slog-scope", "strum", "thiserror", - "tokio 1.37.0", + "tokio", ] [[package]] @@ -694,11 +711,12 @@ dependencies = [ "ctor", "futures 0.3.30", "mockall", - "reqwest 0.12.2", + "reqwest 0.12.8", "sentry", + "serde_json", "slog-scope", "thiserror", - "tokio 1.37.0", + "tokio", "uuid", ] @@ -715,7 +733,7 @@ dependencies = [ "async-trait", "autopush_common", "backtrace", - "base64 0.22.0", + "base64 0.22.1", "bytebuffer", "cadence", "config", @@ -729,19 +747,16 @@ dependencies = [ "lazy_static", "log", "mockall", - "mockito", + "mockito 1.5.0", "openssl", "rand 0.8.5", "regex", - "reqwest 0.12.2", - "rusoto_core", - "rusoto_dynamodb", + "reqwest 0.12.8", "sentry", "sentry-actix", "sentry-core", "serde", "serde_derive", - "serde_dynamodb", "serde_json", "slog", "slog-async", @@ -751,7 +766,7 @@ dependencies = [ "slog-term", "tempfile", "thiserror", - "tokio 1.37.0", + "tokio", "url", "uuid", "validator", @@ -763,21 +778,20 @@ dependencies = [ name = "autopush_common" version = "1.71.7" dependencies = [ - "actix-http", "actix-rt", "actix-web", "again", "async-trait", "backtrace", - "base64 0.22.0", + "base64 0.22.1", "cadence", "chrono", "config", "deadpool", + "derive_builder", "fernet", "form_urlencoded", "futures 0.3.30", - "futures-backoff", "futures-util", "gethostname", "google-cloud-rust-raw", @@ -785,26 +799,19 @@ dependencies = [ "grpcio-sys", "hex", "httparse", - "hyper 1.2.0", "lazy_static", - "log", "mockall", - "mockito", + "mockito 0.31.1", "num_cpus", "openssl", "protobuf", "rand 0.8.5", "regex", - "reqwest 0.12.2", - "rusoto_core", - "rusoto_credential", - "rusoto_dynamodb", + "reqwest 0.12.8", "sentry", "sentry-backtrace", - "sentry-core", "serde", "serde_derive", - "serde_dynamodb", "serde_json", "slog", "slog-async", @@ -815,10 +822,7 @@ dependencies = [ "slog-term", "tempfile", "thiserror", - "tokio 0.2.25", - "tokio 1.37.0", - 
"tokio-core", - "tungstenite", + "tokio", "url", "uuid", "woothee", @@ -826,9 +830,9 @@ dependencies = [ [[package]] name = "awc" -version = "3.4.0" +version = "3.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68c09cc97310b926f01621faee652f3d1b0962545a3cec6c9ac07def9ea36c2c" +checksum = "79049b2461279b886e46f1107efc347ebecc7b88d74d023dda010551a124967b" dependencies = [ "actix-codec", "actix-http", @@ -836,60 +840,42 @@ dependencies = [ "actix-service", "actix-tls", "actix-utils", - "base64 0.21.7", - "bytes 1.6.0", - "cfg-if 1.0.0", + "base64 0.22.1", + "bytes", + "cfg-if", "cookie", "derive_more", "futures-core", "futures-util", "h2 0.3.26", "http 0.2.12", - "itoa 1.0.11", + "itoa", "log", "mime", "percent-encoding", - "pin-project-lite 0.2.14", + "pin-project-lite", "rand 0.8.5", "serde", "serde_json", "serde_urlencoded", - "tokio 1.37.0", + "tokio", ] [[package]] name = "backtrace" -version = "0.3.71" +version = "0.3.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26b05800d2e817c8b3b4b54abd461726265fa9789ae34330622f2db9ee696f9d" +checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" dependencies = [ "addr2line", - "cc", - "cfg-if 1.0.0", + "cfg-if", "libc", "miniz_oxide", "object", "rustc-demangle", + "windows-targets 0.52.6", ] -[[package]] -name = "base64" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b41b7ea54a0c9d92199de89e20e58d49f02f8e699814ef3fdf266f6f748d15c7" - -[[package]] -name = "base64" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" - -[[package]] -name = "base64" -version = "0.20.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ea22880d78093b0cbe17c89f64a7d457941e65759157ec6cb31a31d652b05e5" - [[package]] name = "base64" 
version = "0.21.7" @@ -898,9 +884,9 @@ checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" [[package]] name = "base64" -version = "0.22.0" +version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9475866fec1451be56a3c2400fd081ff546538961565ccb5b7142cbd22bc7a51" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "bindgen" @@ -933,50 +919,20 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1" +checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" dependencies = [ "serde", ] -[[package]] -name = "block-buffer" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" -dependencies = [ - "block-padding", - "byte-tools", - "byteorder", - "generic-array 0.12.4", -] - -[[package]] -name = "block-buffer" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" -dependencies = [ - "generic-array 0.14.7", -] - [[package]] name = "block-buffer" version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ - "generic-array 0.14.7", -] - -[[package]] -name = "block-padding" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5" -dependencies = [ - "byte-tools", + "generic-array", ] [[package]] @@ -990,9 +946,9 @@ dependencies = [ 
[[package]] name = "brotli" -version = "3.5.0" +version = "6.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d640d25bc63c50fb1f0b545ffd80207d2e10a4c965530809b40ba3386825c391" +checksum = "74f7971dbd9326d58187408ab83117d8ac1bb9c17b085fdacd1cf2f598719b6b" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -1001,9 +957,9 @@ dependencies = [ [[package]] name = "brotli-decompressor" -version = "2.5.1" +version = "4.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e2e4afe60d7dd600fdd3de8d0f08c2b7ec039712e3b6137ff98b7004e82de4f" +checksum = "9a45bd2e4095a8b518033b128020dd4a55aab1c0a381ba4404a472630f4bc362" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -1011,21 +967,15 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.15.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ff69b9dd49fd426c69a0db9fc04dd934cdb6645ff000864d98f7e2af8830eaa" - -[[package]] -name = "byte-tools" -version = "0.3.1" +version = "3.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" +checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" [[package]] name = "bytebuffer" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa7bfaf7cd08cacd74cdc6b521c37ac39cbc92692e5ab5c21ed5657a749b577c" +checksum = "2618ff1f07e000264e355904bff5c0527091fcbad883ff7f71383c5428915f4b" dependencies = [ "byteorder", ] @@ -1038,25 +988,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "0.4.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "206fdffcfa2df7cbe15601ef46c813fce0965eb3286db6b56c583b814b51c81c" -dependencies = [ - "byteorder", - "iovec", -] - -[[package]] -name = 
"bytes" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" - -[[package]] -name = "bytes" -version = "1.6.0" +version = "1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" +checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3" [[package]] name = "bytestring" @@ -1064,26 +998,27 @@ version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "74d80203ea6b29df88012294f62733de21cfeab47f17b41af3a38bc30a03ee72" dependencies = [ - "bytes 1.6.0", + "bytes", ] [[package]] name = "cadence" -version = "1.3.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7611a627d6f459659e63e007fb6a01733a4dca558779fe019f66579068779d79" +checksum = "62fd689c825a93386a2ac05a46f88342c6df9ec3e79416f665650614e92e7475" dependencies = [ "crossbeam-channel", ] [[package]] name = "cc" -version = "1.0.90" +version = "1.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cd6604a82acf3039f1144f54b8eb34e91ffba622051189e71b781822d5ee1f5" +checksum = "812acba72f0a070b003d3697490d2b55b837230ae7c6c6497f05cc2ddbb8d938" dependencies = [ "jobserver", "libc", + "shlex", ] [[package]] @@ -1095,12 +1030,6 @@ dependencies = [ "nom", ] -[[package]] -name = "cfg-if" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" - [[package]] name = "cfg-if" version = "1.0.0" @@ -1109,24 +1038,23 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.37" +version = "0.4.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8a0d04d43504c61aa6c7531f1871dd0d418d91130162063b789da00fd7057a5e" +checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" dependencies = [ "android-tzdata", "iana-time-zone", "js-sys", "num-traits", - "serde", "wasm-bindgen", - "windows-targets 0.52.4", + "windows-targets 0.52.6", ] [[package]] name = "clang-sys" -version = "1.7.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67523a3b4be3ce1989d607a828d036249522dd9c1c8de7f4dd2dae43a37369d1" +checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" dependencies = [ "glob", "libc", @@ -1148,29 +1076,20 @@ dependencies = [ "vec_map", ] -[[package]] -name = "cloudabi" -version = "0.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" -dependencies = [ - "bitflags 1.3.2", -] - [[package]] name = "cmake" -version = "0.1.50" +version = "0.1.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31c789563b815f77f4250caee12365734369f942439b7defd71e18a48197130" +checksum = "fb1e43aa7fd152b1f968787f7dbcdeb306d1867ff373c69955211876c053f91a" dependencies = [ "cc", ] [[package]] name = "colorchoice" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" +checksum = "d3fd119d74b830634cea2a0f58bbd0d54540518a14397557951e79340abc28c0" [[package]] name = "colored" @@ -1217,7 +1136,7 @@ version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f9d839f2a20b0aee515dc581a6172f2321f96cab76c1a38a4c584a194955390e" dependencies = [ - "getrandom 0.2.12", + "getrandom 0.2.15", "once_cell", "tiny-keccak", ] @@ -1260,90 +1179,42 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.6" +version = "0.8.7" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "cpufeatures" -version = "0.2.12" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504" +checksum = "608697df725056feaccfa42cffdaeeec3fccc4ffc38358ecd19b243e716a78e0" dependencies = [ "libc", ] [[package]] name = "crc32fast" -version = "1.4.0" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3855a8a784b474f333699ef2bbca9db2c4a1f6d9088a90a2d25b1eb53111eaa" +checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] name = "crossbeam-channel" -version = "0.5.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab3db02a9c5b5121e1e42fbdb1aeb65f5e02624cc58c43f2884c6ccac0b82f95" -dependencies = [ - "crossbeam-utils 0.8.19", -] - -[[package]] -name = "crossbeam-deque" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c20ff29ded3204c5106278a81a38f4b482636ed4fa1e6cfbeef193291beb29ed" -dependencies = [ - "crossbeam-epoch", - "crossbeam-utils 0.7.2", - "maybe-uninit", -] - -[[package]] -name = "crossbeam-epoch" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace" -dependencies = [ - "autocfg", - "cfg-if 0.1.10", - "crossbeam-utils 0.7.2", - "lazy_static", - "maybe-uninit", - "memoffset", - "scopeguard", -] - -[[package]] -name = "crossbeam-queue" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"774ba60a54c213d409d5353bda12d49cd68d14e45036a285234c8d6f91f92570" -dependencies = [ - "cfg-if 0.1.10", - "crossbeam-utils 0.7.2", - "maybe-uninit", -] - -[[package]] -name = "crossbeam-utils" -version = "0.7.2" +version = "0.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" +checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2" dependencies = [ - "autocfg", - "cfg-if 0.1.10", - "lazy_static", + "crossbeam-utils", ] [[package]] name = "crossbeam-utils" -version = "0.8.19" +version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" +checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" [[package]] name = "crunchy" @@ -1357,44 +1228,25 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ - "generic-array 0.14.7", + "generic-array", "typenum", ] -[[package]] -name = "crypto-mac" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714" -dependencies = [ - "generic-array 0.14.7", - "subtle", -] - -[[package]] -name = "ct-logs" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1a816186fa68d9e426e3cb4ae4dff1fcd8e4a2c34b781bf7a822574a0d0aac8" -dependencies = [ - "sct 0.6.1", -] - [[package]] name = "ctor" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad291aa74992b9b7a7e88c38acbbf6ad7e107f1d90ee8775b7bc1fc3394f485c" +checksum = "edb49164822f3ee45b17acd4a208cfc1251410cf0cad9a833234c9890774dd9f" dependencies = [ "quote", - "syn 2.0.58", + 
"syn 2.0.79", ] [[package]] name = "darling" -version = "0.20.8" +version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54e36fcd13ed84ffdfda6f5be89b31287cbb80c439841fe69e04841435464391" +checksum = "6f63b86c8a8826a49b8c21f08a2d07338eec8d900540f8630dc76284be802989" dependencies = [ "darling_core", "darling_macro", @@ -1402,27 +1254,27 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.20.8" +version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c2cf1c23a687a1feeb728783b993c4e1ad83d99f351801977dd809b48d0a70f" +checksum = "95133861a8032aaea082871032f5815eb9e98cef03fa916ab4500513994df9e5" dependencies = [ "fnv", "ident_case", "proc-macro2", "quote", - "strsim 0.10.0", - "syn 2.0.58", + "strsim 0.11.1", + "syn 2.0.79", ] [[package]] name = "darling_macro" -version = "0.20.8" +version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a668eda54683121533a393014d8692171709ff57a7d61f187b6e782719f8933f" +checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.58", + "syn 2.0.79", ] [[package]] @@ -1434,16 +1286,16 @@ dependencies = [ "async-trait", "deadpool-runtime", "num_cpus", - "tokio 1.37.0", + "tokio", ] [[package]] name = "deadpool-runtime" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63dfa964fe2a66f3fde91fc70b267fe193d822c7e603e2a675a49a7f46ad3f49" +checksum = "092966b41edc516079bdf31ec78a2e0588d1d0c08f78b91d8307215928642b2b" dependencies = [ - "tokio 1.37.0", + "tokio", ] [[package]] @@ -1467,34 +1319,47 @@ dependencies = [ ] [[package]] -name = "derive_more" -version = "0.99.17" +name = "derive_builder" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" +checksum = "cd33f37ee6a119146a1781d3356a7c26028f83d779b2e04ecd45fdc75c76877b" dependencies = [ - "convert_case 0.4.0", + "derive_builder_macro", +] + +[[package]] +name = "derive_builder_core" +version = "0.20.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7431fa049613920234f22c47fdc33e6cf3ee83067091ea4277a3f8c4587aae38" +dependencies = [ + "darling", "proc-macro2", "quote", - "rustc_version 0.4.0", - "syn 1.0.109", + "syn 2.0.79", ] [[package]] -name = "digest" -version = "0.8.1" +name = "derive_builder_macro" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" +checksum = "4abae7035bf79b9877b779505d8cf3749285b80c43941eda66604841889451dc" dependencies = [ - "generic-array 0.12.4", + "derive_builder_core", + "syn 2.0.79", ] [[package]] -name = "digest" -version = "0.9.0" +name = "derive_more" +version = "0.99.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" +checksum = "5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce" dependencies = [ - "generic-array 0.14.7", + "convert_case 0.4.0", + "proc-macro2", + "quote", + "rustc_version", + "syn 2.0.79", ] [[package]] @@ -1503,7 +1368,7 @@ version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ - "block-buffer 0.10.4", + "block-buffer", "crypto-common", ] @@ -1513,7 +1378,7 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b98cf8ebf19c3d1b223e151f99a4f9f0690dca41414773390fc824184ac833e1" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "dirs-sys-next", ] @@ -1525,7 +1390,7 @@ checksum = 
"4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d" dependencies = [ "libc", "redox_users", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -1557,24 +1422,24 @@ checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" [[package]] name = "either" -version = "1.10.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11157ac094ffbdde99aa67b23417ebdd801842852b500e395a45a9c0aac03e4a" +checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" [[package]] name = "encoding_rs" -version = "0.8.33" +version = "0.8.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7268b386296a025e474d5140678f75d6de9493ae55a5d709eeb9dd08149945e1" +checksum = "b45de904aa0b010bce2ab45264d0631681847fa7b6f2eaa7dab7619943bc4f59" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] name = "env_filter" -version = "0.1.0" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a009aa4810eb158359dda09d0c87378e4bbb89b5a801f016885a4707ba24f7ea" +checksum = "4f2c92ceda6ceec50f43169f9ee8424fe2db276791afde7b2cd8bc084cb376ab" dependencies = [ "log", "regex", @@ -1595,9 +1460,9 @@ dependencies = [ [[package]] name = "env_logger" -version = "0.11.3" +version = "0.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38b35839ba51819680ba087cd351788c9a3c476841207e0b8cee0b04722343b9" +checksum = "e13fa619b91fb2381732789fc5de83b45675e882f66623b7d8cb4f643017018d" dependencies = [ "anstream", "anstyle", @@ -1623,35 +1488,29 @@ dependencies = [ [[package]] name = "errno" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" +checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" dependencies = [ "libc", "windows-sys 0.52.0", 
] -[[package]] -name = "fake-simd" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" - [[package]] name = "fastrand" -version = "2.0.2" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "658bd65b1cf4c852a3cc96f18a8ce7b5640f6b703f905c7d74532294c2a63984" +checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" [[package]] name = "fernet" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3364d69f691f3903b1a71605fa04f40a7c2d259f0f0512347e36d19a63debf1f" +checksum = "c66b725fe9483b9ee72ccaec072b15eb8ad95a3ae63a8c798d5748883b72fd33" dependencies = [ - "base64 0.21.7", + "base64 0.22.1", "byteorder", - "getrandom 0.2.12", + "getrandom 0.2.15", "openssl", "zeroize", ] @@ -1665,14 +1524,14 @@ dependencies = [ "cc", "lazy_static", "libc", - "winapi 0.3.9", + "winapi", ] [[package]] name = "flate2" -version = "1.0.28" +version = "1.0.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46303f565772937ffe1d394a4fac6f411c6013172fadde9dcdb1e147a086940e" +checksum = "a1b589b4dc103969ad3cf85c950899926ec64300a1a46d76c03a6072957036f0" dependencies = [ "crc32fast", "miniz_oxide", @@ -1714,28 +1573,6 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" -[[package]] -name = "fuchsia-cprng" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" - -[[package]] -name = "fuchsia-zircon" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82" 
-dependencies = [ - "bitflags 1.3.2", - "fuchsia-zircon-sys", -] - -[[package]] -name = "fuchsia-zircon-sys" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" - [[package]] name = "futures" version = "0.1.31" @@ -1757,17 +1594,6 @@ dependencies = [ "futures-util", ] -[[package]] -name = "futures-backoff" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79f590345ee8cedf1f36068db74ca81c7e7bf753a3e59ea58b0826243a13971b" -dependencies = [ - "futures 0.1.31", - "futures-timer", - "rand 0.4.6", -] - [[package]] name = "futures-channel" version = "0.3.30" @@ -1809,7 +1635,7 @@ checksum = "45ec6fe3675af967e67c5536c0b9d44e34e6c52f86bedc4ea49c5317b8e94d06" dependencies = [ "futures-channel", "futures-task", - "tokio 1.37.0", + "tokio", ] [[package]] @@ -1820,7 +1646,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.79", ] [[package]] @@ -1835,15 +1661,6 @@ version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" -[[package]] -name = "futures-timer" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5cedfe9b6dc756220782cc1ba5bcb1fa091cdcba155e40d3556159c3db58043" -dependencies = [ - "futures 0.1.31", -] - [[package]] name = "futures-util" version = "0.3.30" @@ -1858,20 +1675,11 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project-lite 0.2.14", + "pin-project-lite", "pin-utils", "slab", ] -[[package]] -name = "generic-array" -version = "0.12.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffdf9f34f1447443d37393cc6c2b8313aebddcd96906caf34e54c68d8e57d7bd" -dependencies = [ - 
"typenum", -] - [[package]] name = "generic-array" version = "0.14.7" @@ -1898,27 +1706,29 @@ version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "wasi 0.9.0+wasi-snapshot-preview1", ] [[package]] name = "getrandom" -version = "0.2.12" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5" +checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", + "js-sys", "libc", "wasi 0.11.0+wasi-snapshot-preview1", + "wasm-bindgen", ] [[package]] name = "gimli" -version = "0.28.1" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" +checksum = "32085ea23f3234fc7846555e85283ba4de91e21016dc0455a16286d87a292d64" [[package]] name = "glob" @@ -1948,7 +1758,7 @@ dependencies = [ "grpcio-sys", "libc", "log", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "protobuf", ] @@ -1974,7 +1784,7 @@ version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" dependencies = [ - "bytes 1.6.0", + "bytes", "fnv", "futures-core", "futures-sink", @@ -1982,26 +1792,26 @@ dependencies = [ "http 0.2.12", "indexmap", "slab", - "tokio 1.37.0", + "tokio", "tokio-util", "tracing", ] [[package]] name = "h2" -version = "0.4.4" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "816ec7294445779408f36fe57bc5b7fc1cf59664059096c65f905c1c61f58069" +checksum = "524e8ac6999421f49a846c2d4411f337e53497d8ec55d67753beffa43c5d9205" dependencies = [ - "bytes 1.6.0", + "atomic-waker", + "bytes", 
"fnv", "futures-core", "futures-sink", - "futures-util", "http 1.1.0", "indexmap", "slab", - "tokio 1.37.0", + "tokio", "tokio-util", "tracing", ] @@ -2014,15 +1824,15 @@ checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" [[package]] name = "hashbrown" -version = "0.14.3" +version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" +checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb" [[package]] name = "heck" -version = "0.4.1" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" [[package]] name = "hermit-abi" @@ -2040,20 +1850,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" [[package]] -name = "hex" -version = "0.4.3" +name = "hermit-abi" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc" [[package]] -name = "hmac" -version = "0.11.0" +name = "hex" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a2a2320eb7ec0ebe8da8f744d7812d9fc4cb4d09344ac01898dbcb6a20ae69b" -dependencies = [ - "crypto-mac", - "digest 0.9.0", -] +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "home" @@ -2072,18 +1878,18 @@ checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867" dependencies = [ "libc", "match_cfg", - "winapi 0.3.9", + "winapi", ] [[package]] -name = "http" -version = "0.1.21" +name = "hostname" 
+version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6ccf5ede3a895d8856620237b2f02972c1bbc78d2965ad7fe8838d4a0ed41f0" +checksum = "f9c7c7c8ac16c798734b8a24560c1362120597c40d5e1459f09498f8f6c8f2ba" dependencies = [ - "bytes 0.4.12", - "fnv", - "itoa 0.4.8", + "cfg-if", + "libc", + "windows", ] [[package]] @@ -2092,9 +1898,9 @@ version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" dependencies = [ - "bytes 1.6.0", + "bytes", "fnv", - "itoa 1.0.11", + "itoa", ] [[package]] @@ -2103,9 +1909,9 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" dependencies = [ - "bytes 1.6.0", + "bytes", "fnv", - "itoa 1.0.11", + "itoa", ] [[package]] @@ -2114,39 +1920,39 @@ version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" dependencies = [ - "bytes 1.6.0", + "bytes", "http 0.2.12", - "pin-project-lite 0.2.14", + "pin-project-lite", ] [[package]] name = "http-body" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643" +checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ - "bytes 1.6.0", + "bytes", "http 1.1.0", ] [[package]] name = "http-body-util" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0475f8b2ac86659c21b64320d5d653f9efe42acd2a4e560073ec61a155a34f1d" +checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" dependencies = [ - "bytes 1.6.0", - "futures-core", + "bytes", + "futures-util", "http 1.1.0", - 
"http-body 1.0.0", - "pin-project-lite 0.2.14", + "http-body 1.0.1", + "pin-project-lite", ] [[package]] name = "httparse" -version = "1.8.0" +version = "1.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" +checksum = "7d71d3574edd2771538b901e6549113b4006ece66150fb69c0fb6d9a2adae946" [[package]] name = "httpdate" @@ -2162,11 +1968,11 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.28" +version = "0.14.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf96e135eb83a2a8ddf766e426a841d8ddd7449d5f00d34ea02b41d2f19eef80" +checksum = "a152ddd61dfaec7273fe8419ab357f33aee0d914c5f4efbf0d96fa749eea5ec9" dependencies = [ - "bytes 1.6.0", + "bytes", "futures-channel", "futures-core", "futures-util", @@ -2175,10 +1981,10 @@ dependencies = [ "http-body 0.4.6", "httparse", "httpdate", - "itoa 1.0.11", - "pin-project-lite 0.2.14", + "itoa", + "pin-project-lite", "socket2", - "tokio 1.37.0", + "tokio", "tower-service", "tracing", "want", @@ -2186,70 +1992,74 @@ dependencies = [ [[package]] name = "hyper" -version = "1.2.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "186548d73ac615b32a73aafe38fb4f56c0d340e110e5a200bcadbaf2e199263a" +checksum = "50dfd22e0e76d0f662d429a5f80fcaf3855009297eab6a0a9f8543834744ba05" dependencies = [ - "bytes 1.6.0", + "bytes", "futures-channel", "futures-util", - "h2 0.4.4", + "h2 0.4.6", "http 1.1.0", - "http-body 1.0.0", + "http-body 1.0.1", "httparse", - "itoa 1.0.11", - "pin-project-lite 0.2.14", - "smallvec 1.13.2", - "tokio 1.37.0", + "httpdate", + "itoa", + "pin-project-lite", + "smallvec", + "tokio", "want", ] [[package]] -name = "hyper-alpn" -version = "0.4.1" +name = "hyper-rustls" +version = "0.24.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "bafa0c8f13da51d474d0c0933c7abe8c50e5f8c7ad72ea34ba998104d9190760" +checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ - "hyper 0.14.28", + "futures-util", + "http 0.2.12", + "hyper 0.14.30", "log", - "rustls 0.20.9", - "rustls-pemfile", - "tokio 1.37.0", - "tokio-rustls 0.23.4", - "webpki-roots", + "rustls 0.21.12", + "rustls-native-certs", + "tokio", + "tokio-rustls 0.24.1", ] [[package]] name = "hyper-rustls" -version = "0.22.1" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f9f7a97316d44c0af9b0301e65010573a853a9fc97046d7331d7f6bc0fd5a64" +checksum = "a0bea761b46ae2b24eb4aef630d8d1c398157b6fc29e6350ecf090a0b70c952c" dependencies = [ - "ct-logs", "futures-util", - "hyper 0.14.28", - "log", - "rustls 0.19.1", - "rustls-native-certs 0.5.0", - "tokio 1.37.0", - "tokio-rustls 0.22.0", - "webpki 0.21.4", + "http 1.1.0", + "hyper 1.4.1", + "hyper-util", + "rustls 0.22.4", + "rustls-pki-types", + "tokio", + "tokio-rustls 0.25.0", + "tower-service", + "webpki-roots", ] [[package]] name = "hyper-rustls" -version = "0.24.2" +version = "0.27.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" +checksum = "08afdbb5c31130e3034af566421053ab03787c640246a446327f550d11bcb333" dependencies = [ "futures-util", - "http 0.2.12", - "hyper 0.14.28", - "log", - "rustls 0.21.11", - "rustls-native-certs 0.6.3", - "tokio 1.37.0", - "tokio-rustls 0.24.1", + "http 1.1.0", + "hyper 1.4.1", + "hyper-util", + "rustls 0.23.13", + "rustls-pki-types", + "tokio", + "tokio-rustls 0.26.0", + "tower-service", ] [[package]] @@ -2258,10 +2068,10 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" dependencies = 
[ - "bytes 1.6.0", - "hyper 0.14.28", + "bytes", + "hyper 0.14.30", "native-tls", - "tokio 1.37.0", + "tokio", "tokio-native-tls", ] @@ -2271,41 +2081,40 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" dependencies = [ - "bytes 1.6.0", + "bytes", "http-body-util", - "hyper 1.2.0", + "hyper 1.4.1", "hyper-util", "native-tls", - "tokio 1.37.0", + "tokio", "tokio-native-tls", "tower-service", ] [[package]] name = "hyper-util" -version = "0.1.3" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca38ef113da30126bbff9cd1705f9273e15d45498615d138b0c20279ac7a76aa" +checksum = "41296eb09f183ac68eec06e03cdbea2e759633d4067b2f6552fc2e009bcad08b" dependencies = [ - "bytes 1.6.0", + "bytes", "futures-channel", "futures-util", "http 1.1.0", - "http-body 1.0.0", - "hyper 1.2.0", - "pin-project-lite 0.2.14", + "http-body 1.0.1", + "hyper 1.4.1", + "pin-project-lite", "socket2", - "tokio 1.37.0", - "tower", + "tokio", "tower-service", "tracing", ] [[package]] name = "iana-time-zone" -version = "0.1.60" +version = "0.1.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141" +checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -2348,73 +2157,55 @@ checksum = "206ca75c9c03ba3d4ace2460e57b189f39f43de612c2f85836e65c929701bb2d" [[package]] name = "indexmap" -version = "2.2.6" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" +checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" dependencies = [ "equivalent", - "hashbrown 0.14.3", -] - -[[package]] -name = "input_buffer" -version = 
"0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e1b822cc844905551931d6f81608ed5f50a79c1078a4e2b4d42dbc7c1eedfbf" -dependencies = [ - "bytes 0.4.12", + "hashbrown 0.15.0", ] [[package]] name = "instant" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" -dependencies = [ - "cfg-if 1.0.0", -] - -[[package]] -name = "iovec" -version = "0.1.4" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2b3ea6ff95e175473f8ffe6a7eb7c00d054240321b84c57051175fe3c1e075e" +checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" dependencies = [ - "libc", + "cfg-if", ] [[package]] name = "ipnet" -version = "2.9.0" +version = "2.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" +checksum = "ddc24109865250148c2e0f3d25d4f0f479571723792d3802153c60922a4fb708" [[package]] name = "is-terminal" -version = "0.4.12" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f23ff5ef2b80d608d61efee834934d862cd92461afc0560dedf493e4c033738b" +checksum = "261f68e344040fbd0edea105bef17c66edf46f984ddb1115b775ce31be948f4b" dependencies = [ - "hermit-abi 0.3.9", + "hermit-abi 0.4.0", "libc", "windows-sys 0.52.0", ] +[[package]] +name = "is_terminal_polyfill" +version = "1.70.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" + [[package]] name = "itertools" -version = "0.10.5" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +checksum = 
"ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" dependencies = [ "either", ] -[[package]] -name = "itoa" -version = "0.4.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" - [[package]] name = "itoa" version = "1.0.11" @@ -2423,18 +2214,18 @@ checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" [[package]] name = "jobserver" -version = "0.1.28" +version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab46a6e9526ddef3ae7f787c06f0f2600639ba80ea3eade3d8e670a2230f51d6" +checksum = "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0" dependencies = [ "libc", ] [[package]] name = "js-sys" -version = "0.3.69" +version = "0.3.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" +checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a" dependencies = [ "wasm-bindgen", ] @@ -2452,28 +2243,19 @@ dependencies = [ [[package]] name = "jsonwebtoken" -version = "8.3.0" +version = "9.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" +checksum = "b9ae10193d25051e74945f1ea2d0b42e03cc3b890f7e4cc5faa44997d808193f" dependencies = [ "base64 0.21.7", + "js-sys", "pem", - "ring 0.16.20", + "ring", "serde", "serde_json", "simple_asn1", ] -[[package]] -name = "kernel32-sys" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" -dependencies = [ - "winapi 0.2.8", - "winapi-build", -] - [[package]] name = "language-tags" version = "0.3.2" @@ -2482,9 +2264,9 @@ checksum = "d4345964bb142484797b161f473a503a434de77149dd8c7427788c6e13379388" [[package]] 
name = "lazy_static" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "lazycell" @@ -2494,18 +2276,18 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.153" +version = "0.2.159" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" +checksum = "561d97a539a36e26a9a5fad1ea11a3039a67714694aaa379433e580854bc3dc5" [[package]] name = "libloading" -version = "0.8.3" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c2a198fb6b0eada2a8df47933734e6d35d350665a33a3593d7164fa52c75c19" +checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" dependencies = [ - "cfg-if 1.0.0", - "windows-targets 0.48.5", + "cfg-if", + "windows-targets 0.52.6", ] [[package]] @@ -2514,15 +2296,15 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "libc", ] [[package]] name = "libz-sys" -version = "1.1.16" +version = "1.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e143b5e666b2695d28f6bca6497720813f699c9602dd7f5cac91008b8ada7f9" +checksum = "d2d16453e800a8cf6dd2fc3eb4bc99b786a9b90c663b8559a5b1a041bf89e472" dependencies = [ "cc", "libc", @@ -2538,9 +2320,9 @@ checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "linux-raw-sys" -version = "0.4.13" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" +checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" [[package]] name = "local-channel" @@ -2561,18 +2343,9 @@ checksum = "4d873d7c67ce09b42110d801813efbc9364414e356be9935700d368351657487" [[package]] name = "lock_api" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4da24a77a3d8a6d4862d95f72e6fdb9c09a643ecdb402d754004a557f2bec75" -dependencies = [ - "scopeguard", -] - -[[package]] -name = "lock_api" -version = "0.4.11" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45" +checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" dependencies = [ "autocfg", "scopeguard", @@ -2580,9 +2353,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.21" +version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" +checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" [[package]] name = "match_cfg" @@ -2590,37 +2363,11 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" -[[package]] -name = "maybe-uninit" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" - -[[package]] -name = "md-5" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b5a279bb9607f9f53c22d496eade00d138d1bdcccd07d74650387cf94942a15" -dependencies = [ - "block-buffer 0.9.0", - "digest 0.9.0", - "opaque-debug 0.3.1", -] - [[package]] name = "memchr" -version = "2.7.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d" - -[[package]] -name = "memoffset" -version = "0.5.6" +version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "043175f069eda7b85febe4a74abbaeff828d9f8b448515d3151a14a3542811aa" -dependencies = [ - "autocfg", -] +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" [[package]] name = "mime" @@ -2636,65 +2383,24 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d811f3e15f28568be3407c8e7fdb6514c1cda3cb30683f15b6a1a1dc4ea14a7" -dependencies = [ - "adler", -] - -[[package]] -name = "mio" -version = "0.6.23" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4afd66f5b91bf2a3bc13fad0e21caedac168ca4c707504e75585648ae80e4cc4" +checksum = "e2d80299ef12ff69b16a84bb182e3b9df68b5a91574d3d4fa6e41b65deec4df1" dependencies = [ - "cfg-if 0.1.10", - "fuchsia-zircon", - "fuchsia-zircon-sys", - "iovec", - "kernel32-sys", - "libc", - "log", - "miow", - "net2", - "slab", - "winapi 0.2.8", + "adler2", ] [[package]] name = "mio" -version = "0.8.11" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" +checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec" dependencies = [ + "hermit-abi 0.3.9", "libc", "log", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.48.0", -] - -[[package]] -name = "mio-uds" -version = "0.6.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afcb699eb26d4332647cc848492bbc15eafb26f08d0304550d5aa1f612e066f0" -dependencies = [ - "iovec", - "libc", - "mio 
0.6.23", -] - -[[package]] -name = "miow" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebd808424166322d4a38da87083bfddd3ac4c131334ed55856112eb06d46944d" -dependencies = [ - "kernel32-sys", - "net2", - "winapi 0.2.8", - "ws2_32-sys", + "windows-sys 0.52.0", ] [[package]] @@ -2703,7 +2409,7 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43766c2b5203b10de348ffe19f7e54564b64f3d6018ff7648d1e2d6d3a0f0a48" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "downcast", "fragile", "lazy_static", @@ -2718,10 +2424,10 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af7cbce79ec385a1d4f54baa90a76401eb15d9cab93685f62e7e9f942aa00ae2" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.79", ] [[package]] @@ -2742,24 +2448,47 @@ dependencies = [ "similar", ] +[[package]] +name = "mockito" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09b34bd91b9e5c5b06338d392463e1318d683cf82ec3d3af4014609be6e2108d" +dependencies = [ + "assert-json-diff", + "bytes", + "colored", + "futures-util", + "http 1.1.0", + "http-body 1.0.1", + "http-body-util", + "hyper 1.4.1", + "hyper-util", + "log", + "rand 0.8.5", + "regex", + "serde_json", + "serde_urlencoded", + "similar", + "tokio", +] + [[package]] name = "mozsvc-common" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "51fba38c7ded23ca88a409f72277d177170b3eadb5e283741182fd3cae60ecdf" dependencies = [ - "hostname", + "hostname 0.3.1", "lazy_static", "reqwest 0.11.27", ] [[package]] name = "native-tls" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" +checksum = 
"a8614eb2c83d59d1c8cc974dd3f920198647674a0a035e1af1fa58707e317466" dependencies = [ - "lazy_static", "libc", "log", "openssl", @@ -2771,17 +2500,6 @@ dependencies = [ "tempfile", ] -[[package]] -name = "net2" -version = "0.2.39" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b13b648036a2339d06de780866fbdfda0dde886de7b3af2ddeba8b14f4ee34ac" -dependencies = [ - "cfg-if 0.1.10", - "libc", - "winapi 0.3.9", -] - [[package]] name = "nom" version = "7.1.3" @@ -2794,11 +2512,10 @@ dependencies = [ [[package]] name = "num-bigint" -version = "0.4.4" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "608e7659b5c3d7cba262d894801b9ec9d00de989e8a82bd4bef91d08da45cdc0" +checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" dependencies = [ - "autocfg", "num-integer", "num-traits", ] @@ -2820,9 +2537,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.18" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ "autocfg", ] @@ -2848,30 +2565,21 @@ dependencies = [ [[package]] name = "object" -version = "0.32.2" +version = "0.36.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" +checksum = "084f1a5821ac4c651660a94a7153d27ac9d8a53736203f58b31945ded098070a" dependencies = [ "memchr", ] [[package]] name = "once_cell" -version = "1.19.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" - -[[package]] -name = "opaque-debug" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" - -[[package]] -name = "opaque-debug" -version = "0.3.1" +version = "1.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" +checksum = "82881c4be219ab5faaf2ad5e5e5ecdff8c66bd7402ca3160975c93b24961afd1" +dependencies = [ + "portable-atomic", +] [[package]] name = "openssl" @@ -2879,8 +2587,8 @@ version = "0.10.66" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9529f4786b70a3e8c61e11179af17ab6188ad8d0ded78c5529441ed39d4bd9c1" dependencies = [ - "bitflags 2.5.0", - "cfg-if 1.0.0", + "bitflags 2.6.0", + "cfg-if", "foreign-types", "libc", "once_cell", @@ -2896,7 +2604,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.79", ] [[package]] @@ -2938,17 +2646,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "parking_lot" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f842b1982eb6c2fe34036a4fbfb06dd185a3f5c8edfaacdf7d1ea10b07de6252" -dependencies = [ - "lock_api 0.3.4", - "parking_lot_core 0.6.3", - "rustc_version 0.2.3", -] - [[package]] name = "parking_lot" version = "0.11.2" @@ -2956,33 +2653,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" dependencies = [ "instant", - "lock_api 0.4.11", + "lock_api", "parking_lot_core 0.8.6", ] [[package]] name = "parking_lot" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" -dependencies = [ - "lock_api 0.4.11", - "parking_lot_core 0.9.9", -] - -[[package]] -name = "parking_lot_core" -version = "0.6.3" +version = "0.12.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "bda66b810a62be75176a80873726630147a5ca780cd33921e0b5709033e66b0a" +checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" dependencies = [ - "cfg-if 0.1.10", - "cloudabi", - "libc", - "redox_syscall 0.1.57", - "rustc_version 0.2.3", - "smallvec 0.6.14", - "winapi 0.3.9", + "lock_api", + "parking_lot_core 0.9.10", ] [[package]] @@ -2991,32 +2673,32 @@ version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "instant", "libc", "redox_syscall 0.2.16", - "smallvec 1.13.2", - "winapi 0.3.9", + "smallvec", + "winapi", ] [[package]] name = "parking_lot_core" -version = "0.9.9" +version = "0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e" +checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", - "redox_syscall 0.4.1", - "smallvec 1.13.2", - "windows-targets 0.48.5", + "redox_syscall 0.5.7", + "smallvec", + "windows-targets 0.52.6", ] [[package]] name = "paste" -version = "1.0.14" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" +checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" [[package]] name = "pathdiff" @@ -3032,11 +2714,12 @@ checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" [[package]] name = "pem" -version = "1.1.1" +version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8835c273a76a90455d7344889b0964598e3316e2a79ede8e36f16bdcf2228b8" +checksum = 
"8e459365e590736a54c3fa561947c84837534b8e9af6fc5bf781307e82658fae" dependencies = [ - "base64 0.13.1", + "base64 0.22.1", + "serde", ] [[package]] @@ -3047,9 +2730,9 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.9" +version = "2.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "311fb059dee1a7b802f036316d790138c613a4e8b180c822e3925a662e9f0c95" +checksum = "fdbef9d1d47087a895abd220ed25eb4ad973a5e26f6a4367b038c25e28dfc2d9" dependencies = [ "memchr", "thiserror", @@ -3058,9 +2741,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.7.9" +version = "2.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f73541b156d32197eecda1a4014d7f868fd2bcb3c550d5386087cfba442bf69c" +checksum = "4d3a6e3394ec80feb3b6393c725571754c6188490265c61aaf260810d6b95aa0" dependencies = [ "pest", "pest_generator", @@ -3068,54 +2751,28 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.7.9" +version = "2.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c35eeed0a3fab112f75165fdc026b3913f4183133f19b49be773ac9ea966e8bd" +checksum = "94429506bde1ca69d1b5601962c73f4172ab4726571a59ea95931218cb0e930e" dependencies = [ "pest", "pest_meta", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.79", ] [[package]] name = "pest_meta" -version = "2.7.9" +version = "2.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2adbf29bb9776f28caece835398781ab24435585fe0d4dc1374a61db5accedca" +checksum = "ac8a071862e93690b6e34e9a5fb8e33ff3734473ac0245b27232222c4906a33f" dependencies = [ "once_cell", "pest", - "sha2 0.10.8", -] - -[[package]] -name = "pin-project" -version = "1.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" -dependencies = [ - 
"pin-project-internal", -] - -[[package]] -name = "pin-project-internal" -version = "1.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.58", + "sha2", ] -[[package]] -name = "pin-project-lite" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "257b64915a082f7811703966789728173279bdebb956b143dbcd23f6f970a777" - [[package]] name = "pin-project-lite" version = "0.2.14" @@ -3130,9 +2787,15 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkg-config" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" +checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" + +[[package]] +name = "portable-atomic" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc9c68a3f6da06753e9335d63e27f6b9754dd1920d941135b7ea8224f141adb2" [[package]] name = "powerfmt" @@ -3142,15 +2805,18 @@ checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" [[package]] name = "ppv-lite86" -version = "0.2.17" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" +dependencies = [ + "zerocopy", +] [[package]] name = "predicates" -version = "3.1.0" +version = "3.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68b87bfd4605926cdfefc1c3b5f8fe560e3feca9d5552cf68c466d3d8236c7e8" +checksum = "7e9086cc7640c29a356d1a29fd134380bee9d8f79a17410aa76e7ad295f42c97" dependencies = [ 
"anstyle", "predicates-core", @@ -3158,15 +2824,15 @@ dependencies = [ [[package]] name = "predicates-core" -version = "1.0.6" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b794032607612e7abeb4db69adb4e33590fa6cf1149e95fd7cb00e634b92f174" +checksum = "ae8177bee8e75d6846599c6b9ff679ed51e882816914eec639944d7c9aa11931" [[package]] name = "predicates-tree" -version = "1.0.9" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "368ba315fb8c5052ab692e68a0eefec6ec57b23a36959c14496f0b0df2c0cecf" +checksum = "41b740d195ed3166cd147c8047ec98db0e22ec019eb8eeb76d343b795304fb13" dependencies = [ "predicates-core", "termtree", @@ -3198,9 +2864,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.79" +version = "1.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e835ff2298f5721608eb1a980ecaee1aef2c132bf95ecc026a11b7bf3c01c02e" +checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" dependencies = [ "unicode-ident", ] @@ -3213,26 +2879,13 @@ checksum = "106dd99e98437432fed6519dedecfade6a06a73bb7b2a1e019fdd2bee5778d94" [[package]] name = "quote" -version = "1.0.35" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" +checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" dependencies = [ "proc-macro2", ] -[[package]] -name = "rand" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "552840b97013b1a26992c11eac34bdd778e464601a4c2054b5f0bff7c6761293" -dependencies = [ - "fuchsia-cprng", - "libc", - "rand_core 0.3.1", - "rdrand", - "winapi 0.3.9", -] - [[package]] name = "rand" version = "0.7.3" @@ -3277,21 +2930,6 @@ dependencies = [ "rand_core 0.6.4", ] -[[package]] -name = "rand_core" -version = "0.3.1" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" -dependencies = [ - "rand_core 0.4.2", -] - -[[package]] -name = "rand_core" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" - [[package]] name = "rand_core" version = "0.5.1" @@ -3307,7 +2945,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.12", + "getrandom 0.2.15", ] [[package]] @@ -3319,21 +2957,6 @@ dependencies = [ "rand_core 0.5.1", ] -[[package]] -name = "rdrand" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" -dependencies = [ - "rand_core 0.3.1", -] - -[[package]] -name = "redox_syscall" -version = "0.1.57" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" - [[package]] name = "redox_syscall" version = "0.2.16" @@ -3345,29 +2968,29 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.4.1" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" +checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.6.0", ] [[package]] name = "redox_users" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd283d9651eeda4b2a83a43c1c91b266c40fd76ecd39a50a8c630ae69dc72891" +checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" 
dependencies = [ - "getrandom 0.2.12", + "getrandom 0.2.15", "libredox", "thiserror", ] [[package]] name = "regex" -version = "1.10.4" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c117dbdfde9c8308975b6a18d71f3f385c89461f7b3fb054288ecf2a2058ba4c" +checksum = "38200e5ee88914975b69f657f0801b6f6dccafd44fd9326302a4aaeecfacb1d8" dependencies = [ "aho-corasick", "memchr", @@ -3377,20 +3000,26 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.6" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea" +checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3" dependencies = [ "aho-corasick", "memchr", "regex-syntax", ] +[[package]] +name = "regex-lite" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53a49587ad06b26609c52e423de037e7f57f20d53535d66e08c695f347df952a" + [[package]] name = "regex-syntax" -version = "0.8.3" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "reqwest" @@ -3399,14 +3028,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62" dependencies = [ "base64 0.21.7", - "bytes 1.6.0", + "bytes", "encoding_rs", "futures-core", "futures-util", "h2 0.3.26", "http 0.2.12", "http-body 0.4.6", - "hyper 0.14.28", + "hyper 0.14.30", "hyper-tls 0.5.0", "ipnet", "js-sys", @@ -3415,14 +3044,14 @@ dependencies = [ "native-tls", "once_cell", "percent-encoding", - "pin-project-lite 0.2.14", - "rustls-pemfile", + "pin-project-lite", + "rustls-pemfile 1.0.4", "serde", "serde_json", 
"serde_urlencoded", - "sync_wrapper", - "system-configuration", - "tokio 1.37.0", + "sync_wrapper 0.1.2", + "system-configuration 0.5.1", + "tokio", "tokio-native-tls", "tower-service", "url", @@ -3434,21 +3063,22 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.2" +version = "0.12.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d66674f2b6fb864665eea7a3c1ac4e3dfacd2fda83cf6f935a612e01b0e3338" +checksum = "f713147fbe92361e52392c73b8c9e48c04c6625bce969ef54dc901e58e042a7b" dependencies = [ - "base64 0.21.7", - "bytes 1.6.0", + "base64 0.22.1", + "bytes", "encoding_rs", "futures-channel", "futures-core", "futures-util", - "h2 0.4.4", + "h2 0.4.6", "http 1.1.0", - "http-body 1.0.0", + "http-body 1.0.1", "http-body-util", - "hyper 1.2.0", + "hyper 1.4.1", + "hyper-rustls 0.27.3", "hyper-tls 0.6.0", "hyper-util", "ipnet", @@ -3458,36 +3088,21 @@ dependencies = [ "native-tls", "once_cell", "percent-encoding", - "pin-project-lite 0.2.14", - "rustls-pemfile", + "pin-project-lite", + "rustls-pemfile 2.2.0", "serde", "serde_json", "serde_urlencoded", - "sync_wrapper", - "system-configuration", - "tokio 1.37.0", + "sync_wrapper 1.0.1", + "system-configuration 0.6.1", + "tokio", "tokio-native-tls", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "winreg", -] - -[[package]] -name = "ring" -version = "0.16.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" -dependencies = [ - "cc", - "libc", - "once_cell", - "spin 0.5.2", - "untrusted 0.7.1", - "web-sys", - "winapi 0.3.9", + "windows-registry", ] [[package]] @@ -3497,11 +3112,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" dependencies = [ "cc", - "cfg-if 1.0.0", - "getrandom 0.2.12", + "cfg-if", + "getrandom 0.2.15", "libc", - "spin 
0.9.8", - "untrusted 0.9.0", + "spin", + "untrusted", "windows-sys 0.52.0", ] @@ -3512,109 +3127,26 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b91f7eff05f748767f183df4320a63d6936e9c6107d97c9e6bdd9784f4289c94" dependencies = [ "base64 0.21.7", - "bitflags 2.5.0", + "bitflags 2.6.0", "serde", "serde_derive", ] [[package]] -name = "rusoto_core" -version = "0.47.0" +name = "rust-ini" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b4f000e8934c1b4f70adde180056812e7ea6b1a247952db8ee98c94cd3116cc" +checksum = "7e2a3bcec1f113553ef1c88aae6c020a369d03d55b58de9869a0908930385091" dependencies = [ - "async-trait", - "base64 0.13.1", - "bytes 1.6.0", - "crc32fast", - "futures 0.3.30", - "http 0.2.12", - "hyper 0.14.28", - "hyper-rustls 0.22.1", - "lazy_static", - "log", - "rusoto_credential", - "rusoto_signature", - "rustc_version 0.4.0", - "serde", - "serde_json", - "tokio 1.37.0", - "xml-rs", -] - -[[package]] -name = "rusoto_credential" -version = "0.47.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a46b67db7bb66f5541e44db22b0a02fed59c9603e146db3a9e633272d3bac2f" -dependencies = [ - "async-trait", - "chrono", - "dirs-next", - "futures 0.3.30", - "hyper 0.14.28", - "serde", - "serde_json", - "shlex", - "tokio 1.37.0", - "zeroize", -] - -[[package]] -name = "rusoto_dynamodb" -version = "0.47.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7935e1f9ca57c4ee92a4d823dcd698eb8c992f7e84ca21976ae72cd2b03016e7" -dependencies = [ - "async-trait", - "bytes 1.6.0", - "futures 0.3.30", - "rusoto_core", - "serde", - "serde_json", -] - -[[package]] -name = "rusoto_signature" -version = "0.47.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6264e93384b90a747758bcc82079711eacf2e755c3a8b5091687b5349d870bcc" -dependencies = [ - "base64 0.13.1", - "bytes 1.6.0", - "chrono", 
- "digest 0.9.0", - "futures 0.3.30", - "hex", - "hmac", - "http 0.2.12", - "hyper 0.14.28", - "log", - "md-5", - "percent-encoding", - "pin-project-lite 0.2.14", - "rusoto_credential", - "rustc_version 0.4.0", - "serde", - "sha2 0.9.9", - "tokio 1.37.0", -] - -[[package]] -name = "rust-ini" -version = "0.19.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e2a3bcec1f113553ef1c88aae6c020a369d03d55b58de9869a0908930385091" -dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "ordered-multimap", ] [[package]] name = "rustc-demangle" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" +checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" [[package]] name = "rustc-hash" @@ -3624,29 +3156,20 @@ checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] name = "rustc_version" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" -dependencies = [ - "semver 0.9.0", -] - -[[package]] -name = "rustc_version" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" dependencies = [ - "semver 1.0.22", + "semver", ] [[package]] name = "rustix" -version = "0.38.32" +version = "0.38.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65e04861e65f21776e67888bfbea442b3642beaa0138fdb1dd7a84a52dffdb89" +checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "errno", "libc", "linux-raw-sys", @@ -3655,51 +3178,41 @@ 
dependencies = [ [[package]] name = "rustls" -version = "0.19.1" +version = "0.21.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35edb675feee39aec9c99fa5ff985081995a06d594114ae14cbe797ad7b7a6d7" +checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" dependencies = [ - "base64 0.13.1", "log", - "ring 0.16.20", - "sct 0.6.1", - "webpki 0.21.4", + "ring", + "rustls-webpki 0.101.7", + "sct", ] [[package]] name = "rustls" -version = "0.20.9" +version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b80e3dec595989ea8510028f30c408a4630db12c9cbb8de34203b89d6577e99" +checksum = "bf4ef73721ac7bcd79b2b315da7779d8fc09718c6b3d2d1b2d94850eb8c18432" dependencies = [ "log", - "ring 0.16.20", - "sct 0.7.1", - "webpki 0.22.4", + "ring", + "rustls-pki-types", + "rustls-webpki 0.102.8", + "subtle", + "zeroize", ] [[package]] name = "rustls" -version = "0.21.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fecbfb7b1444f477b345853b1fce097a2c6fb637b2bfb87e6bc5db0f043fae4" -dependencies = [ - "log", - "ring 0.17.8", - "rustls-webpki", - "sct 0.7.1", -] - -[[package]] -name = "rustls-native-certs" -version = "0.5.0" +version = "0.23.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a07b7c1885bd8ed3831c289b7870b13ef46fe0e856d288c30d9cc17d75a2092" +checksum = "f2dabaac7466917e566adb06783a81ca48944c6898a1b08b9374106dd671f4c8" dependencies = [ - "openssl-probe", - "rustls 0.19.1", - "schannel", - "security-framework", + "once_cell", + "rustls-pki-types", + "rustls-webpki 0.102.8", + "subtle", + "zeroize", ] [[package]] @@ -3709,7 +3222,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" dependencies = [ "openssl-probe", - "rustls-pemfile", + "rustls-pemfile 1.0.4", "schannel", "security-framework", ] 
@@ -3723,27 +3236,53 @@ dependencies = [ "base64 0.21.7", ] +[[package]] +name = "rustls-pemfile" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" +dependencies = [ + "rustls-pki-types", +] + +[[package]] +name = "rustls-pki-types" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e696e35370c65c9c541198af4543ccd580cf17fc25d8e05c5a242b202488c55" + [[package]] name = "rustls-webpki" version = "0.101.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" dependencies = [ - "ring 0.17.8", - "untrusted 0.9.0", + "ring", + "untrusted", +] + +[[package]] +name = "rustls-webpki" +version = "0.102.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" +dependencies = [ + "ring", + "rustls-pki-types", + "untrusted", ] [[package]] name = "rustversion" -version = "1.0.14" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" +checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" [[package]] name = "ryu" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e86697c916019a8588c99b5fac3cead74ec0b4b819707a682fd4d23fa0ce1ba1" +checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" [[package]] name = "same-file" @@ -3756,43 +3295,27 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534" +checksum = 
"e9aaafd5a2b6e3d657ff009d82fbd630b6bd54dd4eb06f21693925cdf80f9b8b" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] -[[package]] -name = "scoped-tls" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "332ffa32bf586782a3efaeb58f127980944bbc8c4d6913a86107ac2a5ab24b28" - [[package]] name = "scopeguard" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" -[[package]] -name = "sct" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b362b83898e0e69f38515b82ee15aa80636befe47c3b6d3d89a911e78fc228ce" -dependencies = [ - "ring 0.16.20", - "untrusted 0.7.1", -] - [[package]] name = "sct" version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" dependencies = [ - "ring 0.17.8", - "untrusted 0.9.0", + "ring", + "untrusted", ] [[package]] @@ -3803,11 +3326,11 @@ checksum = "1c107b6f4780854c8b126e228ea8869f4d7b71260f962fefb57b996b8959ba6b" [[package]] name = "security-framework" -version = "2.10.0" +version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "770452e37cad93e0a50d5abc3990d2bc351c36d0328f86cefec2f2fb206eaef6" +checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.6.0", "core-foundation", "core-foundation-sys", "libc", @@ -3816,9 +3339,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.10.0" +version = "2.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41f3cc463c0ef97e11c3461a9d3787412d30e8e7eb907c79180c4a57bf7c04ef" +checksum = "ea4a292869320c0272d7bc55a5a6aafaff59b4f63404a003887b679a2e05b4b6" dependencies = [ "core-foundation-sys", 
"libc", @@ -3826,50 +3349,35 @@ dependencies = [ [[package]] name = "semver" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" -dependencies = [ - "semver-parser", -] - -[[package]] -name = "semver" -version = "1.0.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92d43fe69e652f3df9bdc2b85b2854a0825b86e4fb76bc44d945137d053639ca" - -[[package]] -name = "semver-parser" -version = "0.7.0" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" +checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" [[package]] name = "sentry" -version = "0.32.2" +version = "0.32.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "766448f12e44d68e675d5789a261515c46ac6ccd240abdd451a9c46c84a49523" +checksum = "00421ed8fa0c995f07cde48ba6c89e80f2b312f74ff637326f392fbfd23abe02" dependencies = [ "httpdate", "log", "native-tls", - "reqwest 0.11.27", + "reqwest 0.12.8", "sentry-backtrace", "sentry-contexts", "sentry-core", "sentry-debug-images", "sentry-panic", "sentry-tracing", - "tokio 1.37.0", + "tokio", "ureq", ] [[package]] name = "sentry-actix" -version = "0.32.2" +version = "0.32.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "285f2c101ed64f00585ec98f0d81a402c1cf5c2f9b1e1d53a84901812bcd4530" +checksum = "d1986312ea8425a28299262ead2483ca8f0e167994f9239848d5718041abcd49" dependencies = [ "actix-web", "futures-util", @@ -3878,9 +3386,9 @@ dependencies = [ [[package]] name = "sentry-backtrace" -version = "0.32.2" +version = "0.32.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32701cad8b3c78101e1cd33039303154791b0ff22e7802ed8cc23212ef478b45" +checksum = 
"a79194074f34b0cbe5dd33896e5928bbc6ab63a889bd9df2264af5acb186921e" dependencies = [ "backtrace", "once_cell", @@ -3890,23 +3398,23 @@ dependencies = [ [[package]] name = "sentry-contexts" -version = "0.32.2" +version = "0.32.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17ddd2a91a13805bd8dab4ebf47323426f758c35f7bf24eacc1aded9668f3824" +checksum = "eba8870c5dba2bfd9db25c75574a11429f6b95957b0a78ac02e2970dd7a5249a" dependencies = [ - "hostname", + "hostname 0.4.0", "libc", "os_info", - "rustc_version 0.4.0", + "rustc_version", "sentry-core", "uname", ] [[package]] name = "sentry-core" -version = "0.32.2" +version = "0.32.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1189f68d7e7e102ef7171adf75f83a59607fafd1a5eecc9dc06c026ff3bdec4" +checksum = "46a75011ea1c0d5c46e9e57df03ce81f5c7f0a9e199086334a1f9c0a541e0826" dependencies = [ "log", "once_cell", @@ -3918,9 +3426,9 @@ dependencies = [ [[package]] name = "sentry-debug-images" -version = "0.32.2" +version = "0.32.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b4d0a615e5eeca5699030620c119a094e04c14cf6b486ea1030460a544111a7" +checksum = "7ec2a486336559414ab66548da610da5e9626863c3c4ffca07d88f7dc71c8de8" dependencies = [ "findshlibs", "once_cell", @@ -3929,9 +3437,9 @@ dependencies = [ [[package]] name = "sentry-panic" -version = "0.32.2" +version = "0.32.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1c18d0b5fba195a4950f2f4c31023725c76f00aabb5840b7950479ece21b5ca" +checksum = "2eaa3ecfa3c8750c78dcfd4637cfa2598b95b52897ed184b4dc77fcf7d95060d" dependencies = [ "sentry-backtrace", "sentry-core", @@ -3939,9 +3447,9 @@ dependencies = [ [[package]] name = "sentry-tracing" -version = "0.32.2" +version = "0.32.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3012699a9957d7f97047fd75d116e22d120668327db6e7c59824582e16e791b2" 
+checksum = "f715932bf369a61b7256687c6f0554141b7ce097287e30e3f7ed6e9de82498fe" dependencies = [ "sentry-backtrace", "sentry-core", @@ -3951,9 +3459,9 @@ dependencies = [ [[package]] name = "sentry-types" -version = "0.32.2" +version = "0.32.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7173fd594569091f68a7c37a886e202f4d0c1db1e1fa1d18a051ba695b2e2ec" +checksum = "4519c900ce734f7a0eb7aba0869dfb225a7af8820634a7dd51449e3b093cfb7c" dependencies = [ "debugid", "hex", @@ -3968,51 +3476,41 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.197" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2" +checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.197" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" +checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", -] - -[[package]] -name = "serde_dynamodb" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0a485bf5e371e28d4978444686ffa91830d4719b92fb31960fbd8f494816239" -dependencies = [ - "bytes 1.6.0", - "rusoto_dynamodb", - "serde", + "syn 2.0.79", ] [[package]] name = "serde_json" -version = "1.0.115" +version = "1.0.128" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12dc5c46daa8e9fdf4f5e71b6cf9a53f2487da0e86e55808e2d35539666497dd" +checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" dependencies = [ - "itoa 1.0.11", + "itoa", + "memchr", "ryu", "serde", ] [[package]] name = "serde_spanned" -version = "0.6.5" +version = "0.6.8" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb3622f419d1296904700073ea6cc23ad690adbd66f13ea683df73298736f0c1" +checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" dependencies = [ "serde", ] @@ -4024,45 +3522,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" dependencies = [ "form_urlencoded", - "itoa 1.0.11", + "itoa", "ryu", "serde", ] -[[package]] -name = "sha-1" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7d94d0bede923b3cea61f3f1ff57ff8cdfd77b400fb8f9998949e0cf04163df" -dependencies = [ - "block-buffer 0.7.3", - "digest 0.8.1", - "fake-simd", - "opaque-debug 0.2.3", -] - [[package]] name = "sha1" version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" dependencies = [ - "cfg-if 1.0.0", - "cpufeatures", - "digest 0.10.7", -] - -[[package]] -name = "sha2" -version = "0.9.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" -dependencies = [ - "block-buffer 0.9.0", - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", - "digest 0.9.0", - "opaque-debug 0.3.1", + "digest", ] [[package]] @@ -4071,9 +3544,9 @@ version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", - "digest 0.10.7", + "digest", ] [[package]] @@ -4084,18 +3557,18 @@ checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "signal-hook-registry" -version = "1.4.1" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" +checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" dependencies = [ "libc", ] [[package]] name = "similar" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa42c91313f1d05da9b26f267f931cf178d4aba455b4c4622dd7355eb80c6640" +checksum = "1de1d4f81173b03af4c0cbed3c898f6bff5b870e4a7f5d6f4057d62a7a4b686e" [[package]] name = "simple_asn1" @@ -4201,15 +3674,6 @@ dependencies = [ "time", ] -[[package]] -name = "smallvec" -version = "0.6.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b97fcaeba89edba30f044a10c6a3cc39df9c3f17d7cd829dd1446cab35f890e0" -dependencies = [ - "maybe-uninit", -] - [[package]] name = "smallvec" version = "1.13.2" @@ -4218,20 +3682,14 @@ checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "socket2" -version = "0.5.6" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05ffd9c0a93b7543e062e759284fcf5f5e3b098501104bfbdde4d404db792871" +checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" dependencies = [ "libc", "windows-sys 0.52.0", ] -[[package]] -name = "spin" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" - [[package]] name = "spin" version = "0.9.8" @@ -4250,33 +3708,39 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" +[[package]] +name = "strsim" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" + [[package]] name = "strum" -version = "0.26.2" 
+version = "0.26.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d8cec3501a5194c432b2b7976db6b7d10ec95c253208b45f83f7136aa985e29" +checksum = "8fec0f0aef304996cf250b31b5a10dee7980c85da9d759361292b8bca5a18f06" dependencies = [ "strum_macros", ] [[package]] name = "strum_macros" -version = "0.26.2" +version = "0.26.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6cf59daf282c0a494ba14fd21610a0325f9f90ec9d1231dea26bcb1d696c946" +checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" dependencies = [ "heck", "proc-macro2", "quote", "rustversion", - "syn 2.0.58", + "syn 2.0.79", ] [[package]] name = "subtle" -version = "2.4.1" +version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "syn" @@ -4285,15 +3749,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ "proc-macro2", - "quote", "unicode-ident", ] [[package]] name = "syn" -version = "2.0.58" +version = "2.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44cfb93f38070beee36b3fef7d4f5a16f27751d94b187b666a5cc5e9b0d30687" +checksum = "89132cd0bf050864e1d38dc3bbc07a0eb8e7530af26344d3d2bbbef83499f590" dependencies = [ "proc-macro2", "quote", @@ -4306,6 +3769,15 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" +[[package]] +name = "sync_wrapper" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" +dependencies = [ + 
"futures-core", +] + [[package]] name = "system-configuration" version = "0.5.1" @@ -4314,7 +3786,18 @@ checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" dependencies = [ "bitflags 1.3.2", "core-foundation", - "system-configuration-sys", + "system-configuration-sys 0.5.0", +] + +[[package]] +name = "system-configuration" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" +dependencies = [ + "bitflags 2.6.0", + "core-foundation", + "system-configuration-sys 0.6.0", ] [[package]] @@ -4327,6 +3810,16 @@ dependencies = [ "libc", ] +[[package]] +name = "system-configuration-sys" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "take_mut" version = "0.2.2" @@ -4335,14 +3828,15 @@ checksum = "f764005d11ee5f36500a149ace24e00e3da98b0158b3e2d53a7495660d3f4d60" [[package]] name = "tempfile" -version = "3.10.1" +version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" +checksum = "f0f2c9fc62d0beef6951ccffd757e241266a2c833136efbe35af6cd2567dca5b" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "fastrand", + "once_cell", "rustix", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -4353,7 +3847,7 @@ checksum = "c59df8ac95d96ff9bede18eb7300b0fda5e5d8d90960e76f8e14ae765eedbf1f" dependencies = [ "dirs-next", "rustversion", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -4382,22 +3876,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.58" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"03468839009160513471e86a034bb2c5c0e4baae3b43f79ffc55c4a5427b3297" +checksum = "d50af8abc119fb8bb6dbabcfa89656f46f84aa0ac7688088608076ad2b459a84" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.58" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7" +checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.79", ] [[package]] @@ -4406,18 +3900,18 @@ version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "once_cell", ] [[package]] name = "time" -version = "0.3.34" +version = "0.3.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8248b6521bb14bc45b4067159b9b6ad792e2d6d754d6c41fb50e29fefe38749" +checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" dependencies = [ "deranged", - "itoa 1.0.11", + "itoa", "libc", "num-conv", "num_threads", @@ -4435,9 +3929,9 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.17" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ba3a3ef41e6672a2f0f001392bb5dcd3ff0a9992d618ca761a11c3121547774" +checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" dependencies = [ "num-conv", "time-core", @@ -4454,9 +3948,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.6.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" +checksum = "445e881f4f6d382d5f27c034e25eb92edd7c784ceab92a0937db7f2e9471b938" 
dependencies = [ "tinyvec_macros", ] @@ -4469,150 +3963,31 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "0.1.22" +version = "1.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a09c0b5bb588872ab2f09afa13ee6e9dac11e10a0ec9e8e3ba39a5a5d530af6" -dependencies = [ - "bytes 0.4.12", - "futures 0.1.31", - "mio 0.6.23", - "num_cpus", - "tokio-codec", - "tokio-current-thread", - "tokio-executor", - "tokio-fs", - "tokio-io", - "tokio-reactor", - "tokio-sync", - "tokio-tcp", - "tokio-threadpool", - "tokio-timer", - "tokio-udp", - "tokio-uds", -] - -[[package]] -name = "tokio" -version = "0.2.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6703a273949a90131b290be1fe7b039d0fc884aa1935860dfcbe056f28cd8092" -dependencies = [ - "bytes 0.5.6", - "pin-project-lite 0.1.12", - "tokio-macros 0.2.6", -] - -[[package]] -name = "tokio" -version = "1.37.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1adbebffeca75fcfd058afa480fb6c0b81e165a0323f9c9d39c9697e37c46787" +checksum = "e2b070231665d27ad9ec9b8df639893f46727666c6767db40317fbe920a5d998" dependencies = [ "backtrace", - "bytes 1.6.0", + "bytes", "libc", - "mio 0.8.11", - "num_cpus", - "parking_lot 0.12.1", - "pin-project-lite 0.2.14", + "mio", + "parking_lot 0.12.3", + "pin-project-lite", "signal-hook-registry", "socket2", - "tokio-macros 2.2.0", - "windows-sys 0.48.0", -] - -[[package]] -name = "tokio-codec" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25b2998660ba0e70d18684de5d06b70b70a3a747469af9dea7618cc59e75976b" -dependencies = [ - "bytes 0.4.12", - "futures 0.1.31", - "tokio-io", -] - -[[package]] -name = "tokio-core" -version = "0.1.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"87b1395334443abca552f63d4f61d0486f12377c2ba8b368e523f89e828cffd4" -dependencies = [ - "bytes 0.4.12", - "futures 0.1.31", - "iovec", - "log", - "mio 0.6.23", - "scoped-tls", - "tokio 0.1.22", - "tokio-executor", - "tokio-io", - "tokio-reactor", - "tokio-timer", -] - -[[package]] -name = "tokio-current-thread" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1de0e32a83f131e002238d7ccde18211c0a5397f60cbfffcb112868c2e0e20e" -dependencies = [ - "futures 0.1.31", - "tokio-executor", -] - -[[package]] -name = "tokio-executor" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb2d1b8f4548dbf5e1f7818512e9c406860678f29c300cdf0ebac72d1a3a1671" -dependencies = [ - "crossbeam-utils 0.7.2", - "futures 0.1.31", -] - -[[package]] -name = "tokio-fs" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "297a1206e0ca6302a0eed35b700d292b275256f596e2f3fea7729d5e629b6ff4" -dependencies = [ - "futures 0.1.31", - "tokio-io", - "tokio-threadpool", -] - -[[package]] -name = "tokio-io" -version = "0.1.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57fc868aae093479e3131e3d165c93b1c7474109d13c90ec0dda2a1bbfff0674" -dependencies = [ - "bytes 0.4.12", - "futures 0.1.31", - "log", -] - -[[package]] -name = "tokio-macros" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e44da00bfc73a25f814cd8d7e57a68a5c31b74b3152a0a1d1f590c97ed06265a" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", + "tokio-macros", + "windows-sys 0.52.0", ] [[package]] name = "tokio-macros" -version = "2.2.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" +checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" 
dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.79", ] [[package]] @@ -4622,48 +3997,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" dependencies = [ "native-tls", - "tokio 1.37.0", -] - -[[package]] -name = "tokio-reactor" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09bc590ec4ba8ba87652da2068d150dcada2cfa2e07faae270a5e0409aa51351" -dependencies = [ - "crossbeam-utils 0.7.2", - "futures 0.1.31", - "lazy_static", - "log", - "mio 0.6.23", - "num_cpus", - "parking_lot 0.9.0", - "slab", - "tokio-executor", - "tokio-io", - "tokio-sync", -] - -[[package]] -name = "tokio-rustls" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" -dependencies = [ - "rustls 0.19.1", - "tokio 1.37.0", - "webpki 0.21.4", -] - -[[package]] -name = "tokio-rustls" -version = "0.23.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" -dependencies = [ - "rustls 0.20.9", - "tokio 1.37.0", - "webpki 0.22.4", + "tokio", ] [[package]] @@ -4672,115 +4006,50 @@ version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" dependencies = [ - "rustls 0.21.11", - "tokio 1.37.0", -] - -[[package]] -name = "tokio-sync" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edfe50152bc8164fcc456dab7891fa9bf8beaf01c5ee7e1dd43a397c3cf87dee" -dependencies = [ - "fnv", - "futures 0.1.31", -] - -[[package]] -name = "tokio-tcp" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"98df18ed66e3b72e742f185882a9e201892407957e45fbff8da17ae7a7c51f72" -dependencies = [ - "bytes 0.4.12", - "futures 0.1.31", - "iovec", - "mio 0.6.23", - "tokio-io", - "tokio-reactor", + "rustls 0.21.12", + "tokio", ] [[package]] -name = "tokio-threadpool" -version = "0.1.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df720b6581784c118f0eb4310796b12b1d242a7eb95f716a8367855325c25f89" -dependencies = [ - "crossbeam-deque", - "crossbeam-queue", - "crossbeam-utils 0.7.2", - "futures 0.1.31", - "lazy_static", - "log", - "num_cpus", - "slab", - "tokio-executor", -] - -[[package]] -name = "tokio-timer" -version = "0.2.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93044f2d313c95ff1cb7809ce9a7a05735b012288a888b62d4434fd58c94f296" -dependencies = [ - "crossbeam-utils 0.7.2", - "futures 0.1.31", - "slab", - "tokio-executor", -] - -[[package]] -name = "tokio-udp" -version = "0.1.6" +name = "tokio-rustls" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2a0b10e610b39c38b031a2fcab08e4b82f16ece36504988dcbd81dbba650d82" +checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" dependencies = [ - "bytes 0.4.12", - "futures 0.1.31", - "log", - "mio 0.6.23", - "tokio-codec", - "tokio-io", - "tokio-reactor", + "rustls 0.22.4", + "rustls-pki-types", + "tokio", ] [[package]] -name = "tokio-uds" -version = "0.2.7" +name = "tokio-rustls" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab57a4ac4111c8c9dbcf70779f6fc8bc35ae4b2454809febac840ad19bd7e4e0" +checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "bytes 0.4.12", - "futures 0.1.31", - "iovec", - "libc", - "log", - "mio 0.6.23", - "mio-uds", - "tokio-codec", - "tokio-io", - "tokio-reactor", + "rustls 0.23.13", + "rustls-pki-types", + "tokio", ] [[package]] name = "tokio-util" 
-version = "0.7.10" +version = "0.7.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" +checksum = "61e7c3654c13bcd040d4a03abee2c75b1d14a37b423cf5a813ceae1cc903ec6a" dependencies = [ - "bytes 1.6.0", + "bytes", "futures-core", "futures-sink", - "pin-project-lite 0.2.14", - "tokio 1.37.0", - "tracing", + "pin-project-lite", + "tokio", ] [[package]] name = "toml" -version = "0.8.12" +version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9dd1545e8208b4a5af1aa9bbd0b4cf7e9ea08fabc5d0a5c67fcaafa17433aa3" +checksum = "a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e" dependencies = [ "serde", "serde_spanned", @@ -4790,18 +4059,18 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.6.5" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1" +checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" dependencies = [ "serde", ] [[package]] name = "toml_edit" -version = "0.22.9" +version = "0.22.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e40bb779c5187258fd7aad0eb68cb8706a0a81fa712fbea808ab43c4b8374c4" +checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ "indexmap", "serde", @@ -4810,33 +4079,11 @@ dependencies = [ "winnow", ] -[[package]] -name = "tower" -version = "0.4.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" -dependencies = [ - "futures-core", - "futures-util", - "pin-project", - "pin-project-lite 0.2.14", - "tokio 1.37.0", - "tower-layer", - "tower-service", - "tracing", -] - -[[package]] -name = "tower-layer" -version = "0.3.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" - [[package]] name = "tower-service" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" @@ -4845,7 +4092,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" dependencies = [ "log", - "pin-project-lite 0.2.14", + "pin-project-lite", "tracing-core", ] @@ -4874,25 +4121,6 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" -[[package]] -name = "tungstenite" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a0c2bd5aeb7dcd2bb32e472c8872759308495e5eccc942e929a513cd8d36110" -dependencies = [ - "base64 0.11.0", - "byteorder", - "bytes 0.4.12", - "http 0.1.21", - "httparse", - "input_buffer", - "log", - "rand 0.7.3", - "sha-1", - "url", - "utf-8", -] - [[package]] name = "typenum" version = "1.17.0" @@ -4901,9 +4129,9 @@ checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" [[package]] name = "ucd-trie" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" +checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971" [[package]] name = "uname" @@ -4916,42 +4144,36 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.15" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" +checksum = "5ab17db44d7388991a428b2ee655ce0c212e862eff1768a455c58f9aad6e7893" [[package]] name = "unicode-ident" -version = "1.0.12" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" +checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" [[package]] name = "unicode-normalization" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" +checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956" dependencies = [ "tinyvec", ] [[package]] name = "unicode-segmentation" -version = "1.11.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202" +checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" [[package]] name = "unicode-width" -version = "0.1.11" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51733f11c9c4f72aa0c160008246859e340b00807569a0da0e7a1079b27ba85" - -[[package]] -name = "untrusted" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" +checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" [[package]] name = "untrusted" @@ -4961,11 +4183,11 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "ureq" -version = "2.9.6" +version = "2.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11f214ce18d8b2cbe84ed3aa6486ed3f5b285cf8d8fbdbce9f3f767a724adc35" +checksum = 
"b74fc6b57825be3373f7054754755f03ac3a8f5d70015ccad699ba2029956f4a" dependencies = [ - "base64 0.21.7", + "base64 0.22.1", "log", "native-tls", "once_cell", @@ -4974,9 +4196,9 @@ dependencies = [ [[package]] name = "url" -version = "2.5.0" +version = "2.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" +checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" dependencies = [ "form_urlencoded", "idna", @@ -4984,33 +4206,27 @@ dependencies = [ "serde", ] -[[package]] -name = "utf-8" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" - [[package]] name = "utf8parse" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" +checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.8.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a183cf7feeba97b4dd1c0d46788634f6221d87fa961b305bed08c851829efcc0" +checksum = "81dfa00651efa65069b0b6b651f4aaa31ba9e3c3ce0137aaad053604ee7e0314" dependencies = [ - "getrandom 0.2.12", + "getrandom 0.2.15", "serde", ] [[package]] name = "validator" -version = "0.17.0" +version = "0.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da339118f018cc70ebf01fafc103360528aad53717e4bf311db929cb01cb9345" +checksum = "db79c75af171630a3148bd3e6d7c4f42b6a9a014c2945bc5ed0020cbb8d9478e" dependencies = [ "idna", "once_cell", @@ -5023,16 +4239,16 @@ dependencies = [ [[package]] name = "validator_derive" -version = "0.17.0" +version = "0.18.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"76e88ea23b8f5e59230bff8a2f03c0ee0054a61d5b8343a38946bcd406fe624c" +checksum = "df0bcf92720c40105ac4b2dda2a4ea3aa717d4d6a862cc217da653a4bd5c6b10" dependencies = [ "darling", + "once_cell", "proc-macro-error", "proc-macro2", "quote", - "regex", - "syn 2.0.58", + "syn 2.0.79", ] [[package]] @@ -5055,9 +4271,9 @@ checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" [[package]] name = "version_check" -version = "0.9.4" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" [[package]] name = "walkdir" @@ -5092,36 +4308,37 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" +checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", + "once_cell", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" +checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.79", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.42" +version = "0.4.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76bc14366121efc8dbb487ab05bcc9d346b3b5ec0eaa76e46594cabbe51762c0" +checksum = "61e9300f63a621e96ed275155c108eb6f843b6a26d053f122ab69724559dc8ed" dependencies = [ - "cfg-if 
1.0.0", + "cfg-if", "js-sys", "wasm-bindgen", "web-sys", @@ -5129,9 +4346,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" +checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -5139,22 +4356,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" +checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.79", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" +checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484" [[package]] name = "wasm-timer" @@ -5173,41 +4390,21 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.69" +version = "0.3.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef" +checksum = "26fdeaafd9bd129f65e7c031593c24d62186301e0c72c8978fa1678be7d532c0" dependencies = [ "js-sys", "wasm-bindgen", ] -[[package]] -name = "webpki" -version = "0.21.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8e38c0608262c46d4a56202ebabdeb094cef7e560ca7a226c6bf055188aa4ea" -dependencies = [ - "ring 0.16.20", - "untrusted 0.7.1", -] - -[[package]] -name = "webpki" -version = "0.22.4" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed63aea5ce73d0ff405984102c42de94fc55a6b75765d621c65262469b3c9b53" -dependencies = [ - "ring 0.17.8", - "untrusted 0.9.0", -] - [[package]] name = "webpki-roots" -version = "0.22.6" +version = "0.26.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6c71e40d7d2c34a5106301fb632274ca37242cd0c9d3e64dbece371a40a2d87" +checksum = "841c67bff177718f1d4dfefde8d8f0e78f9b6589319ba88312f567fc5841a958" dependencies = [ - "webpki 0.22.4", + "rustls-pki-types", ] [[package]] @@ -5222,12 +4419,6 @@ dependencies = [ "rustix", ] -[[package]] -name = "winapi" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" - [[package]] name = "winapi" version = "0.3.9" @@ -5238,12 +4429,6 @@ dependencies = [ "winapi-x86_64-pc-windows-gnu", ] -[[package]] -name = "winapi-build" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" - [[package]] name = "winapi-i686-pc-windows-gnu" version = "0.4.0" @@ -5252,11 +4437,11 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.6" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596" +checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" dependencies = [ - "winapi 0.3.9", + "windows-sys 0.59.0", ] [[package]] @@ -5265,13 +4450,53 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "windows" +version = "0.52.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "e48a53791691ab099e5e2ad123536d0fff50652600abaf43bbf952894110d0be" +dependencies = [ + "windows-core", + "windows-targets 0.52.6", +] + [[package]] name = "windows-core" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets 0.52.4", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-registry" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e400001bb720a623c1c69032f8e3e4cf09984deec740f007dd2b03ec864804b0" +dependencies = [ + "windows-result", + "windows-strings", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-result" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-strings" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" +dependencies = [ + "windows-result", + "windows-targets 0.52.6", ] [[package]] @@ -5289,7 +4514,16 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.4", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets 0.52.6", ] [[package]] @@ -5309,17 +4543,18 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.4" +version = "0.52.6" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dd37b7e5ab9018759f893a1952c9420d060016fc19a472b4bb20d1bdd694d1b" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ - "windows_aarch64_gnullvm 0.52.4", - "windows_aarch64_msvc 0.52.4", - "windows_i686_gnu 0.52.4", - "windows_i686_msvc 0.52.4", - "windows_x86_64_gnu 0.52.4", - "windows_x86_64_gnullvm 0.52.4", - "windows_x86_64_msvc 0.52.4", + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", ] [[package]] @@ -5330,9 +4565,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.4" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcf46cf4c365c6f2d1cc93ce535f2c8b244591df96ceee75d8e83deb70a9cac9" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] name = "windows_aarch64_msvc" @@ -5342,9 +4577,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.4" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da9f259dd3bcf6990b55bffd094c4f7235817ba4ceebde8e6d11cd0c5633b675" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] name = "windows_i686_gnu" @@ -5354,9 +4589,15 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.4" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = 
"windows_i686_gnullvm" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b474d8268f99e0995f25b9f095bc7434632601028cf86590aea5c8a5cb7801d3" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_msvc" @@ -5366,9 +4607,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.4" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1515e9a29e5bed743cb4415a9ecf5dfca648ce85ee42e15873c3cd8610ff8e02" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name = "windows_x86_64_gnu" @@ -5378,9 +4619,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.4" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5eee091590e89cc02ad514ffe3ead9eb6b660aedca2183455434b93546371a03" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnullvm" @@ -5390,9 +4631,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.4" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77ca79f2451b49fa9e2af39f0747fe999fcda4f5e241b2898624dca97a1f2177" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] name = "windows_x86_64_msvc" @@ -5402,15 +4643,15 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.4" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"32b752e52a2da0ddfbdbcc6fceadfeede4c939ed16d13e648833a61dfb611ed8" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" -version = "0.6.5" +version = "0.6.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dffa400e67ed5a4dd237983829e66475f0a4a26938c4b04c21baede6262215b8" +checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b" dependencies = [ "memchr", ] @@ -5421,7 +4662,7 @@ version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "windows-sys 0.48.0", ] @@ -5435,22 +4676,6 @@ dependencies = [ "regex", ] -[[package]] -name = "ws2_32-sys" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" -dependencies = [ - "winapi 0.2.8", - "winapi-build", -] - -[[package]] -name = "xml-rs" -version = "0.8.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "791978798f0597cfc70478424c2b4fdc2b7a8024aaff78497ef00f24ef674193" - [[package]] name = "yaml-rust" version = "0.4.5" @@ -5462,56 +4687,57 @@ dependencies = [ [[package]] name = "yup-oauth2" -version = "8.3.1" +version = "8.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24bea7df5a9a74a9a0de92f22e5ab3fb9505dd960c7f1f00de5b7231d9d97206" +checksum = "b61da40aeb0907a65f7fb5c1de83c5a224d6a9ebb83bf918588a2bb744d636b8" dependencies = [ "anyhow", "async-trait", - "base64 0.13.1", + "base64 0.21.7", "futures 0.3.30", "http 0.2.12", - "hyper 0.14.28", + "hyper 0.14.30", "hyper-rustls 0.24.2", "itertools", "log", "percent-encoding", - "rustls 0.21.11", - "rustls-pemfile", + "rustls 0.22.4", + "rustls-pemfile 1.0.4", "seahash", "serde", "serde_json", "time", - "tokio 
1.37.0", + "tokio", "tower-service", "url", ] [[package]] name = "zerocopy" -version = "0.7.32" +version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be" +checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" dependencies = [ + "byteorder", "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.32" +version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" +checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.79", ] [[package]] name = "zeroize" -version = "1.7.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" +checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" dependencies = [ "zeroize_derive", ] @@ -5524,32 +4750,32 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.79", ] [[package]] name = "zstd" -version = "0.13.1" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d789b1514203a1120ad2429eae43a7bd32b90976a7bb8a05f7ec02fa88cc23a" +checksum = "fcf2b778a664581e31e389454a7072dab1647606d44f7feea22cd5abb9c9f3f9" dependencies = [ "zstd-safe", ] [[package]] name = "zstd-safe" -version = "7.1.0" +version = "7.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cd99b45c6bc03a018c8b8a86025678c87e55526064e38f9df301989dce7ec0a" +checksum = "54a3ab4db68cea366acc5c897c7b4d4d1b8994a9cd6e6f841f8964566a419059" dependencies = [ "zstd-sys", ] [[package]] name = "zstd-sys" -version = 
"2.0.10+zstd.1.5.6" +version = "2.0.13+zstd.1.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c253a4914af5bafc8fa8c86ee400827e83cf6ec01195ec1f1ed8441bf00d65aa" +checksum = "38ff0f21cfee8f97d94cef41359e0c89aa6113028ab0291aa8ca0038995a95aa" dependencies = [ "cc", "pkg-config", diff --git a/Cargo.toml b/Cargo.toml index 0dd1bcdc8..53ca3b5b6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,8 +1,6 @@ [workspace] members = [ "autopush-common", - # Pending removal: https://github.com/mozilla-services/autopush-rs/issues/524 - #"autopush", "autoendpoint", "autoconnect", "autoconnect/autoconnect-common", @@ -70,13 +68,6 @@ openssl = { version = "0.10" } rand = "0.8" regex = "1.4" reqwest = { version = "0.12", features = ["json", "blocking"] } -rusoto_core = { version = "0.47", default-features = false, features = [ - "rustls", -] } # locked by serde_dynamodb 0.9 -rusoto_credential = { version = "0.47" } # locked by serde_dynamodb 0.9 -rusoto_dynamodb = { version = "0.47", default-features = false, features = [ - "rustls", -] } # locked by serde_dynamodb 0.9 sentry = { version = "0.32", features = [ "debug-logs", ] } # Using debug-logs avoids https://github.com/getsentry/sentry-rust/issues/237 @@ -85,7 +76,6 @@ sentry-actix = "0.32" sentry-backtrace = "0.32" serde = "1.0" serde_derive = "1.0" -serde_dynamodb = "0.9" serde_json = "1.0" slog = { version = "2.7", features = [ "dynamic-keys", @@ -99,13 +89,11 @@ slog-scope = "4.4" slog-stdlog = "4.1" slog-term = "2.6" thiserror = "1.0" -tokio = "1.36" +tokio = "1.38" tokio-compat-02 = "0.2" tokio-core = "0.1" tokio-io = "0.1" tokio-openssl = "0.6" -# Use older version of tungstenite to support legacy connection server. 
-tungstenite = { version = "0.9.2", default-features = false } # 0.10+ requires tokio 0.3+ uuid = { version = "1.1", features = ["serde", "v4"] } url = "2.2" @@ -115,7 +103,7 @@ autoconnect_settings = { path = "./autoconnect/autoconnect-settings" } autoconnect_web = { path = "./autoconnect/autoconnect-web" } autoconnect_ws = { path = "./autoconnect/autoconnect-ws" } autoconnect_ws_clientsm = { path = "./autoconnect/autoconnect-ws/autoconnect-ws-clientsm" } -autopush_common = { path = "./autopush-common", features = ["dual"] } +autopush_common = { path = "./autopush-common", features = ["bigtable"] } [profile.release] debug = 1 diff --git a/Dockerfile b/Dockerfile index a458bc565..21e3ffa93 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,6 +1,6 @@ # NOTE: Ensure builder's Rust version matches CI's in .circleci/config.yml -# RUST_VER -FROM rust:1.76-buster as builder +# RUST_VER +FROM rust:1.81-bookworm AS builder ARG CRATE ADD . /app @@ -19,9 +19,9 @@ RUN \ cargo install --path $CRATE --locked --root /app -FROM debian:buster-slim +FROM debian:bookworm-slim ARG BINARY -# FROM debian:buster # for debugging docker build +# FROM debian:bookworm # for debugging docker build RUN \ groupadd --gid 10001 app && \ useradd --uid 10001 --gid 10001 --home /app --create-home app && \ diff --git a/Makefile b/Makefile index 08c341ee0..0842428de 100644 --- a/Makefile +++ b/Makefile @@ -13,11 +13,6 @@ FLAKE8_CONFIG := $(TESTS_DIR)/.flake8 LOCUST_HOST := "wss://autoconnect.stage.mozaws.net" INSTALL_STAMP := .install.stamp -.PHONY: ddb - -ddb: - mkdir $@ - curl -sSL http://dynamodb-local.s3-website-us-west-2.amazonaws.com/dynamodb_local_latest.tar.gz | tar xzvC $@ .PHONY: install install: $(INSTALL_STAMP) ## Install dependencies with poetry diff --git a/README.md b/README.md index 3a037e853..14c566c25 100644 --- a/README.md +++ b/README.md @@ -3,7 +3,7 @@ [![docs][docs-badge]][docs] [![autoconnect API][autoconnect-API-docs-badge]][autoconnect-API-docs] [![autoendpoint 
API][autoendpoint-API-docs-badge]][autoendpoint-API-docs] -[![Connect to Matrix via the Riot webapp][matrix-badge]][matrix] +[![Open #push Matrix room in chat.mozilla.org web client][matrix-badge]][matrix] # Autopush-rs @@ -27,11 +27,11 @@ code files directly. ## Debugging on Mobile -Mobile devices can specify the Push Server URL via the "[secret settings](https://github.com/mozilla-mobile/fenix/wiki/%22Secret-settings%22-debug-menu-instructions)". +Mobile devices can specify the Push Server URL via the "[secret settings](https://firefox-source-docs.mozilla.org/mobile/android/fenix/Secret-settings-debug-menu-instructions.html)". _Do not use the mobile `about:config` menu settings. These are not read or used by the mobile browser._ -The secret settings can be activatedby following [these instructions](https://github.com/mozilla-mobile/fenix/wiki/%22Secret-settings%22-debug-menu-instructions). Once the secret menu is active, select +The secret settings can be activated by following [these instructions](https://firefox-source-docs.mozilla.org/mobile/android/fenix/Secret-settings-debug-menu-instructions.html). Once the secret menu is active, select **Sync Debug** from the the mobile **Settings** menu, and specify the **Custom Push server** URL. 
**NOTE:** the default Push server url is `wss://push.services.mozilla.com/` diff --git a/autoconnect/Cargo.toml b/autoconnect/Cargo.toml index 4abe06919..c0ba4ba27 100644 --- a/autoconnect/Cargo.toml +++ b/autoconnect/Cargo.toml @@ -53,9 +53,7 @@ actix-service = "2.0" docopt = "1.1" [features] -default = ["dual", "emulator"] +default = ["bigtable"] bigtable = ["autopush_common/bigtable", "autoconnect_settings/bigtable"] -dynamodb = ["autopush_common/dynamodb", "autoconnect_settings/dynamodb"] -dual = ["bigtable", "dynamodb"] emulator = ["bigtable"] log_vapid = [] diff --git a/autoconnect/autoconnect-common/src/protocol.rs b/autoconnect/autoconnect-common/src/protocol.rs index ac021ddfa..7936a5d60 100644 --- a/autoconnect/autoconnect-common/src/protocol.rs +++ b/autoconnect/autoconnect-common/src/protocol.rs @@ -36,9 +36,7 @@ pub enum ClientMessage { Hello { uaid: Option, #[serde(rename = "channelIDs", skip_serializing_if = "Option::is_none")] - channel_ids: Option>, - #[serde(skip_serializing_if = "Option::is_none")] - use_webpush: Option, + _channel_ids: Option>, #[serde(skip_serializing_if = "Option::is_none")] broadcasts: Option>, }, @@ -95,8 +93,8 @@ pub enum ServerMessage { Hello { uaid: String, status: u32, - #[serde(skip_serializing_if = "Option::is_none")] - use_webpush: Option, + // This is required for output, but will always be "true" + use_webpush: bool, broadcasts: HashMap, }, diff --git a/autoconnect/autoconnect-common/src/test_support.rs b/autoconnect/autoconnect-common/src/test_support.rs index 0882ea019..760542de0 100644 --- a/autoconnect/autoconnect-common/src/test_support.rs +++ b/autoconnect/autoconnect-common/src/test_support.rs @@ -22,11 +22,7 @@ pub const CURRENT_MONTH: &str = "message_2018_06"; /// Return a simple MockDbClient that responds to hello (once) with a new uaid. 
pub fn hello_db() -> MockDbClient { - let mut db = MockDbClient::new(); - db.expect_rotating_message_table() - .times(1) - .return_const(Some(CURRENT_MONTH)); - db + MockDbClient::new() } /// Return a simple MockDbClient that responds to hello (once) with the @@ -34,17 +30,14 @@ pub fn hello_db() -> MockDbClient { pub fn hello_again_db(uaid: Uuid) -> MockDbClient { let mut db = MockDbClient::new(); db.expect_get_user().times(1).return_once(move |_| { - Ok(Some(User { - uaid, - // Last connected 10 minutes ago - connected_at: ms_since_epoch() - (10 * 60 * 1000), - current_month: Some(CURRENT_MONTH.to_owned()), - ..Default::default() - })) + let user = User::builder() + .uaid(uaid) + .connected_at(ms_since_epoch() - (10 * 60 * 1000)) + .build() + .unwrap(); + Ok(Some(user)) }); - db.expect_rotating_message_table() - .times(1) - .return_const(Some(CURRENT_MONTH)); + db.expect_update_user().times(1).return_once(|_| Ok(true)); db.expect_fetch_topic_messages() .times(1) diff --git a/autoconnect/autoconnect-settings/Cargo.toml b/autoconnect/autoconnect-settings/Cargo.toml index 25c7b7170..32c04aeb0 100644 --- a/autoconnect/autoconnect-settings/Cargo.toml +++ b/autoconnect/autoconnect-settings/Cargo.toml @@ -23,7 +23,5 @@ autopush_common.workspace = true [features] # specify the default via the calling crate, in order to simplify default chains. 
-dynamodb = [] bigtable = ["autopush_common/bigtable"] emulator = ["bigtable"] -dual = ["bigtable", "dynamodb"] diff --git a/autoconnect/autoconnect-settings/src/app_state.rs b/autoconnect/autoconnect-settings/src/app_state.rs index 43010d2b7..0d7656eca 100644 --- a/autoconnect/autoconnect-settings/src/app_state.rs +++ b/autoconnect/autoconnect-settings/src/app_state.rs @@ -2,10 +2,6 @@ use std::{sync::Arc, time::Duration}; #[cfg(feature = "bigtable")] use autopush_common::db::bigtable::BigTableClientImpl; -#[cfg(all(feature = "bigtable", feature = "dynamodb"))] -use autopush_common::db::dual::DualClientImpl; -#[cfg(feature = "dynamodb")] -use autopush_common::db::dynamodb::DdbClientImpl; use cadence::StatsdClient; use config::ConfigError; use fernet::{Fernet, MultiFernet}; @@ -74,11 +70,6 @@ impl AppState { let storage_type = StorageType::from_dsn(&db_settings.dsn); #[allow(unused)] let db: Box = match storage_type { - #[cfg(feature = "dynamodb")] - StorageType::DynamoDb => Box::new( - DdbClientImpl::new(metrics.clone(), &db_settings) - .map_err(|e| ConfigError::Message(e.to_string()))?, - ), #[cfg(feature = "bigtable")] StorageType::BigTable => { let client = BigTableClientImpl::new(metrics.clone(), &db_settings) @@ -86,13 +77,6 @@ impl AppState { client.spawn_sweeper(Duration::from_secs(30)); Box::new(client) } - #[cfg(all(feature = "bigtable", feature = "dynamodb"))] - StorageType::Dual => { - let client = DualClientImpl::new(metrics.clone(), &db_settings) - .map_err(|e| ConfigError::Message(e.to_string()))?; - client.spawn_sweeper(Duration::from_secs(30)); - Box::new(client) - } _ => panic!( "Invalid Storage type {:?}. 
Check {}__DB_DSN.", storage_type, diff --git a/autoconnect/autoconnect-settings/src/lib.rs b/autoconnect/autoconnect-settings/src/lib.rs index d9d0cccb5..6ac34f8ee 100644 --- a/autoconnect/autoconnect-settings/src/lib.rs +++ b/autoconnect/autoconnect-settings/src/lib.rs @@ -136,7 +136,7 @@ impl Default for Settings { megaphone_api_token: None, megaphone_poll_interval: Duration::from_secs(30), human_logs: false, - msg_limit: 100, + msg_limit: 150, actix_max_connections: None, actix_workers: None, } @@ -219,12 +219,17 @@ impl Settings { } pub fn test_settings() -> Self { + let db_dsn = Some("grpc://localhost:8086".to_string()); + // BigTable DB_SETTINGS. let db_settings = json!({ - "message_table": "message_test", - "router_table": "router_test" + "table_name":"projects/test/instances/test/tables/autopush", + "message_family":"message", + "router_family":"router", + "message_topic_family":"message_topic", }) .to_string(); Self { + db_dsn, db_settings, ..Default::default() } diff --git a/autoconnect/autoconnect-web/src/dockerflow.rs b/autoconnect/autoconnect-web/src/dockerflow.rs index b886a9fb7..1bbcb245f 100644 --- a/autoconnect/autoconnect-web/src/dockerflow.rs +++ b/autoconnect/autoconnect-web/src/dockerflow.rs @@ -26,10 +26,8 @@ pub fn config(config: &mut web::ServiceConfig) { } /// Handle the `/health` and `/__heartbeat__` routes -// note, this changes to `blocks_in_conditions` for 1.76+ -#[allow(clippy::blocks_in_conditions)] pub async fn health_route(state: Data) -> Json { - let status = if state + let healthy = state .db .health_check() .await @@ -37,15 +35,9 @@ pub async fn health_route(state: Data) -> Json { error!("Autoconnect Health Error: {:?}", e); e }) - .is_ok() - { - "OK" - } else { - "ERROR" - }; - //TODO: query local state and report results + .is_ok(); Json(json!({ - "status": status, + "status": if healthy { "OK" } else { "ERROR" }, "version": env!("CARGO_PKG_VERSION"), })) } diff --git a/autoconnect/autoconnect-web/src/error.rs 
b/autoconnect/autoconnect-web/src/error.rs index 258ae968d..f4d8c2f0d 100644 --- a/autoconnect/autoconnect-web/src/error.rs +++ b/autoconnect/autoconnect-web/src/error.rs @@ -1,6 +1,5 @@ use actix_http::ws::HandshakeError; use actix_web::{error::ResponseError, http::StatusCode, HttpResponse}; -use backtrace::Backtrace; use serde_json::json; use autopush_common::errors::ReportableError; @@ -34,10 +33,6 @@ impl ResponseError for ApiError { } impl ReportableError for ApiError { - fn backtrace(&self) -> Option<&Backtrace> { - None - } - fn is_sentry_event(&self) -> bool { match self { // Ignore failing upgrade to WebSocket @@ -45,10 +40,6 @@ impl ReportableError for ApiError { _ => true, } } - - fn metric_label(&self) -> Option<&'static str> { - None - } } impl ApiError { diff --git a/autoconnect/autoconnect-web/src/lib.rs b/autoconnect/autoconnect-web/src/lib.rs index 9cc2be767..5e74bcc57 100644 --- a/autoconnect/autoconnect-web/src/lib.rs +++ b/autoconnect/autoconnect-web/src/lib.rs @@ -7,7 +7,6 @@ extern crate slog_scope; pub mod dockerflow; pub mod error; -pub mod metrics; pub mod routes; #[cfg(test)] mod test; diff --git a/autoconnect/autoconnect-web/src/metrics.rs b/autoconnect/autoconnect-web/src/metrics.rs deleted file mode 100644 index 8590060df..000000000 --- a/autoconnect/autoconnect-web/src/metrics.rs +++ /dev/null @@ -1,182 +0,0 @@ -// TODO: Convert autopush-common::metrics to this? 
- -use std::net::UdpSocket; -use std::sync::Arc; -use std::time::Instant; - -use actix_web::{web::Data, HttpRequest}; -use cadence::{ - BufferedUdpMetricSink, CountedExt, Metric, MetricError, NopMetricSink, QueuingMetricSink, - StatsdClient, Timed, -}; - -use actix_web::HttpMessage; -use autoconnect_settings::{AppState, Settings}; -use autopush_common::tags::Tags; - -#[derive(Debug, Clone)] -pub struct MetricTimer { - pub label: String, - pub start: Instant, - pub tags: Tags, -} - -#[derive(Debug, Clone)] -pub struct Metrics { - client: Option>, - timer: Option, - tags: Option, -} - -impl Drop for Metrics { - fn drop(&mut self) { - let tags = self.tags.clone().unwrap_or_default(); - if let Some(client) = self.client.as_ref() { - if let Some(timer) = self.timer.as_ref() { - let lapse = (Instant::now() - timer.start).as_millis() as u64; - trace!( - "⌚ Ending timer at nanos: {:?} : {:?}", - &timer.label, - lapse; - tags - ); - let mut tagged = client.time_with_tags(&timer.label, lapse); - // Include any "hard coded" tags. 
- // tagged = tagged.with_tag("version", env!("CARGO_PKG_VERSION")); - let tags = timer.tags.tags.clone(); - let keys = tags.keys(); - for tag in keys { - tagged = tagged.with_tag(tag, tags.get(tag).unwrap()) - } - match tagged.try_send() { - Err(e) => { - // eat the metric, but log the error - warn!("⚠️ Metric {} error: {:?} ", &timer.label, e); - } - Ok(v) => { - trace!("⌚ {:?}", v.as_metric_str()); - } - } - } - } - } -} - -impl From<&HttpRequest> for Metrics { - fn from(req: &HttpRequest) -> Self { - let exts = req.extensions(); - let def_tags = Tags::from_request_head(req.head()); - let tags = exts.get::().unwrap_or(&def_tags); - Metrics { - client: Some(metrics_from_req(req)), - tags: Some(tags.clone()), - timer: None, - } - } -} - -impl From for Metrics { - fn from(client: StatsdClient) -> Self { - Metrics { - client: Some(Arc::new(client)), - tags: None, - timer: None, - } - } -} - -impl From<&actix_web::web::Data> for Metrics { - fn from(state: &actix_web::web::Data) -> Self { - Metrics { - client: Some(state.metrics.clone()), - tags: None, - timer: None, - } - } -} - -impl Metrics { - #![allow(unused)] // TODO: Start using metrics - - pub fn sink() -> StatsdClient { - StatsdClient::builder("", NopMetricSink).build() - } - - pub fn noop() -> Self { - Self { - client: Some(Arc::new(Self::sink())), - timer: None, - tags: None, - } - } - - pub fn start_timer(&mut self, label: &str, tags: Option) { - let mut mtags = self.tags.clone().unwrap_or_default(); - if let Some(t) = tags { - mtags.extend(t.tags) - } - - trace!("⌚ Starting timer... {:?}", &label; &mtags); - self.timer = Some(MetricTimer { - label: label.to_owned(), - start: Instant::now(), - tags: mtags, - }); - } - - // increment a counter with no tags data. 
- pub fn incr(self, label: &str) { - self.incr_with_tags(label, None) - } - - pub fn incr_with_tags(self, label: &str, tags: Option) { - if let Some(client) = self.client.as_ref() { - let mut tagged = client.incr_with_tags(label); - let mut mtags = self.tags.clone().unwrap_or_default(); - if let Some(t) = tags { - mtags.tags.extend(t.tags) - } - let tag_keys = mtags.tags.keys(); - for key in tag_keys.clone() { - // REALLY wants a static here, or at least a well defined ref. - tagged = tagged.with_tag(key, mtags.tags.get(key).unwrap()); - } - // Include any "hard coded" tags. - // incr = incr.with_tag("version", env!("CARGO_PKG_VERSION")); - match tagged.try_send() { - Err(e) => { - // eat the metric, but log the error - warn!("⚠️ Metric {} error: {:?}", label, e; mtags); - } - Ok(v) => trace!("☑️ {:?}", v.as_metric_str()), - } - } - } -} - -pub fn metrics_from_req(req: &HttpRequest) -> Arc { - req.app_data::>() - .expect("Could not get state in metrics_from_req") - .metrics - .clone() -} - -/// Create a cadence StatsdClient from the given options -pub fn metrics_from_settings(settings: &Settings) -> Result { - let builder = if let Some(statsd_host) = settings.statsd_host.as_ref() { - let socket = UdpSocket::bind("0.0.0.0:0")?; - socket.set_nonblocking(true)?; - - let host = (statsd_host.as_str(), settings.statsd_port); - let udp_sink = BufferedUdpMetricSink::from(host, socket)?; - let sink = QueuingMetricSink::from(udp_sink); - StatsdClient::builder(settings.statsd_label.as_ref(), sink) - } else { - StatsdClient::builder(settings.statsd_label.as_ref(), NopMetricSink) - }; - Ok(builder - .with_error_handler(|err| { - warn!("⚠️ Metric send error: {:?}", err); - }) - .build()) -} diff --git a/autoconnect/autoconnect-web/src/test.rs b/autoconnect/autoconnect-web/src/test.rs index a23bdf1c3..9cb7aaf0d 100644 --- a/autoconnect/autoconnect-web/src/test.rs +++ b/autoconnect/autoconnect-web/src/test.rs @@ -46,6 +46,8 @@ pub async fn hello_new_user() { let msg = 
json_msg(&mut framed).await; assert_eq!(msg["messageType"], "hello"); assert_eq!(msg["status"], 200); + // Ensure that the outbound response to the client includes the + // `use_webpush` flag set to `true` assert_eq!(msg["use_webpush"], true); assert!(msg["uaid"].is_string()); assert!(msg["broadcasts"].is_object()); diff --git a/autoconnect/autoconnect-ws/Cargo.toml b/autoconnect/autoconnect-ws/Cargo.toml index 9bfdf773c..b6b08accf 100644 --- a/autoconnect/autoconnect-ws/Cargo.toml +++ b/autoconnect/autoconnect-ws/Cargo.toml @@ -18,7 +18,7 @@ serde_json.workspace = true sentry.workspace = true slog-scope.workspace = true thiserror.workspace = true -tokio.workspace = true +tokio = { workspace = true, features = ["macros"] } async-trait = "0.1" strum = { version = "0.26", features = ["derive"] } diff --git a/autoconnect/autoconnect-ws/autoconnect-ws-sm/Cargo.toml b/autoconnect/autoconnect-ws/autoconnect-ws-sm/Cargo.toml index 002e51064..2e3dbfa30 100644 --- a/autoconnect/autoconnect-ws/autoconnect-ws-sm/Cargo.toml +++ b/autoconnect/autoconnect-ws/autoconnect-ws-sm/Cargo.toml @@ -27,5 +27,6 @@ actix-rt.workspace = true ctor.workspace = true mockall.workspace = true tokio.workspace = true +serde_json.workspace = true autoconnect_common = { workspace = true, features = ["test-support"] } diff --git a/autoconnect/autoconnect-ws/autoconnect-ws-sm/src/error.rs b/autoconnect/autoconnect-ws/autoconnect-ws-sm/src/error.rs index 815723205..c4f9becf2 100644 --- a/autoconnect/autoconnect-ws/autoconnect-ws-sm/src/error.rs +++ b/autoconnect/autoconnect-ws/autoconnect-ws-sm/src/error.rs @@ -40,8 +40,6 @@ where impl SMError { pub fn close_code(&self) -> actix_ws::CloseCode { match self.kind { - // TODO: applicable here? 
- //SMErrorKind::InvalidMessage(_) => CloseCode::Invalid, SMErrorKind::UaidReset => CloseCode::Normal, _ => CloseCode::Error, } @@ -70,7 +68,6 @@ impl ReportableError for SMError { } fn metric_label(&self) -> Option<&'static str> { - // TODO: match &self.kind { SMErrorKind::Database(e) => e.metric_label(), SMErrorKind::MakeEndpoint(e) => e.metric_label(), diff --git a/autoconnect/autoconnect-ws/autoconnect-ws-sm/src/identified/mod.rs b/autoconnect/autoconnect-ws/autoconnect-ws-sm/src/identified/mod.rs index a4d8ab395..d7488a248 100644 --- a/autoconnect/autoconnect-ws/autoconnect-ws-sm/src/identified/mod.rs +++ b/autoconnect/autoconnect-ws/autoconnect-ws-sm/src/identified/mod.rs @@ -1,7 +1,7 @@ use std::{fmt, mem, sync::Arc}; use actix_web::rt; -use cadence::{CountedExt, Timed}; +use cadence::Timed; use futures::channel::mpsc; use uuid::Uuid; @@ -12,7 +12,7 @@ use autoconnect_common::{ use autoconnect_settings::{AppState, Settings}; use autopush_common::{ - db::{error::DbResult, User, USER_RECORD_VERSION}, + db::User, notification::Notification, util::{ms_since_epoch, user_agent::UserAgentInfo}, }; @@ -245,7 +245,7 @@ impl WebPushClient { if let Some(node_id) = user.node_id { app_state .http - .put(&format!("{}/notif/{}", node_id, uaid.as_simple())) + .put(format!("{}/notif/{}", node_id, uaid.as_simple())) .send() .await? .error_for_status()?; @@ -279,66 +279,18 @@ impl WebPushClient { } } -/// Ensure an existing user's record is valid, returning its `ClientFlags` -/// -/// Somewhat similar to autoendpoint's `validate_webpush_user` function. When a -/// User record is invalid it will be dropped from the db and `None` will be -/// returned. -pub async fn process_existing_user( - app_state: &Arc, - user: &User, -) -> DbResult> { - // TODO: if BigTable, can we assume the user is migrated at this point (so - // we wouldn't need to validate `current_month` is not None?) 
- if let Some(rotating_message_table) = app_state.db.rotating_message_table() { - let Some(ref current_month) = user.current_month else { - debug!("Missing `current_month` value, dropping user"; "user" => ?user); - app_state - .metrics - .incr_with_tags("ua.expiration") - .with_tag("code", "104") - .with_tag("reason", "no_current_month") - .send(); - app_state.db.remove_user(&user.uaid).await?; - return Ok(None); - }; - - if current_month != rotating_message_table { - debug!("User is inactive, dropping user"; - "db.message_table" => rotating_message_table, - "user.current_month" => current_month, - "user" => ?user); - app_state - .metrics - .incr_with_tags("ua.expiration") - .with_tag("code", "105") - .with_tag("reason", "invalid_current_month") - .send(); - app_state.db.remove_user(&user.uaid).await?; - return Ok(None); - } - } - - let flags = ClientFlags { - check_storage: true, - old_record_version: user - .record_version - .map_or(true, |rec_ver| rec_ver < USER_RECORD_VERSION), - ..Default::default() - }; - Ok(Some(flags)) -} - #[derive(Debug)] pub struct ClientFlags { /// Whether check_storage queries for topic (not "timestamped") messages - include_topic: bool, + pub include_topic: bool, /// Flags the need to increment the last read for timestamp for timestamped messages - increment_storage: bool, + pub increment_storage: bool, /// Whether this client needs to check storage for messages - check_storage: bool, + pub check_storage: bool, /// Flags the need to drop the user record - old_record_version: bool, + pub old_record_version: bool, + /// First time a user has connected "today" + pub emit_channel_metrics: bool, } impl Default for ClientFlags { @@ -348,6 +300,7 @@ impl Default for ClientFlags { increment_storage: false, check_storage: false, old_record_version: false, + emit_channel_metrics: false, } } } diff --git a/autoconnect/autoconnect-ws/autoconnect-ws-sm/src/identified/on_client_msg.rs 
b/autoconnect/autoconnect-ws/autoconnect-ws-sm/src/identified/on_client_msg.rs index f7f78fb10..784626ea7 100644 --- a/autoconnect/autoconnect-ws/autoconnect-ws-sm/src/identified/on_client_msg.rs +++ b/autoconnect/autoconnect-ws/autoconnect-ws-sm/src/identified/on_client_msg.rs @@ -293,7 +293,9 @@ impl WebPushClient { let flags = &self.flags; if flags.check_storage { if flags.increment_storage { - debug!("▶️ WebPushClient:post_process_all_acked check_storage && increment_storage"); + debug!( + "▶️ WebPushClient:post_process_all_acked check_storage && increment_storage" + ); self.increment_storage().await?; } diff --git a/autoconnect/autoconnect-ws/autoconnect-ws-sm/src/identified/on_server_notif.rs b/autoconnect/autoconnect-ws/autoconnect-ws-sm/src/identified/on_server_notif.rs index 6ee206427..7e6b6f62f 100644 --- a/autoconnect/autoconnect-ws/autoconnect-ws-sm/src/identified/on_server_notif.rs +++ b/autoconnect/autoconnect-ws/autoconnect-ws-sm/src/identified/on_server_notif.rs @@ -181,9 +181,8 @@ impl WebPushClient { .or(self.current_timestamp); trace!("🗄️ WebPushClient::do_check_storage {:?}", ×tamp); // if we're to include topic messages, do those first. - // NOTE: DynamoDB would also wind up fetching the `current_timestamp` when pulling these, - // Bigtable can't fetch `current_timestamp` so we can't rely on `fetch_topic_messages()` - // returning a reasonable timestamp. + // NOTE: Bigtable can't fetch `current_timestamp`, so we can't rely on + // `fetch_topic_messages()` returning a reasonable timestamp. let topic_resp = if self.flags.include_topic { trace!("🗄️ WebPushClient::do_check_storage: fetch_topic_messages"); // Get the most recent max 11 messages. 
diff --git a/autoconnect/autoconnect-ws/autoconnect-ws-sm/src/unidentified.rs b/autoconnect/autoconnect-ws/autoconnect-ws-sm/src/unidentified.rs index f16b7099d..396543068 100644 --- a/autoconnect/autoconnect-ws/autoconnect-ws-sm/src/unidentified.rs +++ b/autoconnect/autoconnect-ws/autoconnect-ws-sm/src/unidentified.rs @@ -1,6 +1,6 @@ use std::{collections::HashMap, fmt, sync::Arc}; -use cadence::CountedExt; +use cadence::{CountedExt, Histogrammed}; use uuid::Uuid; use autoconnect_common::{ @@ -8,11 +8,14 @@ use autoconnect_common::{ protocol::{BroadcastValue, ClientMessage, ServerMessage}, }; use autoconnect_settings::{AppState, Settings}; -use autopush_common::{db::User, util::ms_since_epoch}; +use autopush_common::{ + db::{User, USER_RECORD_VERSION}, + util::{ms_since_epoch, ms_utc_midnight}, +}; use crate::{ error::{SMError, SMErrorKind}, - identified::{process_existing_user, ClientFlags, WebPushClient}, + identified::{ClientFlags, WebPushClient}, }; /// Represents a Client waiting for (or yet to process) a Hello message @@ -52,13 +55,12 @@ impl UnidentifiedClient { trace!("❓UnidentifiedClient::on_client_msg"); let ClientMessage::Hello { uaid, - use_webpush: Some(true), broadcasts, - .. + _channel_ids, } = msg else { return Err(SMError::invalid_message( - r#"Expected messageType="hello", "use_webpush": true"#.to_owned(), + r#"Expected messageType="hello""#.to_owned(), )); }; debug!( @@ -93,6 +95,18 @@ impl UnidentifiedClient { }) .send(); + // This is the first time that the user has connected "today". + if flags.emit_channel_metrics { + // Return the number of channels for the user using the internal channel_count. + // NOTE: this metric can be approximate since we're sampling to determine the + // approximate average of channels per user for business reasons. 
+ self.app_state + .metrics + .histogram_with_tags("ua.connection.channel_count", user.channel_count() as u64) + .with_tag_value("desktop") + .send(); + } + let (broadcast_subs, broadcasts) = self .broadcast_init(&Broadcast::from_hashmap(broadcasts.unwrap_or_default())) .await; @@ -110,8 +124,8 @@ impl UnidentifiedClient { let smsg = ServerMessage::Hello { uaid: uaid.as_simple().to_string(), + use_webpush: true, status: 200, - use_webpush: Some(true), broadcasts, }; let smsgs = std::iter::once(smsg).chain(check_storage_smsgs); @@ -124,28 +138,30 @@ impl UnidentifiedClient { let connected_at = ms_since_epoch(); if let Some(uaid) = uaid { - // NOTE: previously a user would be dropped when - // serde_dynamodb::from_hashmap failed (but this now occurs inside - // the db impl) if let Some(mut user) = self.app_state.db.get_user(&uaid).await? { - if let Some(flags) = process_existing_user(&self.app_state, &user).await? { - user.node_id = Some(self.app_state.router_url.to_owned()); - if user.connected_at > connected_at { - let _ = self.app_state.metrics.incr("ua.already_connected"); - return Err(SMErrorKind::AlreadyConnected.into()); - } - user.connected_at = connected_at; - user.set_last_connect(); - if !self.app_state.db.update_user(&mut user).await? 
{ - let _ = self.app_state.metrics.incr("ua.already_connected"); - return Err(SMErrorKind::AlreadyConnected.into()); - } - return Ok(GetOrCreateUser { - user, - existing_user: true, - flags, - }); + let flags = ClientFlags { + check_storage: true, + old_record_version: user + .record_version + .map_or(true, |rec_ver| rec_ver < USER_RECORD_VERSION), + emit_channel_metrics: user.connected_at < ms_utc_midnight(), + ..Default::default() + }; + user.node_id = Some(self.app_state.router_url.to_owned()); + if user.connected_at > connected_at { + let _ = self.app_state.metrics.incr("ua.already_connected"); + return Err(SMErrorKind::AlreadyConnected.into()); + } + user.connected_at = connected_at; + if !self.app_state.db.update_user(&mut user).await? { + let _ = self.app_state.metrics.incr("ua.already_connected"); + return Err(SMErrorKind::AlreadyConnected.into()); } + return Ok(GetOrCreateUser { + user, + existing_user: true, + flags, + }); } // NOTE: when the client's specified a uaid but get_user returns // None (or process_existing_user dropped the user record due to it @@ -153,18 +169,11 @@ impl UnidentifiedClient { // change from the previous state machine impl) } - // TODO: NOTE: A new User doesn't get a `set_last_connect()` (matching - // the previous impl) - let user = User { - current_month: self - .app_state - .db - .rotating_message_table() - .map(str::to_owned), - node_id: Some(self.app_state.router_url.to_owned()), - connected_at, - ..Default::default() - }; + let user = User::builder() + .node_id(self.app_state.router_url.to_owned()) + .connected_at(connected_at) + .build() + .map_err(|e| SMErrorKind::Internal(format!("User::builder error: {e}")))?; Ok(GetOrCreateUser { user, existing_user: false, @@ -201,7 +210,7 @@ struct GetOrCreateUser { #[cfg(test)] mod tests { - use std::sync::Arc; + use std::{str::FromStr, sync::Arc}; use autoconnect_common::{ protocol::ClientMessage, @@ -250,12 +259,17 @@ mod tests { db: hello_again_db(DUMMY_UAID).into_boxed_arc(), 
..Default::default() }); - let msg = ClientMessage::Hello { - uaid: Some(DUMMY_UAID.to_string()), - channel_ids: None, - use_webpush: Some(true), - broadcasts: None, - }; + // Use a constructed JSON structure here to capture the sort of input we expect, + // which may not match what we derive into. + let js = serde_json::json!({ + "messageType": "hello", + "uaid": DUMMY_UAID, + "use_webpush": true, + "channelIDs": [], + "broadcasts": {} + }) + .to_string(); + let msg: ClientMessage = serde_json::from_str(&js).unwrap(); client.on_client_msg(msg).await.expect("Hello failed"); } @@ -266,12 +280,12 @@ mod tests { db: hello_db().into_boxed_arc(), ..Default::default() }); - let msg = ClientMessage::Hello { - uaid: None, - channel_ids: None, - use_webpush: Some(true), - broadcasts: None, - }; + // Ensure that we do not need to pass the "use_webpush" flag. + // (yes, this could just be passing the string, but I want to be + // very explicit here.) + let json = serde_json::json!({"messageType":"hello"}); + let raw = json.to_string(); + let msg = ClientMessage::from_str(&raw).unwrap(); client.on_client_msg(msg).await.expect("Hello failed"); } @@ -280,8 +294,7 @@ mod tests { let client = uclient(Default::default()); let msg = ClientMessage::Hello { uaid: Some("".to_owned()), - channel_ids: None, - use_webpush: Some(true), + _channel_ids: None, broadcasts: None, }; client.on_client_msg(msg).await.expect("Hello failed"); @@ -292,8 +305,7 @@ mod tests { let client = uclient(Default::default()); let msg = ClientMessage::Hello { uaid: Some("invalid".to_owned()), - channel_ids: None, - use_webpush: Some(true), + _channel_ids: None, broadcasts: None, }; client.on_client_msg(msg).await.expect("Hello failed"); diff --git a/autoconnect/autoconnect-ws/src/error.rs b/autoconnect/autoconnect-ws/src/error.rs index f62b44ed3..a914d8cdd 100644 --- a/autoconnect/autoconnect-ws/src/error.rs +++ b/autoconnect/autoconnect-ws/src/error.rs @@ -43,8 +43,7 @@ impl WSError { pub fn close_code(&self) 
-> actix_ws::CloseCode { match &self.kind { WSErrorKind::SM(e) => e.close_code(), - // TODO: applicable here? - //WSErrorKind::Protocol(_) => CloseCode::Protocol, + WSErrorKind::Protocol(_) => CloseCode::Protocol, WSErrorKind::UnsupportedMessage(_) => CloseCode::Unsupported, _ => CloseCode::Error, } diff --git a/autoconnect/autoconnect-ws/src/handler.rs b/autoconnect/autoconnect-ws/src/handler.rs index 3bce5706d..2629e1d66 100644 --- a/autoconnect/autoconnect-ws/src/handler.rs +++ b/autoconnect/autoconnect-ws/src/handler.rs @@ -47,10 +47,10 @@ pub fn spawn_webpush_ws( /// /// Manages: /// -///- I/O to and from the client: incoming `ClientMessage`s from the WebSocket -/// connection and `ServerNotification`s from the `ClientRegistry` (triggered -/// by `autoendpoint`), and in turn outgoing `ServerMessage`s written to the -/// WebSocket connection in response to those events. +/// - I/O to and from the client: incoming `ClientMessage`s from the WebSocket +/// connection and `ServerNotification`s from the `ClientRegistry` (triggered +/// by `autoendpoint`), and in turn outgoing `ServerMessage`s written to the +/// WebSocket connection in response to those events. /// - the lifecycle/cleanup of the Client pub(crate) async fn webpush_ws( client: UnidentifiedClient, @@ -122,17 +122,17 @@ async fn unidentified_ws( /// The loop waits on 3 different inputs: /// /// - msg_stream: Stream of WebPush Protocol `ServerMessage`s (from the Client -/// to the Server) that are passed to `WebPushClient::on_client_msg`. +/// to the Server) that are passed to `WebPushClient::on_client_msg`. /// /// - snotif_stream: Stream of `ServerNotification`s, most of which are -/// generated by autoendpoint, that are passed to -/// `WebPushClient::on_server_notif`. +/// generated by autoendpoint, that are passed to +/// `WebPushClient::on_server_notif`. 
/// /// - ping_manager: A multi-purpose timer that ticks when either a WebSocket -/// Ping (or WebPush Broadcast) should be sent to the Client or when we time -/// out the Client for not responding to a previous Ping in time. The Ping -/// encourages the connection to keep alive (it's more likely to be dropped if -/// completely idle) and aids in detecting Clients that are no longer connected +/// Ping (or WebPush Broadcast) should be sent to the Client or when we time +/// out the Client for not responding to a previous Ping in time. The Ping +/// encourages the connection to keep alive (it's more likely to be dropped if +/// completely idle) and aids in detecting Clients that are no longer connected async fn identified_ws( client: &mut WebPushClient, smsgs: impl IntoIterator, diff --git a/autoconnect/autoconnect-ws/src/ping.rs b/autoconnect/autoconnect-ws/src/ping.rs index 7cab0e79d..b163f63a8 100644 --- a/autoconnect/autoconnect-ws/src/ping.rs +++ b/autoconnect/autoconnect-ws/src/ping.rs @@ -47,12 +47,12 @@ impl PingManager { /// Signals either a: /// /// - WebSocket Ping (or a WebPush Broadcast, if one is pending) to be sent - /// to the Client to prevent its connection from idling out/disconnecting - /// due to inactivity + /// to the Client to prevent its connection from idling out/disconnecting + /// due to inactivity /// /// - WebSocket Ping was previously sent and the Client failed to respond - /// with a Pong within the `auto_ping_timeout` interval - /// (`WSError::PongTimeout` Error returned) + /// with a Pong within the `auto_ping_timeout` interval + /// (`WSError::PongTimeout` Error returned) pub async fn tick(&mut self) -> Result<(), WSError> { self.ping_or_timeout.tick().await; match self.waiting { diff --git a/autoconnect/src/main.rs b/autoconnect/src/main.rs index 420112f91..79062cf77 100644 --- a/autoconnect/src/main.rs +++ b/autoconnect/src/main.rs @@ -26,14 +26,12 @@ Usage: autoconnect [options] Options: -h, --help Show this message. 
- --config-connection=CONFIGFILE Connection configuration file path. - --config-shared=CONFIGFILE Common configuration file path. + --config=CONFIGFILE Connection configuration file path. "; #[derive(Debug, Deserialize)] struct Args { - flag_config_connection: Option, - flag_config_shared: Option, + flag_config: Option, } #[actix_web::main] @@ -43,10 +41,7 @@ async fn main() -> Result<()> { .and_then(|d| d.deserialize()) .unwrap_or_else(|e| e.exit()); let mut filenames = Vec::new(); - if let Some(shared_filename) = args.flag_config_shared { - filenames.push(shared_filename); - } - if let Some(config_filename) = args.flag_config_connection { + if let Some(config_filename) = args.flag_config { filenames.push(config_filename); } let settings = diff --git a/autoendpoint/Cargo.toml b/autoendpoint/Cargo.toml index bc0e48d17..94586a0d4 100644 --- a/autoendpoint/Cargo.toml +++ b/autoendpoint/Cargo.toml @@ -24,14 +24,11 @@ openssl.workspace = true rand.workspace = true regex.workspace = true reqwest.workspace = true -rusoto_core.workspace = true -rusoto_dynamodb.workspace = true sentry.workspace = true sentry-actix.workspace = true sentry-core.workspace = true serde.workspace = true serde_derive.workspace = true -serde_dynamodb.workspace = true serde_json.workspace = true slog.workspace = true slog-async.workspace = true @@ -44,7 +41,7 @@ tokio.workspace = true url.workspace = true uuid.workspace = true -a2 = { version = "0.8" } +a2 = { version = "0.10" } bytebuffer = "2.1" again = { version = "0.1.2", default-features = false, features = [ "log", @@ -52,10 +49,13 @@ again = { version = "0.1.2", default-features = false, features = [ ] } async-trait = "0.1" autopush_common = { path = "../autopush-common" } -jsonwebtoken = "8.0" # v9.0 breaks vapid aud tests. 
https://mozilla-hub.atlassian.net/browse/SYNC-4201 -validator = "0.17" -validator_derive = "0.17" +jsonwebtoken = "9.3.0" +validator = "0.18" +validator_derive = "0.18" + yup-oauth2 = "8.1" +# Updating to 9+ requires configuring rust-tls (See https://github.com/dermesser/yup-oauth2/issues/235) +# yup-oauth2 = { version = "10.0.1", features = ["hyper-rustls"] } # For mockito test debugging #ureq={ version="2.4", features=["json"] } @@ -63,13 +63,22 @@ yup-oauth2 = "8.1" [dev-dependencies] deadpool = { workspace = true } mockall.workspace = true -mockito = "0.31" +mockito = "1.4" tempfile = "3.2.0" tokio = { workspace = true, features = ["fs", "macros"] } [features] -default = ["dual"] +default = ["bigtable"] +# data store types bigtable = ["autopush_common/bigtable"] -dynamodb = ["autopush_common/dynamodb"] -dual = ["bigtable", "dynamodb"] + +# enable emulator to call locally run data store. emulator = ["bigtable"] + +# Enable "stub" router for local testing purposes. +# The "stub" will return specified error strings or success +# depending on which `app_id` client is called based on the registration +# and endpoint. 
See [autoendpoint::router::stub::router::StubRouter] +stub = [] +# Verbosely log vapid assertions (NOT ADVISED FOR WIDE PRODUCTION USE) +log_vapid = [] diff --git a/autoendpoint/src/error.rs b/autoendpoint/src/error.rs index 61ab12750..8881193b1 100644 --- a/autoendpoint/src/error.rs +++ b/autoendpoint/src/error.rs @@ -34,6 +34,7 @@ const RETRY_AFTER_PERIOD: &str = "120"; // retry after 2 minutes; pub struct ApiError { pub kind: ApiErrorKind, pub backtrace: Backtrace, + pub extras: Option>, } impl ApiError { @@ -295,6 +296,7 @@ where ApiError { kind: ApiErrorKind::from(item), backtrace: Backtrace::new(), + extras: None, } } } @@ -360,11 +362,17 @@ impl ReportableError for ApiError { } fn extras(&self) -> Vec<(&str, String)> { + let mut extras: Vec<(&str, String)> = match &self.extras { + Some(extras) => extras.iter().map(|e| (e.0.as_str(), e.1.clone())).collect(), + None => Default::default(), + }; + match &self.kind { - ApiErrorKind::Router(e) => e.extras(), - ApiErrorKind::LogCheck => vec![("coffee", "Unsupported".to_owned())], - _ => vec![], - } + ApiErrorKind::Router(e) => extras.extend(e.extras()), + ApiErrorKind::LogCheck => extras.extend(vec![("coffee", "Unsupported".to_owned())]), + _ => {} + }; + extras } } @@ -397,7 +405,10 @@ fn errno_from_validation_errors(e: &ValidationErrors) -> Option { mod tests { use autopush_common::{db::error::DbError, sentry::event_from_error}; + use crate::routers::RouterError; + use super::{ApiError, ApiErrorKind}; + use crate::error::ReportableError; #[test] fn sentry_event_with_extras() { @@ -431,4 +442,23 @@ mod tests { // "Retry-After" is applied on any 503 response (See ApiError::error_response) assert_eq!(e.kind.status(), actix_http::StatusCode::SERVICE_UNAVAILABLE) } + + /// Ensure that extras set on a given error are included in the ApiError.extras() call. 
+ #[tokio::test] + async fn pass_extras() { + let e = RouterError::NotFound; + let mut ae = ApiError::from(e); + ae.extras = Some([("foo".to_owned(), "bar".to_owned())].to_vec()); + + let aex: Vec<(&str, String)> = ae.extras(); + assert!(aex.contains(&("foo", "bar".to_owned()))); + + let e = ApiErrorKind::LogCheck; + let mut ae = ApiError::from(e); + ae.extras = Some([("foo".to_owned(), "bar".to_owned())].to_vec()); + + let aex: Vec<(&str, String)> = ae.extras(); + assert!(aex.contains(&("foo", "bar".to_owned()))); + assert!(aex.contains(&("coffee", "Unsupported".to_owned()))); + } } diff --git a/autoendpoint/src/extractors/authorization_check.rs b/autoendpoint/src/extractors/authorization_check.rs index 2a39147be..1da3f2868 100644 --- a/autoendpoint/src/extractors/authorization_check.rs +++ b/autoendpoint/src/extractors/authorization_check.rs @@ -9,6 +9,8 @@ use futures::FutureExt; use openssl::error::ErrorStack; use uuid::Uuid; +use autopush_common::util::user_agent::UserAgentInfo; + /// Verifies the request authorization via the authorization header. /// /// The expected token is the HMAC-SHA256 hash of the UAID, signed with one of @@ -16,7 +18,9 @@ use uuid::Uuid; /// NOTE: This is *ONLY* for internal calls that require authorization and should /// NOT be used by calls that are using VAPID authentication (e.g. 
/// subscription provider endpoints) -pub struct AuthorizationCheck; +pub struct AuthorizationCheck { + pub user_agent: UserAgentInfo, +} impl AuthorizationCheck { pub fn generate_token(auth_key: &str, user: &Uuid) -> Result { @@ -27,6 +31,7 @@ impl AuthorizationCheck { token: &str, uaid: &Uuid, auth_keys: &[String], + user_agent: UserAgentInfo, ) -> Result { // Check the token against the expected token for each key for key in auth_keys { @@ -38,7 +43,7 @@ impl AuthorizationCheck { if expected_token.len() == token.len() && openssl::memcmp::eq(expected_token.as_bytes(), token.as_bytes()) { - return Ok(Self); + return Ok(Self { user_agent }); } } Err(ApiErrorKind::InvalidLocalAuth("incorrect auth token".to_owned()).into()) @@ -66,8 +71,9 @@ impl FromRequest for AuthorizationCheck { .ok_or_else(|| ApiErrorKind::InvalidLocalAuth("missing auth header".to_owned()))?; let token = get_token_from_auth_header(auth_header) .ok_or_else(|| ApiErrorKind::InvalidLocalAuth("missing auth token".to_owned()))?; + let user_agent = UserAgentInfo::from(&req); - Self::validate_token(token, &uaid, &state.settings.auth_keys()) + Self::validate_token(token, &uaid, &state.settings.auth_keys(), user_agent) } .boxed_local() } @@ -91,6 +97,7 @@ fn get_token_from_auth_header(header: &str) -> Option<&str> { mod test { use crate::error::ApiResult; + use autopush_common::util::user_agent::UserAgentInfo; use super::*; @@ -101,7 +108,7 @@ mod test { let auth_keys = ["HJVPy4ZwF4Yz_JdvXTL8hRcwIhv742vC60Tg5Ycrvw8=".to_owned()].to_vec(); let token = AuthorizationCheck::generate_token(auth_keys.first().unwrap(), &uaid).unwrap(); - AuthorizationCheck::validate_token(&token, &uaid, &auth_keys)?; + AuthorizationCheck::validate_token(&token, &uaid, &auth_keys, UserAgentInfo::default())?; Ok(()) } diff --git a/autoendpoint/src/extractors/notification_headers.rs b/autoendpoint/src/extractors/notification_headers.rs index abb6bd639..55987a06d 100644 --- a/autoendpoint/src/extractors/notification_headers.rs +++ 
b/autoendpoint/src/extractors/notification_headers.rs @@ -2,7 +2,7 @@ use crate::error::{ApiError, ApiErrorKind, ApiResult}; use crate::headers::crypto_key::CryptoKeyHeader; use crate::headers::util::{get_header, get_owned_header}; use actix_web::HttpRequest; -use autopush_common::util::InsertOpt; +use autopush_common::{util::InsertOpt, MAX_NOTIFICATION_TTL}; use lazy_static::lazy_static; use regex::Regex; use std::cmp::min; @@ -16,9 +16,6 @@ lazy_static! { Regex::new(r"(?P[0-9A-Za-z\-_]+)=+(?P[,;]|$)").unwrap(); } -/// 60 days -const MAX_TTL: i64 = 60 * 60 * 24 * 60; - /// Extractor and validator for notification headers #[derive(Clone, Debug, Eq, PartialEq, Validate)] pub struct NotificationHeaders { @@ -71,7 +68,9 @@ impl NotificationHeaders { let ttl = get_header(req, "ttl") .and_then(|ttl| ttl.parse().ok()) // Enforce a maximum TTL, but don't error - .map(|ttl| min(ttl, MAX_TTL)) + // NOTE: In order to trap for negative TTLs, this should be a + // signed value, otherwise we will error out with NO_TTL. + .map(|ttl| min(ttl, MAX_NOTIFICATION_TTL as i64)) .ok_or(ApiErrorKind::NoTTL)?; let topic = get_owned_header(req, "topic"); @@ -216,9 +215,9 @@ impl NotificationHeaders { #[cfg(test)] mod tests { use super::NotificationHeaders; - use super::MAX_TTL; use crate::error::{ApiErrorKind, ApiResult}; use actix_web::test::TestRequest; + use autopush_common::MAX_NOTIFICATION_TTL; /// Assert that a result is a validation error and check its serialization /// against the JSON value. 
@@ -265,7 +264,6 @@ mod tests { .insert_header(("TTL", "-1")) .to_http_request(); let result = NotificationHeaders::from_request(&req, false); - assert_validation_error( result, serde_json::json!({ @@ -285,12 +283,12 @@ mod tests { #[test] fn maximum_ttl() { let req = TestRequest::post() - .insert_header(("TTL", (MAX_TTL + 1).to_string())) + .insert_header(("TTL", (MAX_NOTIFICATION_TTL + 1).to_string())) .to_http_request(); let result = NotificationHeaders::from_request(&req, false); assert!(result.is_ok()); - assert_eq!(result.unwrap().ttl, MAX_TTL); + assert_eq!(result.unwrap().ttl, MAX_NOTIFICATION_TTL as i64); } /// A valid topic results in no errors diff --git a/autoendpoint/src/extractors/router_data_input.rs b/autoendpoint/src/extractors/router_data_input.rs index 348af9856..dd83975ef 100644 --- a/autoendpoint/src/extractors/router_data_input.rs +++ b/autoendpoint/src/extractors/router_data_input.rs @@ -8,8 +8,6 @@ use uuid::Uuid; lazy_static! { static ref VALID_TOKEN: Regex = Regex::new(r"^[^ ]{8,}$").unwrap(); - static ref VALID_ADM_TOKEN: Regex = - Regex::new(r"^amzn1.adm-registration.v3.[^ ]{256,}$").unwrap(); } /// Extracts the router data from the request body and validates the token @@ -43,7 +41,8 @@ impl FromRequest for RouterDataInput { RouterType::FCM | RouterType::GCM | RouterType::APNS => { VALID_TOKEN.is_match(&data.token) } - RouterType::ADM => VALID_ADM_TOKEN.is_match(&data.token), + #[cfg(feature = "stub")] + RouterType::STUB => data.token.as_str() == "success", }; if !is_valid { diff --git a/autoendpoint/src/extractors/routers.rs b/autoendpoint/src/extractors/routers.rs index 49d1aaf23..c4c012a29 100644 --- a/autoendpoint/src/extractors/routers.rs +++ b/autoendpoint/src/extractors/routers.rs @@ -1,7 +1,8 @@ use crate::error::{ApiError, ApiResult}; -use crate::routers::adm::router::AdmRouter; use crate::routers::apns::router::ApnsRouter; use crate::routers::fcm::router::FcmRouter; +#[cfg(feature = "stub")] +use 
crate::routers::stub::router::StubRouter; use crate::routers::webpush::WebPushRouter; use crate::routers::Router; use crate::server::AppState; @@ -13,7 +14,6 @@ use std::fmt::{self, Display}; use std::str::FromStr; use std::sync::Arc; -/// Valid `DynamoDbUser::router_type` values #[derive(Copy, Clone, Debug, Eq, PartialEq)] #[allow(clippy::upper_case_acronyms)] pub enum RouterType { @@ -21,7 +21,8 @@ pub enum RouterType { FCM, GCM, APNS, - ADM, + #[cfg(feature = "stub")] + STUB, } impl FromStr for RouterType { @@ -33,7 +34,8 @@ impl FromStr for RouterType { "fcm" => Ok(RouterType::FCM), "gcm" => Ok(RouterType::GCM), "apns" => Ok(RouterType::APNS), - "adm" => Ok(RouterType::ADM), + #[cfg(feature = "stub")] + "stub" => Ok(RouterType::STUB), _ => Err(()), } } @@ -46,7 +48,8 @@ impl Display for RouterType { RouterType::FCM => "fcm", RouterType::GCM => "gcm", RouterType::APNS => "apns", - RouterType::ADM => "adm", + #[cfg(feature = "stub")] + RouterType::STUB => "stub", }) } } @@ -57,7 +60,8 @@ pub struct Routers { webpush: WebPushRouter, fcm: Arc, apns: Arc, - adm: Arc, + #[cfg(feature = "stub")] + stub: Arc, } impl FromRequest for Routers { @@ -78,7 +82,8 @@ impl FromRequest for Routers { }, fcm: app_state.fcm_router.clone(), apns: app_state.apns_router.clone(), - adm: app_state.adm_router.clone(), + #[cfg(feature = "stub")] + stub: app_state.stub_router.clone(), }) } } @@ -90,7 +95,8 @@ impl Routers { RouterType::WebPush => &self.webpush, RouterType::FCM | RouterType::GCM => self.fcm.as_ref(), RouterType::APNS => self.apns.as_ref(), - RouterType::ADM => self.adm.as_ref(), + #[cfg(feature = "stub")] + RouterType::STUB => self.stub.as_ref(), } } } diff --git a/autoendpoint/src/extractors/subscription.rs b/autoendpoint/src/extractors/subscription.rs index 8d29831a2..9885a013d 100644 --- a/autoendpoint/src/extractors/subscription.rs +++ b/autoendpoint/src/extractors/subscription.rs @@ -1,18 +1,17 @@ use std::borrow::Cow; -use std::fmt; +use std::error::Error; use 
std::str::FromStr; use actix_web::{dev::Payload, web::Data, FromRequest, HttpRequest}; use autopush_common::{ db::User, tags::Tags, - util::{b64_decode_std, b64_decode_url, sec_since_epoch}, + util::{b64_decode_std, b64_decode_url}, }; use cadence::{CountedExt, StatsdClient}; use futures::{future::LocalBoxFuture, FutureExt}; use jsonwebtoken::{Algorithm, DecodingKey, Validation}; use openssl::hash::MessageDigest; -use serde::{Deserialize, Serialize}; use url::Url; use uuid::Uuid; @@ -23,12 +22,12 @@ use crate::extractors::{ }; use crate::headers::{ crypto_key::CryptoKeyHeader, - vapid::{VapidError, VapidHeader, VapidHeaderWithKey, VapidVersionData}, + vapid::{VapidClaims, VapidError, VapidHeader, VapidHeaderWithKey, VapidVersionData}, }; use crate::metrics::Metrics; use crate::server::AppState; -const ONE_DAY_IN_SECONDS: u64 = 60 * 60 * 24; +use crate::settings::Settings; /// Extracts subscription data from `TokenInfo` and verifies auth/crypto headers #[derive(Clone, Debug)] @@ -36,34 +35,14 @@ pub struct Subscription { pub user: User, pub channel_id: Uuid, pub vapid: Option, + /// A stored value here indicates that the subscription update + /// should be tracked internally. + /// (This should ONLY be applied for messages that match known + /// Mozilla provided VAPID public keys.) 
+ /// + pub tracking_id: Option, } -#[derive(Debug, Serialize, Deserialize)] -pub struct VapidClaims { - exp: u64, - aud: String, - sub: String, -} - -impl Default for VapidClaims { - fn default() -> Self { - Self { - exp: sec_since_epoch() + ONE_DAY_IN_SECONDS, - aud: "No audience".to_owned(), - sub: "No sub".to_owned(), - } - } -} - -impl fmt::Display for VapidClaims { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("VapidClaims") - .field("exp", &self.exp) - .field("aud", &self.aud) - .field("sub", &self.sub) - .finish() - } -} impl FromRequest for Subscription { type Error = ApiError; type Future = LocalBoxFuture<'static, Result>; @@ -84,7 +63,9 @@ impl FromRequest for Subscription { .fernet .decrypt(&repad_base64(&token_info.token)) .map_err(|e| { - error!("🔐 fernet: {:?}", e); + // Since we're decrypting and endpoint, we get a lot of spam links. + // This can fill our logs. + trace!("🔐 fernet: {:?}", e); ApiErrorKind::InvalidToken })?; @@ -93,7 +74,34 @@ impl FromRequest for Subscription { .map(|vapid| extract_public_key(vapid, &token_info)) .transpose()?; - trace!("Vapid: {:?}", &vapid); + trace!("raw vapid: {:?}", &vapid); + let trackable = if let Some(vapid) = &vapid { + app_state.reliability.is_trackable(vapid) + } else { + false + }; + + // Capturing the vapid sub right now will cause too much cardinality. Instead, + // let's just capture if we have a valid VAPID, as well as what sort of bad sub + // values we get. + if let Some(ref header) = vapid { + let sub = header + .vapid + .sub() + .map_err(|e: VapidError| { + // Capture the type of error and add it to metrics. + let mut tags = Tags::default(); + tags.tags + .insert("error".to_owned(), e.as_metric().to_owned()); + metrics + .clone() + .incr_with_tags("notification.auth.error", Some(tags)); + }) + .unwrap_or_default(); + // For now, record that we had a good (?) 
VAPID sub, + metrics.clone().incr("notification.auth.ok"); + info!("VAPID sub: {:?}", sub) + }; match token_info.api_version { ApiVersion::Version1 => version_1_validation(&token)?, @@ -119,17 +127,21 @@ impl FromRequest for Subscription { // Validate the VAPID JWT token and record the version if let Some(vapid) = &vapid { - validate_vapid_jwt(vapid, &app_state.settings.endpoint_url(), &metrics)?; + validate_vapid_jwt(vapid, &app_state.settings, &metrics)?; app_state .metrics .incr(&format!("updates.vapid.draft{:02}", vapid.vapid.version()))?; } + let tracking_id = + trackable.then(|| app_state.reliability.get_tracking_id(req.headers())); + Ok(Subscription { user, channel_id, vapid, + tracking_id, }) } .boxed_local() @@ -160,7 +172,12 @@ fn parse_vapid(token_info: &TokenInfo, metrics: &StatsdClient) -> ApiResult return Ok(None), }; - let vapid = VapidHeader::parse(auth_header)?; + let vapid = VapidHeader::parse(auth_header).inspect_err(|e| { + metrics + .incr_with_tags("notification.auth.error") + .with_tag("error", e.as_metric()) + .send(); + })?; metrics .incr_with_tags("notification.auth") @@ -249,6 +266,18 @@ fn version_2_validation(token: &[u8], vapid: Option<&VapidHeaderWithKey>) -> Api Ok(()) } +// Perform a very brain dead conversion of a string to a CamelCaseVersion +fn term_to_label(term: &str) -> String { + term.split(' ').fold("".to_owned(), |prev, word: &str| { + format!( + "{}{}{}", + prev, + word.get(0..1).unwrap_or_default().to_ascii_uppercase(), + word.get(1..).unwrap_or_default() + ) + }) +} + /// Validate the VAPID JWT token. 
Specifically, /// - Check the signature /// - Make sure it hasn't expired @@ -257,16 +286,21 @@ fn version_2_validation(token: &[u8], vapid: Option<&VapidHeaderWithKey>) -> Api /// This is mostly taken care of by the jsonwebtoken library fn validate_vapid_jwt( vapid: &VapidHeaderWithKey, - domain: &Url, + settings: &Settings, metrics: &Metrics, ) -> ApiResult<()> { let VapidHeaderWithKey { vapid, public_key } = vapid; let public_key = decode_public_key(public_key)?; + let mut validation = Validation::new(Algorithm::ES256); + let audience: Vec<&str> = settings.vapid_aud.iter().map(|s| s.as_str()).collect(); + validation.set_audience(&audience); + validation.set_required_spec_claims(&["exp", "aud", "sub"]); + let token_data = match jsonwebtoken::decode::( &vapid.token, &DecodingKey::from_ec_der(&public_key), - &Validation::new(Algorithm::ES256), + &validation, ) { Ok(v) => v, Err(e) => match e.kind() { @@ -297,8 +331,37 @@ fn validate_vapid_jwt( // the Json parse error. return Err(VapidError::InvalidVapid(e.to_string()).into()); } + jsonwebtoken::errors::ErrorKind::InvalidAudience => { + return Err(VapidError::InvalidAudience.into()); + } _ => { - metrics.clone().incr("notification.auth.bad_vapid.other"); + // Attempt to match up the majority of ErrorKind variants. + // The third-party errors all defer to the source, so we can + // use that to differentiate for actual errors. + let mut tags = Tags::default(); + let label = if e.source().is_none() { + // These two have the most cardinality, so we need to handle + // them separately. + let mut label_name = e.to_string(); + if label_name.contains(':') { + // if the error begins with a common tag e.g. "Missing required claim: ..." + // then convert it to a less cardinal version. This is lossy, but acceptable. + label_name = + term_to_label(label_name.split(':').next().unwrap_or_default()); + } else if label_name.contains(' ') { + // if a space still snuck through somehow, remove it. 
+ label_name = term_to_label(&label_name); + } + label_name + } else { + // If you need to dig into these, there's always the logs. + "Other".to_owned() + }; + tags.tags.insert("error".to_owned(), label); + metrics + .clone() + .incr_with_tags("notification.auth.bad_vapid.other", Some(tags)); + error!("Bad Aud: Unexpected VAPID error: {:?}", &e); return Err(e.into()); } }, @@ -306,7 +369,7 @@ fn validate_vapid_jwt( // Dump the claims. // Note, this can produce a LOT of log messages if this feature is enabled. - #[cfg(log_vapid)] + #[cfg(feature = "log_vapid")] if let Some(claims_str) = vapid.token.split('.').next() { use base64::Engine; info!( @@ -321,7 +384,7 @@ fn validate_vapid_jwt( ); }; - if token_data.claims.exp > (sec_since_epoch() + ONE_DAY_IN_SECONDS) { + if token_data.claims.exp > VapidClaims::default_exp() { // The expiration is too far in the future return Err(VapidError::FutureExpirationToken.into()); } @@ -335,8 +398,14 @@ fn validate_vapid_jwt( } }; + let domain = &settings.endpoint_url(); + if domain != &aud { - error!("Bad Aud: I am <{:?}>, asked for <{:?}> ", domain, aud); + info!( + "Bad Aud: I am <{:?}>, asked for <{:?}> ", + domain.as_str(), + token_data.claims.aud + ); metrics.clone().incr("notification.auth.bad_vapid.domain"); return Err(VapidError::InvalidAudience.into()); } @@ -345,16 +414,52 @@ fn validate_vapid_jwt( } #[cfg(test)] -mod tests { - use super::{validate_vapid_jwt, VapidClaims}; +pub mod tests { + use super::{term_to_label, validate_vapid_jwt, VapidClaims}; use crate::error::ApiErrorKind; use crate::extractors::subscription::repad_base64; use crate::headers::vapid::{VapidError, VapidHeader, VapidHeaderWithKey, VapidVersionData}; use crate::metrics::Metrics; - use autopush_common::util::{b64_decode_std, sec_since_epoch}; + use crate::settings::Settings; + + use autopush_common::util::b64_decode_std; + use lazy_static::lazy_static; use serde::{Deserialize, Serialize}; - use std::str::FromStr; - use url::Url; + + pub const 
PUB_KEY: &str = + "BM3bVjW_wuZC54alIbqjTbaBNtthriVtdZlchOyOSdbVYeYQu2i5inJdft7jUWIAy4O9xHBbY196Gf-1odb8hds"; + + lazy_static! { + static ref PRIV_KEY: Vec = b64_decode_std( + "MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgZImOgpRszunnU3j1\ + oX5UQiX8KU4X2OdbENuvc/t8wpmhRANCAATN21Y1v8LmQueGpSG6o022gTbbYa4l\ + bXWZXITsjknW1WHmELtouYpyXX7e41FiAMuDvcRwW2Nfehn/taHW/IXb", + ) + .unwrap(); + } + + /// Make a vapid header. + /// *NOTE*: This follows a python format where you only specify overrides. Any value not + /// specified will use a default value. + pub fn make_vapid(sub: &str, aud: &str, exp: u64, key: String) -> VapidHeaderWithKey { + let jwk_header = jsonwebtoken::Header::new(jsonwebtoken::Algorithm::ES256); + let enc_key = jsonwebtoken::EncodingKey::from_ec_der(&PRIV_KEY); + let claims = VapidClaims { + exp, + aud: aud.to_string(), + sub: sub.to_string(), + }; + let token = jsonwebtoken::encode(&jwk_header, &claims, &enc_key).unwrap(); + + VapidHeaderWithKey { + public_key: key.to_owned(), + vapid: VapidHeader { + scheme: "vapid".to_string(), + token, + version_data: VapidVersionData::Version1, + }, + } + } #[test] fn repad_base64_1_padding() { @@ -368,70 +473,41 @@ mod tests { #[test] fn vapid_aud_valid() { - let priv_key = b64_decode_std( - "MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgZImOgpRszunnU3j1\ - oX5UQiX8KU4X2OdbENuvc/t8wpmhRANCAATN21Y1v8LmQueGpSG6o022gTbbYa4l\ - bXWZXITsjknW1WHmELtouYpyXX7e41FiAMuDvcRwW2Nfehn/taHW/IXb", - ) - .unwrap(); // Specify a potentially invalid padding. 
let public_key = "BM3bVjW_wuZC54alIbqjTbaBNtthriVtdZlchOyOSdbVYeYQu2i5inJdft7jUWIAy4O9xHBbY196Gf-1odb8hds==".to_owned(); let domain = "https://push.services.mozilla.org"; - let jwk_header = jsonwebtoken::Header::new(jsonwebtoken::Algorithm::ES256); - let enc_key = jsonwebtoken::EncodingKey::from_ec_der(&priv_key); - let claims = VapidClaims { - exp: sec_since_epoch() + super::ONE_DAY_IN_SECONDS - 100, - aud: domain.to_owned(), - sub: "mailto:admin@example.com".to_owned(), + let test_settings = Settings { + endpoint_url: domain.to_owned(), + ..Default::default() }; - let token = jsonwebtoken::encode(&jwk_header, &claims, &enc_key).unwrap(); - let header = VapidHeaderWithKey { + let header = make_vapid( + "mailto:admin@example.com", + domain, + VapidClaims::default_exp() - 100, public_key, - vapid: VapidHeader { - scheme: "vapid".to_string(), - token, - version_data: VapidVersionData::Version1, - }, - }; - let result = validate_vapid_jwt(&header, &Url::from_str(domain).unwrap(), &Metrics::noop()); + ); + let result = validate_vapid_jwt(&header, &test_settings, &Metrics::noop()); assert!(result.is_ok()); } #[test] fn vapid_aud_invalid() { - let priv_key = b64_decode_std( - "MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgZImOgpRszunnU3j1\ - oX5UQiX8KU4X2OdbENuvc/t8wpmhRANCAATN21Y1v8LmQueGpSG6o022gTbbYa4l\ - bXWZXITsjknW1WHmELtouYpyXX7e41FiAMuDvcRwW2Nfehn/taHW/IXb", - ) - .unwrap(); - let public_key = "BM3bVjW_wuZC54alIbqjTbaBNtthriVtdZlchOyOSdbVYeYQu2i5inJdft7jUWIAy4O9xHBbY196Gf-1odb8hds".to_owned(); let domain = "https://push.services.mozilla.org"; - let jwk_header = jsonwebtoken::Header::new(jsonwebtoken::Algorithm::ES256); - let enc_key = jsonwebtoken::EncodingKey::from_ec_der(&priv_key); - let claims = VapidClaims { - exp: sec_since_epoch() + super::ONE_DAY_IN_SECONDS - 100, - aud: domain.to_owned(), - sub: "mailto:admin@example.com".to_owned(), - }; - let token = jsonwebtoken::encode(&jwk_header, &claims, &enc_key).unwrap(); - let header = VapidHeaderWithKey { - 
public_key, - vapid: VapidHeader { - scheme: "vapid".to_string(), - token, - version_data: VapidVersionData::Version1, - }, + let test_settings = Settings { + endpoint_url: domain.to_owned(), + ..Default::default() }; + let header = make_vapid( + "mailto:admin@example.com", + "https://example.com", + VapidClaims::default_exp() - 100, + PUB_KEY.to_owned(), + ); assert!(matches!( - validate_vapid_jwt( - &header, - &Url::from_str("http://example.org").unwrap(), - &Metrics::noop() - ) - .unwrap_err() - .kind, + validate_vapid_jwt(&header, &test_settings, &Metrics::noop()) + .unwrap_err() + .kind, ApiErrorKind::VapidError(VapidError::InvalidAudience) )); } @@ -445,37 +521,30 @@ mod tests { sub: String, } - let priv_key = b64_decode_std( - "MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgZImOgpRszunnU3j1\ - oX5UQiX8KU4X2OdbENuvc/t8wpmhRANCAATN21Y1v8LmQueGpSG6o022gTbbYa4l\ - bXWZXITsjknW1WHmELtouYpyXX7e41FiAMuDvcRwW2Nfehn/taHW/IXb", - ) - .unwrap(); - let public_key = "BM3bVjW_wuZC54alIbqjTbaBNtthriVtdZlchOyOSdbVYeYQu2i5inJdft7jUWIAy4O9xHBbY196Gf-1odb8hds".to_owned(); let domain = "https://push.services.mozilla.org"; + let test_settings = Settings { + endpoint_url: "domain".to_owned(), + ..Default::default() + }; let jwk_header = jsonwebtoken::Header::new(jsonwebtoken::Algorithm::ES256); - let enc_key = jsonwebtoken::EncodingKey::from_ec_der(&priv_key); + let enc_key = jsonwebtoken::EncodingKey::from_ec_der(&PRIV_KEY); let claims = StrExpVapidClaims { - exp: (sec_since_epoch() + super::ONE_DAY_IN_SECONDS - 100).to_string(), + exp: (VapidClaims::default_exp() - 100).to_string(), aud: domain.to_owned(), sub: "mailto:admin@example.com".to_owned(), }; let token = jsonwebtoken::encode(&jwk_header, &claims, &enc_key).unwrap(); let header = VapidHeaderWithKey { - public_key, + public_key: PUB_KEY.to_owned(), vapid: VapidHeader { scheme: "vapid".to_string(), token, version_data: VapidVersionData::Version1, }, }; - let vv = validate_vapid_jwt( - &header, - 
&Url::from_str("http://example.org").unwrap(), - &Metrics::noop(), - ) - .unwrap_err() - .kind; + let vv = validate_vapid_jwt(&header, &test_settings, &Metrics::noop()) + .unwrap_err() + .kind; assert!(matches![ vv, ApiErrorKind::VapidError(VapidError::InvalidVapid(_)) @@ -491,20 +560,18 @@ mod tests { sub: String, } - let priv_key = b64_decode_std( - "MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgZImOgpRszunnU3j1\ - oX5UQiX8KU4X2OdbENuvc/t8wpmhRANCAATN21Y1v8LmQueGpSG6o022gTbbYa4l\ - bXWZXITsjknW1WHmELtouYpyXX7e41FiAMuDvcRwW2Nfehn/taHW/IXb", - ) - .unwrap(); // pretty much matches the kind of key we get from some partners. let public_key_standard = "BM3bVjW/wuZC54alIbqjTbaBNtthriVtdZlchOyOSdbVYeYQu2i5inJdft7jUWIAy4O9xHBbY196Gf+1odb8hds=".to_owned(); let public_key_url_safe = "BM3bVjW_wuZC54alIbqjTbaBNtthriVtdZlchOyOSdbVYeYQu2i5inJdft7jUWIAy4O9xHBbY196Gf-1odb8hds=".to_owned(); let domain = "https://push.services.mozilla.org"; + let test_settings = Settings { + endpoint_url: domain.to_owned(), + ..Default::default() + }; let jwk_header = jsonwebtoken::Header::new(jsonwebtoken::Algorithm::ES256); - let enc_key = jsonwebtoken::EncodingKey::from_ec_der(&priv_key); + let enc_key = jsonwebtoken::EncodingKey::from_ec_der(&PRIV_KEY); let claims = VapidClaims { - exp: sec_since_epoch() + super::ONE_DAY_IN_SECONDS - 100, + exp: VapidClaims::default_exp() - 100, aud: domain.to_owned(), sub: "mailto:admin@example.com".to_owned(), }; @@ -518,9 +585,7 @@ mod tests { version_data: VapidVersionData::Version1, }, }; - assert!( - validate_vapid_jwt(&header, &Url::from_str(domain).unwrap(), &Metrics::noop()).is_ok() - ); + assert!(validate_vapid_jwt(&header, &test_settings, &Metrics::noop()).is_ok()); // try standard form with no padding let header = VapidHeaderWithKey { public_key: public_key_standard.trim_end_matches('=').to_owned(), @@ -530,9 +595,7 @@ mod tests { version_data: VapidVersionData::Version1, }, }; - assert!( - validate_vapid_jwt(&header, 
&Url::from_str(domain).unwrap(), &Metrics::noop()).is_ok() - ); + assert!(validate_vapid_jwt(&header, &test_settings, &Metrics::noop()).is_ok()); // try URL safe form with padding let header = VapidHeaderWithKey { public_key: public_key_url_safe.clone(), @@ -542,9 +605,7 @@ mod tests { version_data: VapidVersionData::Version1, }, }; - assert!( - validate_vapid_jwt(&header, &Url::from_str(domain).unwrap(), &Metrics::noop()).is_ok() - ); + assert!(validate_vapid_jwt(&header, &test_settings, &Metrics::noop()).is_ok()); // try URL safe form without padding let header = VapidHeaderWithKey { public_key: public_key_url_safe.trim_end_matches('=').to_owned(), @@ -554,9 +615,7 @@ mod tests { version_data: VapidVersionData::Version1, }, }; - assert!( - validate_vapid_jwt(&header, &Url::from_str(domain).unwrap(), &Metrics::noop()).is_ok() - ); + assert!(validate_vapid_jwt(&header, &test_settings, &Metrics::noop()).is_ok()); } #[test] @@ -568,40 +627,42 @@ mod tests { sub: Option, } - let priv_key = b64_decode_std( - "MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgZImOgpRszunnU3j1\ - oX5UQiX8KU4X2OdbENuvc/t8wpmhRANCAATN21Y1v8LmQueGpSG6o022gTbbYa4l\ - bXWZXITsjknW1WHmELtouYpyXX7e41FiAMuDvcRwW2Nfehn/taHW/IXb", - ) - .unwrap(); - let public_key = "BM3bVjW_wuZC54alIbqjTbaBNtthriVtdZlchOyOSdbVYeYQu2i5inJdft7jUWIAy4O9xHBbY196Gf-1odb8hds".to_owned(); let domain = "https://push.services.mozilla.org"; + let test_settings = Settings { + endpoint_url: domain.to_owned(), + ..Default::default() + }; let jwk_header = jsonwebtoken::Header::new(jsonwebtoken::Algorithm::ES256); - let enc_key = jsonwebtoken::EncodingKey::from_ec_der(&priv_key); + let enc_key = jsonwebtoken::EncodingKey::from_ec_der(&PRIV_KEY); let claims = NoSubVapidClaims { - exp: sec_since_epoch() + super::ONE_DAY_IN_SECONDS - 100, + exp: VapidClaims::default_exp() - 100, aud: domain.to_owned(), sub: None, }; let token = jsonwebtoken::encode(&jwk_header, &claims, &enc_key).unwrap(); let header = VapidHeaderWithKey { - 
public_key, + public_key: PUB_KEY.to_owned(), vapid: VapidHeader { scheme: "vapid".to_string(), token, version_data: VapidVersionData::Version1, }, }; - let vv = validate_vapid_jwt( - &header, - &Url::from_str("http://example.org").unwrap(), - &Metrics::noop(), - ) - .unwrap_err() - .kind; + let vv = validate_vapid_jwt(&header, &test_settings, &Metrics::noop()) + .unwrap_err() + .kind; assert!(matches![ vv, ApiErrorKind::VapidError(VapidError::InvalidVapid(_)) ]) } + + #[test] + fn test_crapitalize() { + assert_eq!( + "LabelFieldWithoutData", + term_to_label("LabelField without data") + ); + assert_eq!("UntouchedField", term_to_label("UntouchedField")); + } } diff --git a/autoendpoint/src/extractors/user.rs b/autoendpoint/src/extractors/user.rs index dc5301bea..ecddfedff 100644 --- a/autoendpoint/src/extractors/user.rs +++ b/autoendpoint/src/extractors/user.rs @@ -47,37 +47,14 @@ pub async fn validate_user( } if router_type == RouterType::WebPush { - validate_webpush_user(user, channel_id, app_state.db.as_ref(), &app_state.metrics).await?; + validate_webpush_user(user, channel_id, app_state.db.as_ref()).await?; } Ok(router_type) } /// Make sure the user is not inactive and the subscription channel exists -async fn validate_webpush_user( - user: &User, - channel_id: &Uuid, - db: &dyn DbClient, - metrics: &StatsdClient, -) -> ApiResult<()> { - if let Some(rotating_message_table) = db.rotating_message_table() { - // DynamoDB: Make sure the user is active (has a valid message table) - let Some(ref current_month) = user.current_month else { - debug!("Missing `current_month` value, dropping user"; "user" => ?user); - drop_user(user.uaid, db, metrics).await?; - return Err(ApiErrorKind::NoSubscription.into()); - }; - - if current_month != rotating_message_table { - debug!("User is inactive, dropping user"; - "db.rotating_message_table" => rotating_message_table, - "user.current_month" => current_month, - "user" => ?user); - drop_user(user.uaid, db, metrics).await?; - 
return Err(ApiErrorKind::NoSubscription.into()); - } - } - +async fn validate_webpush_user(user: &User, channel_id: &Uuid, db: &dyn DbClient) -> ApiResult<()> { // Make sure the subscription channel exists let channel_ids = db.get_channels(&user.uaid).await?; diff --git a/autoendpoint/src/headers/vapid.rs b/autoendpoint/src/headers/vapid.rs index bd98e9280..349e9fc0e 100644 --- a/autoendpoint/src/headers/vapid.rs +++ b/autoendpoint/src/headers/vapid.rs @@ -1,9 +1,76 @@ -use crate::headers::util::split_key_value; use std::collections::HashMap; +use std::fmt; + +use base64::Engine; +use serde::{Deserialize, Serialize}; +use serde_json::Value; use thiserror::Error; +use crate::headers::util::split_key_value; +use autopush_common::util::{sec_since_epoch, ONE_DAY_IN_SECONDS}; + pub const ALLOWED_SCHEMES: [&str; 3] = ["bearer", "webpush", "vapid"]; +/* +The Assertion block for the VAPID header. + +Please note: We require the `sub` claim in addition to the `exp` and `aud`. +See [HTTP Endpoints for Notifications::Lexicon::{vapid_key}](https://mozilla-services.github.io/autopush-rs/http.html#lexicon-1) +for details. 
+ + */ +#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)] +pub struct VapidClaims { + pub exp: u64, + pub aud: String, + pub sub: String, +} + +impl Default for VapidClaims { + fn default() -> Self { + Self { + exp: VapidClaims::default_exp(), + aud: "No audience".to_owned(), + sub: "No sub".to_owned(), + } + } +} + +impl VapidClaims { + pub fn default_exp() -> u64 { + sec_since_epoch() + ONE_DAY_IN_SECONDS + } +} + +impl fmt::Display for VapidClaims { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("VapidClaims") + .field("exp", &self.exp) + .field("aud", &self.aud) + .field("sub", &self.sub) + .finish() + } +} + +impl TryFrom for VapidClaims { + type Error = VapidError; + fn try_from(header: VapidHeader) -> Result { + let b64_str = header + .token + .split('.') + .nth(1) + .ok_or(VapidError::InvalidVapid(header.token.to_string()))?; + let value_str = String::from_utf8( + base64::engine::general_purpose::URL_SAFE_NO_PAD + .decode(b64_str) + .map_err(|e| VapidError::InvalidVapid(e.to_string()))?, + ) + .map_err(|e| VapidError::InvalidVapid(e.to_string()))?; + serde_json::from_str::(&value_str) + .map_err(|e| VapidError::InvalidVapid(e.to_string())) + } +} + /// Parses the VAPID authorization header #[derive(Clone, Debug, Eq, PartialEq)] pub struct VapidHeader { @@ -75,6 +142,33 @@ impl VapidHeader { VapidVersionData::Version2 { .. 
} => 2, } } + + pub fn sub(&self) -> Result { + let data: HashMap = serde_json::from_str(&self.token).map_err(|e| { + warn!("🔐 Vapid: {:?}", e); + VapidError::SubInvalid + })?; + + if let Some(sub_candiate) = data.get("sub") { + if let Some(sub) = sub_candiate.as_str() { + if !sub.starts_with("mailto:") || !sub.starts_with("https://") { + info!("🔐 Vapid: Bad Format {:?}", sub); + return Err(VapidError::SubBadFormat); + } + if sub.is_empty() { + info!("🔐 Empty Vapid sub"); + return Err(VapidError::SubEmpty); + } + info!("🔐 Vapid: sub: {:?}", sub); + return Ok(sub.to_owned()); + } + } + Err(VapidError::SubMissing) + } + + pub fn claims(&self) -> Result { + VapidClaims::try_from(self.clone()) + } } #[derive(Debug, Error, Eq, PartialEq)] @@ -97,36 +191,72 @@ pub enum VapidError { FutureExpirationToken, #[error("Unknown auth scheme")] UnknownScheme, + #[error("Unparsable sub string")] + SubInvalid, + #[error("Improperly formatted sub string")] + SubBadFormat, + #[error("Empty sub string")] + SubEmpty, + #[error("Missing sub")] + SubMissing, +} + +impl VapidError { + pub fn as_metric(&self) -> &str { + match self { + Self::MissingToken => "missing_token", + Self::InvalidVapid(_) => "invalid_vapid", + Self::MissingKey => "missing_key", + Self::InvalidKey(_) => "invalid_key", + Self::InvalidAudience => "invalid_audience", + Self::InvalidExpiry => "invalid_expiry", + Self::KeyMismatch => "key_mismatch", + Self::FutureExpirationToken => "future_expiration_token", + Self::UnknownScheme => "unknown_scheme", + Self::SubInvalid => "invalid_sub", + Self::SubBadFormat => "bad_format_sub", + Self::SubEmpty => "empty_sub", + Self::SubMissing => "missing_sub", + } + } } #[cfg(test)] mod tests { - use super::{VapidHeader, VapidVersionData}; - - const TOKEN: &str = "eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiJ9.eyJhdWQiOiJodHRwc\ - zovL3B1c2guc2VydmljZXMubW96aWxsYS5jb20iLCJzdWIiOiJtYWlsdG86YWRtaW5AZXhh\ - bXBsZS5jb20iLCJleHAiOiIxNDYzMDAxMzQwIn0.y_dvPoTLBo60WwtocJmaTWaNet81_jT\ - 
TJuyYt2CkxykLqop69pirSWLLRy80no9oTL8SDLXgTaYF1OrTIEkDow"; - const KEY: &str = "BAS7pgV_RFQx5yAwSePfrmjvNm1sDXyMpyDSCL1IXRU32cdtopiAmSys\ - WTCrL_aZg2GE1B_D9v7weQVXC3zDmnQ"; + + use super::{VapidClaims, VapidHeader, VapidVersionData}; + + // This was generated externally using the py_vapid package. const VALID_HEADER: &str = "vapid t=eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiJ9.ey\ - JhdWQiOiJodHRwczovL3B1c2guc2VydmljZXMubW96aWxsYS5jb20iLCJzdWIiOiJtYWlsd\ - G86YWRtaW5AZXhhbXBsZS5jb20iLCJleHAiOiIxNDYzMDAxMzQwIn0.y_dvPoTLBo60Wwto\ - cJmaTWaNet81_jTTJuyYt2CkxykLqop69pirSWLLRy80no9oTL8SDLXgTaYF1OrTIEkDow,\ - k=BAS7pgV_RFQx5yAwSePfrmjvNm1sDXyMpyDSCL1IXRU32cdtopiAmSysWTCrL_aZg2GE1\ - B_D9v7weQVXC3zDmnQ"; + JhdWQiOiJodHRwczovL3B1c2guc2VydmljZXMubW96aWxsYS5jb20iLCJleHAiOjE3MTM1N\ + jQ4NzIsInN1YiI6Im1haWx0bzphZG1pbkBleGFtcGxlLmNvbSJ9.t7uOYm8nbqFkuNpDeln\ + -UeqSC58xu96Mc9tUVifQu1zAAndHYwvMd3-u--PuUo3S_VrqYXEaIlNIOOrGd3iUBA,k=B\ + LMymkOqvT6OZ1o9etCqV4jGPkvOXNz5FdBjsAR9zR5oeCV1x5CBKuSLTlHon-H_boHTzMtM\ + oNHsAGDlDB6X7vI"; #[test] fn parse_succeeds() { + // brain dead header parser. 
+ let mut parts = VALID_HEADER.split(' ').nth(1).unwrap().split(','); + let token = parts.next().unwrap().split('=').nth(1).unwrap().to_string(); + let public_key = parts.next().unwrap().split('=').nth(1).unwrap().to_string(); + + let expected_header = VapidHeader { + scheme: "vapid".to_string(), + token, + version_data: VapidVersionData::Version2 { public_key }, + }; + + let returned_header = VapidHeader::parse(VALID_HEADER); + assert_eq!(returned_header, Ok(expected_header.clone())); + assert_eq!( - VapidHeader::parse(VALID_HEADER), - Ok(VapidHeader { - scheme: "vapid".to_string(), - token: TOKEN.to_string(), - version_data: VapidVersionData::Version2 { - public_key: KEY.to_string() - } + returned_header.unwrap().claims(), + Ok(VapidClaims { + exp: 1713564872, + aud: "https://push.services.mozilla.com".to_string(), + sub: "mailto:admin@example.com".to_string() }) - ); + ) } } diff --git a/autoendpoint/src/routers/adm/client.rs b/autoendpoint/src/routers/adm/client.rs deleted file mode 100644 index 9da510ee7..000000000 --- a/autoendpoint/src/routers/adm/client.rs +++ /dev/null @@ -1,362 +0,0 @@ -use crate::routers::adm::error::AdmError; -use crate::routers::adm::settings::{AdmProfile, AdmSettings}; -use crate::routers::common::message_size_check; -use crate::routers::RouterError; -use autopush_common::util::sec_since_epoch; -use futures::lock::Mutex; -use reqwest::StatusCode; -use serde::Deserialize; -use std::collections::HashMap; -use std::time::Duration; -use url::Url; - -/// Holds profile-specific ADM data and authentication. This client handles -/// sending notifications to ADM. 
-pub struct AdmClient { - base_url: Url, - profile: AdmProfile, - timeout: Duration, - max_data: usize, - http: reqwest::Client, - token_info: Mutex, -} - -/// Holds information about the cached access token -#[derive(Default)] -struct TokenInfo { - token: String, - expiration_time: u64, -} - -/// A successful OAuth token response -#[derive(Deserialize)] -struct TokenResponse { - access_token: String, - expires_in: u64, -} - -/// A successful send response -#[derive(Deserialize)] -struct AdmSendResponse { - #[serde(rename = "registrationID")] - registration_id: String, -} - -/// An error returned by ADM -#[derive(Deserialize)] -struct AdmResponseError { - reason: Option, -} - -impl AdmClient { - /// Create an `AdmClient` using the provided profile - pub fn new(settings: &AdmSettings, profile: AdmProfile, http: reqwest::Client) -> Self { - AdmClient { - base_url: settings.base_url.clone(), - profile, - timeout: Duration::from_secs(settings.timeout as u64), - max_data: settings.max_data, - http, - // The default TokenInfo has dummy values to trigger a token fetch - token_info: Mutex::default(), - } - } - - /// Get an ADM access token (from cache or request a new one) - async fn get_access_token(&self) -> Result { - let mut token_info = self.token_info.lock().await; - - if token_info.expiration_time > sec_since_epoch() + 60 { - trace!("Using cached access token"); - return Ok(token_info.token.clone()); - } - - trace!("Access token is out of date, requesting a new one"); - let oauth_url = self.base_url.join("auth/O2/token").unwrap(); - let response = self - .http - .post(oauth_url) - .form(&serde_json::json!({ - "grant_type": "client_credentials", - "scope": "messaging:push", - "client_id": &self.profile.client_id, - "client_secret": &self.profile.client_secret, - })) - .send() - .await - .map_err(AdmError::Http)?; - trace!("response = {:?}", response); - - if response.status() != 200 { - let status = response.status(); - let error_response: AdmResponseError = response 
- .json() - .await - .map_err(AdmError::DeserializeResponse)?; - return Err(RouterError::Upstream { - status: status.to_string(), - message: error_response - .reason - .unwrap_or_else(|| "Unknown reason".to_string()), - }); - } - - let token_response: TokenResponse = response - .json() - .await - .map_err(AdmError::DeserializeResponse)?; - token_info.token = token_response.access_token; - token_info.expiration_time = sec_since_epoch() + token_response.expires_in; - - Ok(token_info.token.clone()) - } - - /// Send the message data to ADM. The device's current registration ID is - /// returned. If it is different than the current stored ID, the stored ID - /// should be updated. - pub async fn send( - &self, - data: HashMap<&'static str, String>, - registration_id: String, - ttl: usize, - ) -> Result { - // Build the ADM message - let message = serde_json::json!({ - "data": data, - "expiresAfter": ttl, - }); - let message_json = message.to_string(); - message_size_check(message_json.as_bytes(), self.max_data)?; - - // Prepare request data - let access_token = self.get_access_token().await?; - let url = self - .base_url - .join(&format!( - "messaging/registrations/{registration_id}/messages" - )) - .map_err(AdmError::ParseUrl)?; - - // Make the request - let response = self - .http - .post(url) - .header("Authorization", format!("Bearer {}", access_token.as_str())) - .header("Content-Type", "application/json") - .header("Accept", "application/json") - .header( - "X-Amzn-Type-Version", - "com.amazon.device.messaging.ADMMessage@1.0", - ) - .header( - "X-Amzn-Accept-Type", - "com.amazon.device.messaging.ADMSendResult@1.0", - ) - .body(message_json) - .timeout(self.timeout) - .send() - .await - .map_err(|e| { - if e.is_timeout() { - RouterError::RequestTimeout - } else { - RouterError::Connect(e) - } - })?; - - // Handle error - let status = response.status(); - if status != 200 { - let response_error: AdmResponseError = response - .json() - .await - 
.map_err(AdmError::DeserializeResponse)?; - - return Err(match (status, response_error.reason) { - (StatusCode::UNAUTHORIZED, _) => RouterError::Authentication, - (StatusCode::NOT_FOUND, _) => RouterError::NotFound, - (status, reason) => RouterError::Upstream { - status: status.to_string(), - message: reason.unwrap_or_else(|| "Unknown reason".to_string()), - }, - }); - } - - let send_response: AdmSendResponse = response - .json() - .await - .map_err(AdmError::DeserializeResponse)?; - Ok(send_response.registration_id) - } -} - -#[cfg(test)] -pub mod tests { - use crate::routers::adm::client::AdmClient; - use crate::routers::adm::settings::{AdmProfile, AdmSettings}; - use crate::routers::RouterError; - use std::collections::HashMap; - use url::Url; - - pub const REGISTRATION_ID: &str = "test-registration-id"; - pub const CLIENT_ID: &str = "test-client-id"; - pub const CLIENT_SECRET: &str = "test-client-secret"; - const ACCESS_TOKEN: &str = "test-access-token"; - - /// Mock the OAuth token endpoint to provide the access token - pub fn mock_token_endpoint() -> mockito::Mock { - mockito::mock("POST", "/auth/O2/token") - .with_body( - serde_json::json!({ - "access_token": ACCESS_TOKEN, - "expires_in": 3600, - "scope": "messaging:push", - "token_type": "Bearer" - }) - .to_string(), - ) - .create() - } - - /// Start building a mock for the ADM endpoint - pub fn mock_adm_endpoint_builder() -> mockito::Mock { - mockito::mock( - "POST", - format!("/messaging/registrations/{REGISTRATION_ID}/messages").as_str(), - ) - } - - /// Make an AdmClient which uses the mock server - fn make_client() -> AdmClient { - AdmClient::new( - &AdmSettings { - base_url: Url::parse(&mockito::server_url()).unwrap(), - ..Default::default() - }, - AdmProfile { - client_id: CLIENT_ID.to_string(), - client_secret: CLIENT_SECRET.to_string(), - }, - reqwest::Client::new(), - ) - } - - /// The ADM client uses the access token and parameters to build the - /// expected request. 
- #[tokio::test] - async fn sends_correct_request() { - let client = make_client(); - let _token_mock = mock_token_endpoint(); - let adm_mock = mock_adm_endpoint_builder() - .match_header("Authorization", format!("Bearer {ACCESS_TOKEN}").as_str()) - .match_header("Content-Type", "application/json") - .match_header("Accept", "application/json") - .match_header( - "X-Amzn-Type-Version", - "com.amazon.device.messaging.ADMMessage@1.0", - ) - .match_header( - "X-Amzn-Accept-Type", - "com.amazon.device.messaging.ADMSendResult@1.0", - ) - .match_body(r#"{"data":{"is_test":"true"},"expiresAfter":42}"#) - .with_body(r#"{"registrationID":"test-registration-id2"}"#) - .create(); - - let mut data = HashMap::new(); - data.insert("is_test", "true".to_string()); - - let result = client.send(data, REGISTRATION_ID.to_string(), 42).await; - assert!(result.is_ok(), "result = {result:?}"); - assert_eq!(result.unwrap(), "test-registration-id2"); - adm_mock.assert(); - } - - /// Authorization errors are handled - #[tokio::test] - async fn unauthorized() { - let client = make_client(); - let _token_mock = mock_token_endpoint(); - let _adm_mock = mock_adm_endpoint_builder() - .with_status(401) - .with_body(r#"{"reason":"test-message"}"#) - .create(); - - let result = client - .send(HashMap::new(), REGISTRATION_ID.to_string(), 42) - .await; - assert!(result.is_err()); - assert!( - matches!(result.as_ref().unwrap_err(), RouterError::Authentication), - "result = {result:?}" - ); - } - - /// 404 errors are handled - #[tokio::test] - async fn not_found() { - let client = make_client(); - let _token_mock = mock_token_endpoint(); - let _adm_mock = mock_adm_endpoint_builder() - .with_status(404) - .with_body(r#"{"reason":"test-message"}"#) - .create(); - - let result = client - .send(HashMap::new(), REGISTRATION_ID.to_string(), 42) - .await; - assert!(result.is_err()); - assert!( - matches!(result.as_ref().unwrap_err(), RouterError::NotFound), - "result = {result:?}" - ); - } - - /// Unhandled 
errors (where a reason is returned) are wrapped and returned - #[tokio::test] - async fn other_adm_error() { - let client = make_client(); - let _token_mock = mock_token_endpoint(); - let _fcm_mock = mock_adm_endpoint_builder() - .with_status(400) - .with_body(r#"{"reason":"test-message"}"#) - .create(); - - let result = client - .send(HashMap::new(), REGISTRATION_ID.to_string(), 42) - .await; - assert!(result.is_err()); - assert!( - matches!( - result.as_ref().unwrap_err(), - RouterError::Upstream { status, message } - if status == "400 Bad Request" && message == "test-message" - ), - "result = {result:?}" - ); - } - - /// Unknown errors (where a reason is NOT returned) are handled - #[tokio::test] - async fn unknown_adm_error() { - let client = make_client(); - let _token_mock = mock_token_endpoint(); - let _fcm_mock = mock_adm_endpoint_builder() - .with_status(400) - .with_body("{}") - .create(); - - let result = client - .send(HashMap::new(), REGISTRATION_ID.to_string(), 42) - .await; - assert!(result.is_err()); - assert!( - matches!( - result.as_ref().unwrap_err(), - RouterError::Upstream { status, message } - if status == "400 Bad Request" && message == "Unknown reason" - ), - "result = {result:?}" - ); - } -} diff --git a/autoendpoint/src/routers/adm/error.rs b/autoendpoint/src/routers/adm/error.rs deleted file mode 100644 index 7db3fd096..000000000 --- a/autoendpoint/src/routers/adm/error.rs +++ /dev/null @@ -1,67 +0,0 @@ -use crate::error::ApiErrorKind; -use crate::routers::RouterError; -use actix_web::http::StatusCode; - -/// Errors that may occur in the Amazon Device Messaging router -#[derive(thiserror::Error, Debug)] -pub enum AdmError { - #[error("HTTP error: {0}")] - Http(#[from] reqwest::Error), - - #[error("Error while building a URL: {0}")] - ParseUrl(#[from] url::ParseError), - - #[error("Failed to decode the profile settings")] - ProfileSettingsDecode(#[from] serde_json::Error), - - #[error("Unable to deserialize ADM response")] - 
DeserializeResponse(#[source] reqwest::Error), - - #[error("No registration ID found for user")] - NoRegistrationId, - - #[error("No ADM profile found for user")] - NoProfile, - - #[error("User has invalid ADM profile")] - InvalidProfile, -} - -impl AdmError { - /// Get the associated HTTP status code - pub fn status(&self) -> StatusCode { - match self { - AdmError::ParseUrl(_) => StatusCode::BAD_REQUEST, - - AdmError::NoRegistrationId | AdmError::NoProfile | AdmError::InvalidProfile => { - StatusCode::GONE - } - - AdmError::Http(_) | AdmError::ProfileSettingsDecode(_) => { - StatusCode::INTERNAL_SERVER_ERROR - } - - AdmError::DeserializeResponse(_) => StatusCode::BAD_GATEWAY, - } - } - - /// Get the associated error number - pub fn errno(&self) -> Option { - match self { - AdmError::NoRegistrationId | AdmError::NoProfile | AdmError::InvalidProfile => { - Some(106) - } - - AdmError::Http(_) - | AdmError::ParseUrl(_) - | AdmError::ProfileSettingsDecode(_) - | AdmError::DeserializeResponse(_) => None, - } - } -} - -impl From for ApiErrorKind { - fn from(e: AdmError) -> Self { - ApiErrorKind::Router(RouterError::Adm(e)) - } -} diff --git a/autoendpoint/src/routers/adm/mod.rs b/autoendpoint/src/routers/adm/mod.rs deleted file mode 100644 index 51e0945b0..000000000 --- a/autoendpoint/src/routers/adm/mod.rs +++ /dev/null @@ -1,6 +0,0 @@ -//! 
A notification router for Amazon devices, using Amazon Device Messaging - -pub mod client; -pub mod error; -pub mod router; -pub mod settings; diff --git a/autoendpoint/src/routers/adm/router.rs b/autoendpoint/src/routers/adm/router.rs deleted file mode 100644 index 3c3431bb9..000000000 --- a/autoendpoint/src/routers/adm/router.rs +++ /dev/null @@ -1,362 +0,0 @@ -use autopush_common::db::client::DbClient; - -use crate::error::{ApiErrorKind, ApiResult}; -use crate::extractors::notification::Notification; -use crate::extractors::router_data_input::RouterDataInput; -use crate::routers::adm::client::AdmClient; -use crate::routers::adm::error::AdmError; -use crate::routers::adm::settings::AdmSettings; -use crate::routers::common::{build_message_data, handle_error, incr_success_metrics}; -use crate::routers::{Router, RouterError, RouterResponse}; -use async_trait::async_trait; -use cadence::StatsdClient; -use serde_json::Value; -use std::collections::HashMap; -use std::sync::Arc; -use url::Url; - -/// 31 days, specified by ADM -const MAX_TTL: usize = 2419200; - -/// Amazon Device Messaging router -pub struct AdmRouter { - settings: AdmSettings, - endpoint_url: Url, - metrics: Arc, - db: Box, - /// A map from profile name to an authenticated ADM client - clients: HashMap, -} - -impl AdmRouter { - /// Create a new `AdmRouter` - pub fn new( - settings: AdmSettings, - endpoint_url: Url, - http: reqwest::Client, - metrics: Arc, - db: Box, - ) -> Result { - let profiles = settings.profiles()?; - - let clients: HashMap = profiles - .into_iter() - .map(|(name, profile)| (name, AdmClient::new(&settings, profile, http.clone()))) - .collect(); - trace!("Initialized {} ADM clients", clients.len()); - - Ok(Self { - settings, - endpoint_url, - metrics, - db, - clients, - }) - } - - /// if we have any clients defined, this connection is "active" - pub fn active(&self) -> bool { - !self.clients.is_empty() - } -} - -#[async_trait(?Send)] -impl Router for AdmRouter { - fn register( - 
&self, - router_input: &RouterDataInput, - app_id: &str, - ) -> Result, RouterError> { - if !self.clients.contains_key(app_id) { - return Err(AdmError::InvalidProfile.into()); - } - - let mut router_data = HashMap::new(); - router_data.insert( - "token".to_string(), - serde_json::to_value(&router_input.token).unwrap(), - ); - router_data.insert( - "creds".to_string(), - serde_json::json!({ "profile": app_id }), - ); - - Ok(router_data) - } - - async fn route_notification(&self, notification: &Notification) -> ApiResult { - debug!( - "Sending ADM notification to UAID {}", - notification.subscription.user.uaid - ); - trace!("Notification = {:?}", notification); - - let router_data = notification - .subscription - .user - .router_data - .as_ref() - .ok_or(AdmError::NoRegistrationId)?; - let registration_id = router_data - .get("token") - .and_then(Value::as_str) - .ok_or(AdmError::NoRegistrationId)?; - let profile = router_data - .get("creds") - .and_then(Value::as_object) - .and_then(|obj| obj.get("profile")) - .and_then(Value::as_str) - .ok_or(AdmError::NoProfile)?; - let ttl = MAX_TTL.min(self.settings.min_ttl.max(notification.headers.ttl as usize)); - let message_data = build_message_data(notification)?; - - // Send the notification to ADM - let client = self.clients.get(profile).ok_or(AdmError::InvalidProfile)?; - trace!("Sending message to ADM: {:?}", message_data); - let new_registration_id = match client - .send(message_data, registration_id.to_string(), ttl) - .await - { - Ok(id) => id, - Err(e) => { - return Err(handle_error( - e, - &self.metrics, - self.db.as_ref(), - "adm", - profile, - notification.subscription.user.uaid, - ) - .await) - } - }; - - // Sent successfully, update metrics and make response - trace!("ADM request was successful"); - incr_success_metrics(&self.metrics, "adm", profile, notification); - - // If the returned registration ID is different than the old one, update - // the user. 
- if new_registration_id != registration_id { - trace!("ADM reports a new registration ID for user, updating our copy"); - let mut user = notification.subscription.user.clone(); - let mut router_data = router_data.clone(); - - router_data.insert( - "token".to_string(), - serde_json::to_value(&new_registration_id).unwrap(), - ); - user.router_data = Some(router_data); - - if !self.db.update_user(&mut user).await? { - // Unlikely to occur on mobile records - return Err(ApiErrorKind::General("Conditional update failed".to_owned()).into()); - } - } - - Ok(RouterResponse::success( - self.endpoint_url - .join(&format!("/m/{}", notification.message_id)) - .expect("Message ID is not URL-safe") - .to_string(), - notification.headers.ttl as usize, - )) - } -} - -#[cfg(test)] -mod tests { - use crate::error::ApiErrorKind; - use crate::extractors::routers::RouterType; - use crate::routers::adm::client::tests::{ - mock_adm_endpoint_builder, mock_token_endpoint, CLIENT_ID, CLIENT_SECRET, REGISTRATION_ID, - }; - use crate::routers::adm::error::AdmError; - use crate::routers::adm::router::AdmRouter; - use crate::routers::adm::settings::AdmSettings; - use crate::routers::common::tests::{make_notification, CHANNEL_ID}; - use crate::routers::RouterError; - use crate::routers::{Router, RouterResponse}; - use autopush_common::db::{client::DbClient, mock::MockDbClient, User}; - use cadence::StatsdClient; - use mockall::predicate; - use serde_json::Value; - use std::collections::HashMap; - use std::sync::Arc; - use url::Url; - - /// Create a router for testing - fn make_router(db: Box) -> AdmRouter { - AdmRouter::new( - AdmSettings { - base_url: Url::parse(&mockito::server_url()).unwrap(), - profiles: format!( - r#"{{ "dev": {{ "client_id": "{CLIENT_ID}", "client_secret": "{CLIENT_SECRET}" }} }}"# - ), - ..Default::default() - }, - Url::parse("http://localhost:8080/").unwrap(), - reqwest::Client::new(), - Arc::new(StatsdClient::from_sink("autopush", cadence::NopMetricSink)), - db, - ) - 
.unwrap() - } - - /// Create default user router data - fn default_router_data() -> HashMap { - let mut map = HashMap::new(); - map.insert( - "token".to_string(), - serde_json::to_value(REGISTRATION_ID).unwrap(), - ); - map.insert("creds".to_string(), serde_json::json!({ "profile": "dev" })); - map - } - - /// A notification with no data is sent to ADM - #[tokio::test] - async fn successful_routing_no_data() { - let db = MockDbClient::new().into_boxed_arc(); - let router = make_router(db); - let _token_mock = mock_token_endpoint(); - let adm_mock = mock_adm_endpoint_builder() - .match_body( - serde_json::json!({ - "data": { "chid": CHANNEL_ID }, - "expiresAfter": 60 - }) - .to_string() - .as_str(), - ) - .with_body(r#"{"registrationID":"test-registration-id"}"#) - .create(); - let notification = make_notification(default_router_data(), None, RouterType::ADM); - - let result = router.route_notification(¬ification).await; - assert!(result.is_ok(), "result = {result:?}"); - assert_eq!( - result.unwrap(), - RouterResponse::success("http://localhost:8080/m/test-message-id".to_string(), 0) - ); - adm_mock.assert(); - } - - /// A notification with data is sent to ADM - #[tokio::test] - async fn successful_routing_with_data() { - let db = MockDbClient::new().into_boxed_arc(); - let router = make_router(db); - let _token_mock = mock_token_endpoint(); - let adm_mock = mock_adm_endpoint_builder() - .match_body( - serde_json::json!({ - "data": { - "chid": CHANNEL_ID, - "body": "test-data", - "con": "test-encoding", - "enc": "test-encryption", - "cryptokey": "test-crypto-key", - "enckey": "test-encryption-key" - }, - "expiresAfter": 60 - }) - .to_string() - .as_str(), - ) - .with_body(r#"{"registrationID":"test-registration-id"}"#) - .create(); - let data = "test-data".to_string(); - let notification = make_notification(default_router_data(), Some(data), RouterType::ADM); - - let result = router.route_notification(¬ification).await; - assert!(result.is_ok(), "result = 
{result:?}"); - assert_eq!( - result.unwrap(), - RouterResponse::success("http://localhost:8080/m/test-message-id".to_string(), 0) - ); - adm_mock.assert(); - } - - /// If there is no client for the user's profile, an error is returned and - /// the ADM request is not sent. - #[tokio::test] - async fn missing_client() { - let db = MockDbClient::new().into_boxed_arc(); - let router = make_router(db); - let _token_mock = mock_token_endpoint(); - let adm_mock = mock_adm_endpoint_builder().expect(0).create(); - let mut router_data = default_router_data(); - router_data.insert( - "creds".to_string(), - serde_json::json!({ "profile": "unknown-profile" }), - ); - let notification = make_notification(router_data, None, RouterType::ADM); - - let result = router.route_notification(¬ification).await; - assert!(result.is_err()); - assert!( - matches!( - result.as_ref().unwrap_err().kind, - ApiErrorKind::Router(RouterError::Adm(AdmError::InvalidProfile)) - ), - "result = {result:?}" - ); - adm_mock.assert(); - } - - /// If the ADM user no longer exists (404), we drop the user from our database - #[tokio::test] - async fn no_adm_user() { - let notification = make_notification(default_router_data(), None, RouterType::ADM); - let mut db = MockDbClient::new(); - db.expect_remove_user() - .with(predicate::eq(notification.subscription.user.uaid)) - .times(1) - .return_once(|_| Ok(())); - - let router = make_router(db.into_boxed_arc()); - let _token_mock = mock_token_endpoint(); - let _adm_mock = mock_adm_endpoint_builder() - .with_status(404) - .with_body(r#"{"reason":"test-message"}"#) - .create(); - - let result = router.route_notification(¬ification).await; - assert!(result.is_err()); - assert!( - matches!( - result.as_ref().unwrap_err().kind, - ApiErrorKind::Router(RouterError::NotFound) - ), - "result = {result:?}" - ); - } - - /// If ADM returns a new registration token, update our copy to match - #[tokio::test] - async fn update_registration_token() { - let notification = 
make_notification(default_router_data(), None, RouterType::ADM); - let mut db = MockDbClient::new(); - db.expect_update_user() - .withf(|user: &User| { - user.router_data - .as_ref() - .unwrap() - .get("token") - .and_then(Value::as_str) - == Some("test-registration-id2") - }) - .times(1) - .return_once(|_| Ok(true)); - - let router = make_router(db.into_boxed_arc()); - let _token_mock = mock_token_endpoint(); - let _adm_mock = mock_adm_endpoint_builder() - .with_body(r#"{"registrationID":"test-registration-id2"}"#) - .create(); - - let result = router.route_notification(¬ification).await; - assert!(result.is_ok()); - } -} diff --git a/autoendpoint/src/routers/adm/settings.rs b/autoendpoint/src/routers/adm/settings.rs deleted file mode 100644 index dbbd2e412..000000000 --- a/autoendpoint/src/routers/adm/settings.rs +++ /dev/null @@ -1,48 +0,0 @@ -use std::collections::HashMap; -use url::Url; - -/// Settings for `AdmRouter` -#[derive(Clone, Debug, serde::Deserialize)] -#[serde(default)] -#[serde(deny_unknown_fields)] -pub struct AdmSettings { - /// A JSON dict of `AdmProfile`s. 
This must be a `String` because - /// environment variables cannot encode a `HashMap` - pub profiles: String, - /// The max size of notification data in bytes - pub max_data: usize, - /// The base URL to use for ADM requests - pub base_url: Url, - /// The number of seconds to wait for ADM requests to complete - pub timeout: usize, - /// The minimum TTL to use for ADM notifications - pub min_ttl: usize, -} - -/// Settings for a specific ADM profile -#[derive(Clone, Debug, Default, serde::Deserialize)] -#[serde(default)] -#[serde(deny_unknown_fields)] -pub struct AdmProfile { - pub client_id: String, - pub client_secret: String, -} - -impl Default for AdmSettings { - fn default() -> Self { - Self { - profiles: "{}".to_string(), - max_data: 6000, - base_url: Url::parse("https://api.amazon.com").unwrap(), - timeout: 3, - min_ttl: 60, - } - } -} - -impl AdmSettings { - /// Read the profiles from the JSON string - pub fn profiles(&self) -> serde_json::Result> { - serde_json::from_str(&self.profiles) - } -} diff --git a/autoendpoint/src/routers/apns/error.rs b/autoendpoint/src/routers/apns/error.rs index 1aa526f3f..57b6a2eab 100644 --- a/autoendpoint/src/routers/apns/error.rs +++ b/autoendpoint/src/routers/apns/error.rs @@ -1,6 +1,7 @@ use crate::error::ApiErrorKind; use crate::routers::RouterError; use actix_web::http::StatusCode; +use autopush_common::errors::ReportableError; use std::io; /// Errors that may occur in the Apple Push Notification Service router @@ -86,3 +87,24 @@ impl From for ApiErrorKind { ApiErrorKind::Router(RouterError::Apns(e)) } } + +impl ReportableError for ApnsError { + fn is_sentry_event(&self) -> bool { + !matches!(self, ApnsError::SizeLimit(_) | ApnsError::Unregistered) + } + + fn metric_label(&self) -> Option<&'static str> { + match &self { + ApnsError::SizeLimit(_) => Some("notification.bridge.error.apns.oversized"), + ApnsError::ApnsUpstream(_) => Some("notification.bridge.error.apns.upstream"), + _ => None, + } + } + + fn extras(&self) -> 
Vec<(&str, String)> { + match self { + ApnsError::ApnsUpstream(e) => vec![("error", e.to_string())], + _ => vec![], + } + } +} diff --git a/autoendpoint/src/routers/apns/router.rs b/autoendpoint/src/routers/apns/router.rs index c405685c7..48c497a43 100644 --- a/autoendpoint/src/routers/apns/router.rs +++ b/autoendpoint/src/routers/apns/router.rs @@ -153,9 +153,19 @@ impl ApnsRouter { } else { settings.key.as_bytes().to_vec() }; + // Timeouts defined in ApnsSettings settings.rs config and can be modified. + // We define them to prevent possible a2 library changes that could + // create unexpected behavior if timeouts are altered. + // They currently map to values matching the detaults in the a2 lib v0.10. + let apns_settings = ApnsSettings::default(); + let config = a2::ClientConfig { + endpoint, + request_timeout_secs: apns_settings.request_timeout_secs, + pool_idle_timeout_secs: apns_settings.pool_idle_timeout_secs, + }; let client = ApnsClientData { client: Box::new( - a2::Client::certificate_parts(&cert, &key, endpoint) + a2::Client::certificate_parts(&cert, &key, config) .map_err(ApnsError::ApnsClient)?, ), topic: settings @@ -226,21 +236,6 @@ impl ApnsRouter { ApiError::from(ApnsError::ApnsUpstream(error)) } - /// Convert all of the floats in a JSON value into integers. DynamoDB - /// returns all numbers as floats, but deserializing to `APS` will fail if - /// it expects an integer and gets a float. - fn convert_value_float_to_int(value: &mut Value) { - if let Some(float) = value.as_f64() { - *value = Value::Number(serde_json::Number::from(float as i64)); - } - - if let Some(object) = value.as_object_mut() { - object - .values_mut() - .for_each(Self::convert_value_float_to_int); - } - } - /// if we have any clients defined, this connection is "active" pub fn active(&self) -> bool { !self.clients.is_empty() @@ -266,7 +261,7 @@ impl ApnsRouter { // still needed or used. (I want to get rid of this.) 
if let Some(v) = replacement.get("title") { if let Some(v) = v.as_str() { - holder.title = v.to_owned(); + v.clone_into(&mut holder.title); aps = aps.set_title(&holder.title); } else { return Err(ApnsError::InvalidApsData); @@ -274,7 +269,7 @@ impl ApnsRouter { } if let Some(v) = replacement.get("subtitle") { if let Some(v) = v.as_str() { - holder.subtitle = v.to_owned(); + v.clone_into(&mut holder.subtitle); aps = aps.set_subtitle(&holder.subtitle); } else { return Err(ApnsError::InvalidApsData); @@ -282,7 +277,7 @@ impl ApnsRouter { } if let Some(v) = replacement.get("body") { if let Some(v) = v.as_str() { - holder.body = v.to_owned(); + v.clone_into(&mut holder.body); aps = aps.set_body(&holder.body); } else { return Err(ApnsError::InvalidApsData); @@ -290,7 +285,7 @@ impl ApnsRouter { } if let Some(v) = replacement.get("title_loc_key") { if let Some(v) = v.as_str() { - holder.title_loc_key = v.to_owned(); + v.clone_into(&mut holder.title_loc_key); aps = aps.set_title_loc_key(&holder.title_loc_key); } else { return Err(ApnsError::InvalidApsData); @@ -314,7 +309,7 @@ impl ApnsRouter { } if let Some(v) = replacement.get("action_loc_key") { if let Some(v) = v.as_str() { - holder.action_loc_key = v.to_owned(); + v.clone_into(&mut holder.action_loc_key); aps = aps.set_action_loc_key(&holder.action_loc_key); } else { return Err(ApnsError::InvalidApsData); @@ -322,7 +317,7 @@ impl ApnsRouter { } if let Some(v) = replacement.get("loc_key") { if let Some(v) = v.as_str() { - holder.loc_key = v.to_owned(); + v.clone_into(&mut holder.loc_key); aps = aps.set_loc_key(&holder.loc_key); } else { return Err(ApnsError::InvalidApsData); @@ -346,7 +341,7 @@ impl ApnsRouter { } if let Some(v) = replacement.get("launch_image") { if let Some(v) = v.as_str() { - holder.launch_image = v.to_owned(); + v.clone_into(&mut holder.launch_image); aps = aps.set_launch_image(&holder.launch_image); } else { return Err(ApnsError::InvalidApsData); @@ -424,13 +419,7 @@ impl Router for ApnsRouter { 
.get("rel_channel") .and_then(Value::as_str) .ok_or(ApnsError::NoReleaseChannel)?; - // XXX: We don't really use anything that is a numeric here, aside from - // mutable contant, and even there we should just check for presense. - // Once we're off of DynamoDB, we might want to kill the map. - let aps_json = router_data.get("aps").cloned().map(|mut value| { - Self::convert_value_float_to_int(&mut value); - value - }); + let aps_json = router_data.get("aps").cloned(); let mut message_data = build_message_data(notification)?; message_data.insert("ver", notification.message_id.clone()); @@ -461,6 +450,7 @@ impl Router for ApnsRouter { apns_topic: Some(topic), apns_collapse_id: None, apns_expiration: Some(notification.timestamp + notification.headers.ttl as u64), + ..Default::default() }, ); payload.data = message_data @@ -612,7 +602,8 @@ mod tests { Ok(apns_success_response()) }); - let db = MockDbClient::new().into_boxed_arc(); + let mdb = MockDbClient::new(); + let db = mdb.into_boxed_arc(); let router = make_router(client, db); let notification = make_notification(default_router_data(), None, RouterType::APNS); @@ -649,7 +640,8 @@ mod tests { Ok(apns_success_response()) }); - let db = MockDbClient::new().into_boxed_arc(); + let mdb = MockDbClient::new(); + let db = mdb.into_boxed_arc(); let router = make_router(client, db); let data = "test-data".to_string(); let notification = make_notification(default_router_data(), Some(data), RouterType::APNS); diff --git a/autoendpoint/src/routers/apns/settings.rs b/autoendpoint/src/routers/apns/settings.rs index b03efbd1c..c072ba870 100644 --- a/autoendpoint/src/routers/apns/settings.rs +++ b/autoendpoint/src/routers/apns/settings.rs @@ -10,6 +10,11 @@ pub struct ApnsSettings { pub channels: String, /// The max size of notification data in bytes pub max_data: usize, + // These values correspond to the a2 library ClientConfig struct. + // https://github.com/WalletConnect/a2/blob/master/src/client.rs#L65-L71. 
+ // Utilized by apns router config in creating the client. + pub request_timeout_secs: Option, + pub pool_idle_timeout_secs: Option, } /// Settings for a specific APNS release channel @@ -27,10 +32,12 @@ pub struct ApnsChannel { } impl Default for ApnsSettings { - fn default() -> Self { - Self { + fn default() -> ApnsSettings { + ApnsSettings { channels: "{}".to_string(), max_data: 4096, + request_timeout_secs: Some(20), + pool_idle_timeout_secs: Some(600), } } } diff --git a/autoendpoint/src/routers/common.rs b/autoendpoint/src/routers/common.rs index f3ac70cf3..e09ffd373 100644 --- a/autoendpoint/src/routers/common.rs +++ b/autoendpoint/src/routers/common.rs @@ -1,5 +1,6 @@ use crate::error::{ApiError, ApiResult}; use crate::extractors::notification::Notification; +use crate::headers::vapid::VapidHeaderWithKey; use crate::routers::RouterError; use actix_web::http::StatusCode; use autopush_common::db::client::DbClient; @@ -44,6 +45,7 @@ pub async fn handle_error( platform: &str, app_id: &str, uaid: Uuid, + vapid: Option, ) -> ApiError { match &error { RouterError::Authentication => { @@ -69,7 +71,8 @@ pub async fn handle_error( ); } RouterError::RequestTimeout => { - warn!("Bridge timeout"); + // Bridge timeouts are common. + info!("Bridge timeout"); incr_error_metric( metrics, platform, @@ -116,6 +119,17 @@ pub async fn handle_error( error.errno(), ); } + RouterError::TooMuchData(_) => { + // Do not log this error since it's fairly common. 
+ incr_error_metric( + metrics, + platform, + app_id, + "too_much_data", + error.status(), + error.errno(), + ); + } _ => { warn!("Unknown error while sending bridge request: {}", error); incr_error_metric( @@ -129,7 +143,14 @@ pub async fn handle_error( } } - ApiError::from(error) + let mut err = ApiError::from(error); + + if let Some(Ok(claims)) = vapid.map(|v| v.vapid.claims()) { + let mut extras = err.extras.unwrap_or_default(); + extras.extend([("sub".to_owned(), claims.sub)]); + err.extras = Some(extras); + }; + err } /// Increment `notification.bridge.error` @@ -207,16 +228,18 @@ pub mod tests { data: Option, router_type: RouterType, ) -> Notification { + let user = User::builder() + .router_data(router_data) + .router_type(router_type.to_string()) + .build() + .unwrap(); Notification { message_id: "test-message-id".to_string(), subscription: Subscription { - user: User { - router_data: Some(router_data), - router_type: router_type.to_string(), - ..Default::default() - }, + user, channel_id: channel_id(), vapid: None, + tracking_id: None, }, headers: NotificationHeaders { ttl: 0, diff --git a/autoendpoint/src/routers/fcm/client.rs b/autoendpoint/src/routers/fcm/client.rs index ca4f883f5..92e49bf64 100644 --- a/autoendpoint/src/routers/fcm/client.rs +++ b/autoendpoint/src/routers/fcm/client.rs @@ -20,7 +20,6 @@ pub struct FcmClient { timeout: Duration, max_data: usize, authenticator: Option, - pub is_gcm: bool, http_client: reqwest::Client, } @@ -77,7 +76,6 @@ impl FcmClient { timeout: Duration::from_secs(settings.timeout as u64), max_data: settings.max_data, authenticator: auth, - is_gcm: server_credential.is_gcm.unwrap_or_default(), http_client: http, }) } @@ -87,7 +85,7 @@ impl FcmClient { &self, data: HashMap<&'static str, String>, routing_token: String, - ttl: usize, + ttl: u64, ) -> Result<(), RouterError> { // Check the payload size. FCM only cares about the `data` field when // checking size. 
@@ -191,7 +189,7 @@ pub mod tests { pub const GCM_PROJECT_ID: &str = "valid_gcm_access_token"; /// Write service data to a temporary file - pub fn make_service_key() -> String { + pub fn make_service_key(server: &mockito::ServerGuard) -> String { // Taken from the yup-oauth2 tests serde_json::json!({ "type": "service_account", @@ -201,15 +199,16 @@ pub mod tests { "client_email": "yup-test-sa-1@yup-test-243420.iam.gserviceaccount.com", "client_id": "102851967901799660408", "auth_uri": "https://accounts.google.com/o/oauth2/auth", - "token_uri": mockito::server_url() + "/token", + "token_uri": server.url() + "/token", "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/yup-test-sa-1%40yup-test-243420.iam.gserviceaccount.com" }).to_string() } /// Mock the OAuth token endpoint to provide the access token - pub fn mock_token_endpoint() -> mockito::Mock { - mockito::mock("POST", "/token") + pub async fn mock_token_endpoint(server: &mut mockito::ServerGuard) -> mockito::Mock { + server + .mock("POST", "/token") .with_body( serde_json::json!({ "access_token": ACCESS_TOKEN, @@ -218,19 +217,23 @@ pub mod tests { }) .to_string(), ) - .create() + .create_async() + .await } /// Start building a mock for the FCM endpoint - pub fn mock_fcm_endpoint_builder(id: &str) -> mockito::Mock { - mockito::mock("POST", format!("/v1/projects/{id}/messages:send").as_str()) + pub fn mock_fcm_endpoint_builder(server: &mut mockito::ServerGuard, id: &str) -> mockito::Mock { + server.mock("POST", format!("/v1/projects/{id}/messages:send").as_str()) } /// Make a FcmClient from the service auth data - async fn make_client(credential: FcmServerCredential) -> FcmClient { + async fn make_client( + server: &mockito::ServerGuard, + credential: FcmServerCredential, + ) -> FcmClient { FcmClient::new( &FcmSettings { - base_url: Url::parse(&mockito::server_url()).unwrap(), + base_url: 
Url::parse(&server.url()).unwrap(), server_credentials: serde_json::json!(credential).to_string(), ..Default::default() }, @@ -245,14 +248,19 @@ pub mod tests { /// expected FCM request. #[tokio::test] async fn sends_correct_fcm_request() { - let client = make_client(FcmServerCredential { - project_id: PROJECT_ID.to_owned(), - is_gcm: None, - server_access_token: make_service_key(), - }) + let mut server = mockito::Server::new_async().await; + + let client = make_client( + &server, + FcmServerCredential { + project_id: PROJECT_ID.to_owned(), + is_gcm: None, + server_access_token: make_service_key(&server), + }, + ) .await; - let _token_mock = mock_token_endpoint(); - let fcm_mock = mock_fcm_endpoint_builder(PROJECT_ID) + let _token_mock = mock_token_endpoint(&mut server).await; + let fcm_mock = mock_fcm_endpoint_builder(&mut server, PROJECT_ID) .match_header("Authorization", format!("Bearer {ACCESS_TOKEN}").as_str()) .match_header("Content-Type", "application/json") .match_body(r#"{"message":{"android":{"data":{"is_test":"true"},"ttl":"42s"},"token":"test-token"}}"#) @@ -269,17 +277,23 @@ pub mod tests { /// Authorization errors are handled #[tokio::test] async fn unauthorized() { - let client = make_client(FcmServerCredential { - project_id: PROJECT_ID.to_owned(), - is_gcm: None, - server_access_token: make_service_key(), - }) + let mut server = mockito::Server::new_async().await; + + let client = make_client( + &server, + FcmServerCredential { + project_id: PROJECT_ID.to_owned(), + is_gcm: None, + server_access_token: make_service_key(&server), + }, + ) .await; - let _token_mock = mock_token_endpoint(); - let _fcm_mock = mock_fcm_endpoint_builder(PROJECT_ID) + let _token_mock = mock_token_endpoint(&mut server).await; + let _fcm_mock = mock_fcm_endpoint_builder(&mut server, PROJECT_ID) .with_status(401) .with_body(r#"{"error":{"status":"UNAUTHENTICATED","message":"test-message"}}"#) - .create(); + .create_async() + .await; let result = client .send(HashMap::new(), 
"test-token".to_string(), 42) @@ -294,17 +308,23 @@ pub mod tests { /// 404 errors are handled #[tokio::test] async fn not_found() { - let client = make_client(FcmServerCredential { - project_id: PROJECT_ID.to_owned(), - is_gcm: None, - server_access_token: make_service_key(), - }) + let mut server = mockito::Server::new_async().await; + + let client = make_client( + &server, + FcmServerCredential { + project_id: PROJECT_ID.to_owned(), + is_gcm: None, + server_access_token: make_service_key(&server), + }, + ) .await; - let _token_mock = mock_token_endpoint(); - let _fcm_mock = mock_fcm_endpoint_builder(PROJECT_ID) + let _token_mock = mock_token_endpoint(&mut server).await; + let _fcm_mock = mock_fcm_endpoint_builder(&mut server, PROJECT_ID) .with_status(404) .with_body(r#"{"error":{"status":"NOT_FOUND","message":"test-message"}}"#) - .create(); + .create_async() + .await; let result = client .send(HashMap::new(), "test-token".to_string(), 42) @@ -319,17 +339,23 @@ pub mod tests { /// Unhandled errors (where an error object is returned) are wrapped and returned #[tokio::test] async fn other_fcm_error() { - let client = make_client(FcmServerCredential { - project_id: PROJECT_ID.to_owned(), - is_gcm: Some(false), - server_access_token: make_service_key(), - }) + let mut server = mockito::Server::new_async().await; + + let client = make_client( + &server, + FcmServerCredential { + project_id: PROJECT_ID.to_owned(), + is_gcm: Some(false), + server_access_token: make_service_key(&server), + }, + ) .await; - let _token_mock = mock_token_endpoint(); - let _fcm_mock = mock_fcm_endpoint_builder(PROJECT_ID) + let _token_mock = mock_token_endpoint(&mut server).await; + let _fcm_mock = mock_fcm_endpoint_builder(&mut server, PROJECT_ID) .with_status(400) .with_body(r#"{"error":{"status":"TEST_ERROR","message":"test-message"}}"#) - .create(); + .create_async() + .await; let result = client .send(HashMap::new(), "test-token".to_string(), 42) @@ -348,17 +374,23 @@ pub mod tests { 
/// Unknown errors (where an error object is NOT returned) is handled #[tokio::test] async fn unknown_fcm_error() { - let client = make_client(FcmServerCredential { - project_id: PROJECT_ID.to_owned(), - is_gcm: Some(true), - server_access_token: make_service_key(), - }) + let mut server = mockito::Server::new_async().await; + + let client = make_client( + &server, + FcmServerCredential { + project_id: PROJECT_ID.to_owned(), + is_gcm: Some(true), + server_access_token: make_service_key(&server), + }, + ) .await; - let _token_mock = mock_token_endpoint(); - let _fcm_mock = mock_fcm_endpoint_builder(PROJECT_ID) + let _token_mock = mock_token_endpoint(&mut server).await; + let _fcm_mock = mock_fcm_endpoint_builder(&mut server, PROJECT_ID) .with_status(400) .with_body("{}") - .create(); + .create_async() + .await; let result = client .send(HashMap::new(), "test-token".to_string(), 42) diff --git a/autoendpoint/src/routers/fcm/error.rs b/autoendpoint/src/routers/fcm/error.rs index 7de571042..57211fed4 100644 --- a/autoendpoint/src/routers/fcm/error.rs +++ b/autoendpoint/src/routers/fcm/error.rs @@ -1,5 +1,7 @@ use crate::error::ApiErrorKind; use crate::routers::RouterError; + +use autopush_common::errors::ReportableError; use reqwest::StatusCode; /// Errors that may occur in the Firebase Cloud Messaging router @@ -71,9 +73,33 @@ impl FcmError { | FcmError::NoOAuthToken => None, } } +} - pub fn extras(&self) -> Vec<(&str, String)> { +impl From for ApiErrorKind { + fn from(e: FcmError) -> Self { + ApiErrorKind::Router(RouterError::Fcm(e)) + } +} + +impl ReportableError for FcmError { + fn is_sentry_event(&self) -> bool { + matches!(&self, FcmError::InvalidAppId(_) | FcmError::NoAppId) + } + + fn metric_label(&self) -> Option<&'static str> { + match &self { + FcmError::InvalidAppId(_) | FcmError::NoAppId => { + Some("notification.bridge.error.fcm.badappid") + } + _ => None, + } + } + + fn extras(&self) -> Vec<(&str, String)> { match self { + FcmError::InvalidAppId(appid) 
=> { + vec![("app_id", appid.to_string())] + } FcmError::EmptyResponse(status) => { vec![("status", status.to_string())] } @@ -84,9 +110,3 @@ impl FcmError { } } } - -impl From for ApiErrorKind { - fn from(e: FcmError) -> Self { - ApiErrorKind::Router(RouterError::Fcm(e)) - } -} diff --git a/autoendpoint/src/routers/fcm/router.rs b/autoendpoint/src/routers/fcm/router.rs index 05bd560ce..e497dd173 100644 --- a/autoendpoint/src/routers/fcm/router.rs +++ b/autoendpoint/src/routers/fcm/router.rs @@ -1,4 +1,4 @@ -use autopush_common::db::client::DbClient; +use autopush_common::{db::client::DbClient, MAX_NOTIFICATION_TTL}; use crate::error::ApiResult; use crate::extractors::notification::Notification; @@ -16,9 +16,6 @@ use std::sync::Arc; use url::Url; use uuid::Uuid; -/// 28 days -const MAX_TTL: usize = 28 * 24 * 60 * 60; - /// Firebase Cloud Messaging router pub struct FcmRouter { settings: FcmSettings, @@ -156,7 +153,8 @@ impl Router for FcmRouter { let (routing_token, app_id) = self.routing_info(router_data, ¬ification.subscription.user.uaid)?; - let ttl = MAX_TTL.min(self.settings.min_ttl.max(notification.headers.ttl as usize)); + let ttl = + MAX_NOTIFICATION_TTL.min(self.settings.min_ttl.max(notification.headers.ttl as u64)); // Send the notification to FCM let client = self @@ -175,6 +173,7 @@ impl Router for FcmRouter { platform, &app_id, notification.subscription.user.uaid, + notification.subscription.vapid.clone(), ) .await); }; @@ -219,11 +218,12 @@ mod tests { /// Create a router for testing, using the given service auth file async fn make_router( + server: &mut mockito::ServerGuard, fcm_credential: String, gcm_credential: String, db: Box, ) -> FcmRouter { - let url = &mockito::server_url(); + let url = &server.url(); FcmRouter::new( FcmSettings { base_url: Url::parse(url).unwrap(), @@ -264,11 +264,15 @@ mod tests { /// A notification with no data is sent to FCM #[tokio::test] async fn successful_routing_no_data() { - let db = 
MockDbClient::new().into_boxed_arc(); - let router = make_router(make_service_key(), "whatever".to_string(), db).await; + let mut server = mockito::Server::new_async().await; + + let mdb = MockDbClient::new(); + let db = mdb.into_boxed_arc(); + let service_key = make_service_key(&server); + let router = make_router(&mut server, service_key, "whatever".to_string(), db).await; assert!(router.active()); - let _token_mock = mock_token_endpoint(); - let fcm_mock = mock_fcm_endpoint_builder(PROJECT_ID) + let _token_mock = mock_token_endpoint(&mut server).await; + let fcm_mock = mock_fcm_endpoint_builder(&mut server, PROJECT_ID) .match_body( serde_json::json!({ "message": { @@ -299,10 +303,14 @@ mod tests { /// A notification with data is sent to FCM #[tokio::test] async fn successful_routing_with_data() { - let db = MockDbClient::new().into_boxed_arc(); - let router = make_router(make_service_key(), "whatever".to_string(), db).await; - let _token_mock = mock_token_endpoint(); - let fcm_mock = mock_fcm_endpoint_builder(PROJECT_ID) + let mut server = mockito::Server::new_async().await; + + let mdb = MockDbClient::new(); + let db = mdb.into_boxed_arc(); + let service_key = make_service_key(&server); + let router = make_router(&mut server, service_key, "whatever".to_string(), db).await; + let _token_mock = mock_token_endpoint(&mut server).await; + let fcm_mock = mock_fcm_endpoint_builder(&mut server, PROJECT_ID) .match_body( serde_json::json!({ "message": { @@ -340,10 +348,16 @@ mod tests { /// the FCM request is not sent. 
#[tokio::test] async fn missing_client() { + let mut server = mockito::Server::new_async().await; + let db = MockDbClient::new().into_boxed_arc(); - let router = make_router(make_service_key(), "whatever".to_string(), db).await; - let _token_mock = mock_token_endpoint(); - let fcm_mock = mock_fcm_endpoint_builder(PROJECT_ID).expect(0).create(); + let service_key = make_service_key(&server); + let router = make_router(&mut server, service_key, "whatever".to_string(), db).await; + let _token_mock = mock_token_endpoint(&mut server).await; + let fcm_mock = mock_fcm_endpoint_builder(&mut server, PROJECT_ID) + .expect(0) + .create_async() + .await; let mut router_data = default_router_data(); let app_id = "app_id".to_string(); router_data.insert( @@ -367,6 +381,8 @@ mod tests { /// If the FCM user no longer exists (404), we drop the user from our database #[tokio::test] async fn no_fcm_user() { + let mut server = mockito::Server::new_async().await; + let notification = make_notification(default_router_data(), None, RouterType::FCM); let mut db = MockDbClient::new(); db.expect_remove_user() @@ -374,17 +390,20 @@ mod tests { .times(1) .return_once(|_| Ok(())); + let service_key = make_service_key(&server); let router = make_router( - make_service_key(), + &mut server, + service_key, "whatever".to_string(), db.into_boxed_arc(), ) .await; - let _token_mock = mock_token_endpoint(); - let _fcm_mock = mock_fcm_endpoint_builder(PROJECT_ID) + let _token_mock = mock_token_endpoint(&mut server).await; + let _fcm_mock = mock_fcm_endpoint_builder(&mut server, PROJECT_ID) .with_status(404) .with_body(r#"{"error":{"status":"NOT_FOUND","message":"test-message"}}"#) - .create(); + .create_async() + .await; let result = router.route_notification(¬ification).await; assert!(result.is_err()); diff --git a/autoendpoint/src/routers/fcm/settings.rs b/autoendpoint/src/routers/fcm/settings.rs index a91ea26da..d6f659269 100644 --- a/autoendpoint/src/routers/fcm/settings.rs +++ 
b/autoendpoint/src/routers/fcm/settings.rs @@ -8,7 +8,7 @@ use url::Url; #[serde(deny_unknown_fields)] pub struct FcmSettings { /// The minimum TTL to use for FCM notifications - pub min_ttl: usize, + pub min_ttl: u64, /// A JSON dict of `FcmCredential`s. This must be a `String` because /// environment variables cannot encode a `HashMap` /// This contains both GCM and FCM credentials. diff --git a/autoendpoint/src/routers/mod.rs b/autoendpoint/src/routers/mod.rs index ac5e27778..766bd8dc9 100644 --- a/autoendpoint/src/routers/mod.rs +++ b/autoendpoint/src/routers/mod.rs @@ -3,7 +3,6 @@ use crate::error::ApiResult; use crate::extractors::notification::Notification; use crate::extractors::router_data_input::RouterDataInput; -use crate::routers::adm::error::AdmError; use crate::routers::apns::error::ApnsError; use crate::routers::fcm::error::FcmError; @@ -16,10 +15,13 @@ use autopush_common::errors::ReportableError; use std::collections::HashMap; use thiserror::Error; -pub mod adm; +#[cfg(feature = "stub")] +use self::stub::error::StubError; pub mod apns; mod common; pub mod fcm; +#[cfg(feature = "stub")] +pub mod stub; pub mod webpush; #[async_trait(?Send)] @@ -74,17 +76,18 @@ impl From for HttpResponse { #[derive(Debug, Error)] pub enum RouterError { - #[error(transparent)] - Adm(#[from] AdmError), - #[error(transparent)] Apns(#[from] ApnsError), #[error(transparent)] Fcm(#[from] FcmError), + #[cfg(feature = "stub")] + #[error(transparent)] + Stub(#[from] StubError), + #[error("Database error while saving notification")] - SaveDb(#[source] DbError), + SaveDb(#[source] DbError, Option), #[error("User was deleted during routing")] UserWasDeleted, @@ -118,11 +121,12 @@ impl RouterError { /// Get the associated HTTP status code pub fn status(&self) -> StatusCode { match self { - RouterError::Adm(e) => e.status(), RouterError::Apns(e) => e.status(), RouterError::Fcm(e) => StatusCode::from_u16(e.status().as_u16()).unwrap_or_default(), - RouterError::SaveDb(e) => 
e.status(), + RouterError::SaveDb(e, _) => e.status(), + #[cfg(feature = "stub")] + RouterError::Stub(e) => e.status(), RouterError::UserWasDeleted | RouterError::NotFound => StatusCode::GONE, @@ -139,17 +143,19 @@ impl RouterError { /// Get the associated error number pub fn errno(&self) -> Option { match self { - RouterError::Adm(e) => e.errno(), RouterError::Apns(e) => e.errno(), RouterError::Fcm(e) => e.errno(), + #[cfg(feature = "stub")] + RouterError::Stub(e) => e.errno(), + RouterError::TooMuchData(_) => Some(104), RouterError::UserWasDeleted => Some(105), RouterError::NotFound => Some(106), - RouterError::SaveDb(_) => Some(201), + RouterError::SaveDb(_, _) => Some(201), RouterError::Authentication => Some(901), @@ -162,38 +168,23 @@ impl RouterError { RouterError::Upstream { .. } => None, } } +} - pub fn metric_label(&self) -> Option<&'static str> { - // NOTE: Some metrics are emitted for other Errors via handle_error - // callbacks, whereas some are emitted via this method. These 2 should - // be consoliated: https://mozilla-hub.atlassian.net/browse/SYNC-3695 - let err = match self { - RouterError::Adm(AdmError::InvalidProfile | AdmError::NoProfile) => { - "notification.bridge.error.adm.profile" - } - RouterError::Apns(ApnsError::SizeLimit(_)) => { - "notification.bridge.error.apns.oversized" - } - RouterError::Fcm(FcmError::InvalidAppId(_) | FcmError::NoAppId) => { - "notification.bridge.error.fcm.badappid" - } - RouterError::TooMuchData(_) => "notification.bridge.error.too_much_data", - RouterError::SaveDb(e) => e.metric_label().unwrap_or_default(), - _ => "", - }; - if !err.is_empty() { - return Some(err); +impl ReportableError for RouterError { + fn reportable_source(&self) -> Option<&(dyn ReportableError + 'static)> { + match &self { + RouterError::Apns(e) => Some(e), + RouterError::Fcm(e) => Some(e), + RouterError::SaveDb(e, _) => Some(e), + _ => None, } - None } - pub fn is_sentry_event(&self) -> bool { + fn is_sentry_event(&self) -> bool { match 
self { - RouterError::Adm(e) => !matches!(e, AdmError::InvalidProfile | AdmError::NoProfile), // apns handle_error emits a metric for ApnsError::Unregistered - RouterError::Apns(ApnsError::SizeLimit(_)) - | RouterError::Apns(ApnsError::Unregistered) => false, - RouterError::Fcm(e) => !matches!(e, FcmError::InvalidAppId(_) | FcmError::NoAppId), + RouterError::Apns(e) => e.is_sentry_event(), + RouterError::Fcm(e) => e.is_sentry_event(), // common handle_error emits metrics for these RouterError::Authentication | RouterError::GCMAuthentication @@ -202,15 +193,34 @@ impl RouterError { | RouterError::RequestTimeout | RouterError::TooMuchData(_) | RouterError::Upstream { .. } => false, - RouterError::SaveDb(e) => e.is_sentry_event(), + RouterError::SaveDb(e, _) => e.is_sentry_event(), _ => true, } } - pub fn extras(&self) -> Vec<(&str, String)> { + fn metric_label(&self) -> Option<&'static str> { + // NOTE: Some metrics are emitted for other Errors via handle_error + // callbacks, whereas some are emitted via this method. 
These 2 should + // be consoliated: https://mozilla-hub.atlassian.net/browse/SYNC-3695 match self { + RouterError::Apns(e) => e.metric_label(), + RouterError::Fcm(e) => e.metric_label(), + RouterError::TooMuchData(_) => Some("notification.bridge.error.too_much_data"), + _ => None, + } + } + + fn extras(&self) -> Vec<(&str, String)> { + match &self { + RouterError::Apns(e) => e.extras(), RouterError::Fcm(e) => e.extras(), - RouterError::SaveDb(e) => e.extras(), + RouterError::SaveDb(e, sub) => { + let mut extras = e.extras(); + if let Some(sub) = sub { + extras.append(&mut vec![("sub", sub.clone())]); + }; + extras + } _ => vec![], } } diff --git a/autoendpoint/src/routers/stub/client.rs b/autoendpoint/src/routers/stub/client.rs new file mode 100644 index 000000000..fde68e24f --- /dev/null +++ b/autoendpoint/src/routers/stub/client.rs @@ -0,0 +1,38 @@ +use super::error::StubError; +use super::settings::StubServerSettings; +use crate::routers::RouterError; + +pub struct StubClient { + success: bool, + err: Option, +} + +impl StubClient { + /// Always succeeds + pub fn success() -> Self { + Self { + success: true, + err: None, + } + } + + pub fn error(err: StubError) -> Self { + Self { + success: false, + err: Some(err), + } + } + + /// Reply to the "client" based on the the type of app_id they specified. 
+ pub async fn call(&self, settings: &StubServerSettings) -> Result<(), RouterError> { + if !self.success { + return Err(self + .err + .clone() + .unwrap_or(StubError::General(settings.error.clone())) + .into()); + } + + Ok(()) + } +} diff --git a/autoendpoint/src/routers/stub/error.rs b/autoendpoint/src/routers/stub/error.rs new file mode 100644 index 000000000..eca6d6053 --- /dev/null +++ b/autoendpoint/src/routers/stub/error.rs @@ -0,0 +1,56 @@ +use crate::error::ApiErrorKind; +use crate::routers::RouterError; +use actix_web::http::StatusCode; + +/// Errors that may occur in the Firebase Cloud Messaging router +#[derive(thiserror::Error, Clone, Debug)] +pub enum StubError { + #[error("Failed to decode the credential settings")] + Credential, + + #[error("Invalid JSON response from Test")] + InvalidResponse, + + #[error("General error")] + General(String), + + #[error("Missing User error")] + Missing, +} + +impl StubError { + /// Get the associated HTTP status code + pub fn status(&self) -> StatusCode { + match self { + StubError::Missing => StatusCode::GONE, + + StubError::Credential | StubError::General(_) => StatusCode::INTERNAL_SERVER_ERROR, + + StubError::InvalidResponse => StatusCode::BAD_GATEWAY, + } + } + + /// Get the associated error number + pub fn errno(&self) -> Option { + match self { + StubError::Missing => Some(106), + + _ => None, + } + } + + pub fn extras(&self) -> Vec<(&str, String)> { + match self { + StubError::General(status) => { + vec![("status", status.to_string())] + } + _ => vec![], + } + } +} + +impl From for ApiErrorKind { + fn from(e: StubError) -> Self { + ApiErrorKind::Router(RouterError::Stub(e)) + } +} diff --git a/autoendpoint/src/routers/stub/mod.rs b/autoendpoint/src/routers/stub/mod.rs new file mode 100644 index 000000000..3e9c2ae64 --- /dev/null +++ b/autoendpoint/src/routers/stub/mod.rs @@ -0,0 +1,4 @@ +pub mod client; +pub mod error; +pub mod router; +pub mod settings; diff --git a/autoendpoint/src/routers/stub/router.rs 
b/autoendpoint/src/routers/stub/router.rs new file mode 100644 index 000000000..94fbf9f2c --- /dev/null +++ b/autoendpoint/src/routers/stub/router.rs @@ -0,0 +1,118 @@ +use super::client::StubClient; +use super::error::StubError; +use super::settings::{StubServerSettings, StubSettings}; +use crate::error::ApiResult; +use crate::extractors::notification::Notification; +use crate::extractors::router_data_input::RouterDataInput; +use crate::routers::{Router, RouterError, RouterResponse}; +use async_trait::async_trait; +use serde_json::Value; +use std::collections::HashMap; + +/// ##Stub router +/// +/// This will create a testing router who's behavior is determined +/// by the messages that it handles. This router can be used for +/// local development and testing. The Stub router can be controlled +/// by specifying the action in the `app_id` of the subscription URL. +/// The `success` `app_id` is always defined. +/// +/// for example, using a subscription registration URL of +/// `/v1/stub/success/registration` with the request body of +/// `"{\"token\":\"success\", \"key\":"..."}"` will return a successful +/// registration endpoint that uses the provided VAPID key. Calling that endpoint +/// with a VAPID signed request will succeed. +/// +/// Likewise, using a subscription registration URL of +/// `/v1/stub/error/registration` with the request body of +/// `"{\"token\":\"General Error\", \"key\":"..."}"` will return that error +/// when the subscription endpoint is called. 
+pub struct StubRouter { + settings: StubSettings, + server_settings: StubServerSettings, + /// A map from application ID to an authenticated FCM client + clients: HashMap, +} + +impl StubRouter { + /// Create a new `StubRouter` + pub fn new(settings: StubSettings) -> Result { + let server_settings = + serde_json::from_str::(&settings.server_credentials) + .unwrap_or_default(); + let clients = Self::create_clients(&server_settings); + Ok(Self { + settings, + server_settings, + clients, + }) + } + + /// Create Test clients for each application. Tests can specify which client + /// to use by designating the value in the `app_id` of the subscription URL. + /// While the `success` client is always defined, the `error` client can take on + /// different error results based on the server configuration. See [StubServerSettings] + /// for details. + fn create_clients(server_settings: &StubServerSettings) -> HashMap { + let mut clients = HashMap::new(); + // TODO: Expand this to provide for additional error states based on app_id drawn + // from the StubServerSettings. 
+ clients.insert("success".to_owned(), StubClient::success()); + clients.insert( + "error".to_owned(), + StubClient::error(StubError::General(server_settings.error.clone())), + ); + clients + } +} + +#[async_trait(?Send)] +impl Router for StubRouter { + fn register( + &self, + router_data_input: &RouterDataInput, + app_id: &str, + ) -> Result, RouterError> { + if !self.clients.contains_key(app_id) { + return Err( + StubError::General(format!("Unknown App ID: {}", app_id.to_owned())).into(), + ); + } + + let mut router_data = HashMap::new(); + router_data.insert( + "token".to_owned(), + serde_json::to_value(&router_data_input.token).unwrap(), + ); + router_data.insert("app_id".to_owned(), serde_json::to_value(app_id).unwrap()); + + Ok(router_data) + } + + async fn route_notification(&self, notification: &Notification) -> ApiResult { + debug!( + "Sending Test notification to UAID {}", + notification.subscription.user.uaid + ); + trace!("Notification = {:?}", notification); + + let router_data = notification + .subscription + .user + .router_data + .as_ref() + .ok_or(StubError::General("No Registration".to_owned()))?; + + let default = Value::String("success".to_owned()); + let client_type = router_data.get("type").unwrap_or(&default); + let client = self + .clients + .get(&client_type.to_string().to_lowercase()) + .unwrap_or_else(|| self.clients.get("error").unwrap()); + client.call(&self.server_settings).await?; + Ok(RouterResponse::success( + format!("{}/m/123", self.settings.url), + notification.headers.ttl as usize, + )) + } +} diff --git a/autoendpoint/src/routers/stub/settings.rs b/autoendpoint/src/routers/stub/settings.rs new file mode 100644 index 000000000..6342314c5 --- /dev/null +++ b/autoendpoint/src/routers/stub/settings.rs @@ -0,0 +1,44 @@ +/// Settings for `StubRouter` +/// These can be specified externally by the calling "client" during +/// registration and contain values that will be echoed back. 
+/// The `server_credentials` block is a JSON structure that contains +/// the default response for any routing request. This defaults to +/// 'error'. +#[derive(Clone, Debug, serde::Deserialize)] +#[serde(default)] +#[serde(deny_unknown_fields)] +pub struct StubSettings { + pub url: String, + #[serde(rename = "credentials")] + pub server_credentials: String, +} + +/// `StubServerSettings` allows the server configuration file to specify +/// the default error to use for requests to the "error" `app_id`. +/// Requests to endpoints associated with this client will return the +/// `error` string. +#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)] +pub struct StubServerSettings { + #[serde(default)] + pub error: String, +} + +impl Default for StubServerSettings { + fn default() -> Self { + Self { + error: "General Error".to_owned(), + } + } +} + +/// The `StubSettings` contains client provided data that can override +/// the response error string when endpoints associated with this client +/// are called. 
+impl Default for StubSettings { + fn default() -> Self { + Self { + url: "http://localhost:8080".to_owned(), + server_credentials: "{\"error\":\"General Error\"}".to_owned(), + } + } +} diff --git a/autoendpoint/src/routers/webpush.rs b/autoendpoint/src/routers/webpush.rs index 737bca12c..3d2646c46 100644 --- a/autoendpoint/src/routers/webpush.rs +++ b/autoendpoint/src/routers/webpush.rs @@ -7,8 +7,9 @@ use std::sync::Arc; use url::Url; use uuid::Uuid; -use crate::error::{ApiErrorKind, ApiResult}; +use crate::error::{ApiError, ApiErrorKind, ApiResult}; use crate::extractors::{notification::Notification, router_data_input::RouterDataInput}; +use crate::headers::vapid::VapidHeaderWithKey; use crate::routers::{Router, RouterError, RouterResponse}; use autopush_common::db::{client::DbClient, User}; @@ -104,7 +105,10 @@ impl Router for WebPushRouter { Ok(Some(user)) => user, Ok(None) => { trace!("✉ No user found, must have been deleted"); - return Err(ApiErrorKind::Router(RouterError::UserWasDeleted).into()); + return Err(self.handle_error( + ApiErrorKind::Router(RouterError::UserWasDeleted), + notification.subscription.vapid.clone(), + )); } Err(e) => { // Database error, but we already stored the message so it's ok @@ -157,6 +161,17 @@ impl Router for WebPushRouter { } impl WebPushRouter { + /// Use the same sort of error chokepoint that all the mobile clients use. 
+ fn handle_error(&self, error: ApiErrorKind, vapid: Option<VapidHeaderWithKey>) -> ApiError { + let mut err = ApiError::from(error); + if let Some(Ok(claims)) = vapid.map(|v| v.vapid.claims()) { + let mut extras = err.extras.unwrap_or_default(); + extras.extend([("sub".to_owned(), claims.sub)]); + err.extras = Some(extras); + }; + err + } + /// Send the notification to the node async fn send_notification( &self, @@ -188,7 +203,20 @@ impl WebPushRouter { notification.clone().into(), ) .await - .map_err(|e| ApiErrorKind::Router(RouterError::SaveDb(e)).into()) + .map_err(|e| { + self.handle_error( + ApiErrorKind::Router(RouterError::SaveDb( + e, + // try to extract the `sub` from the VAPID claims. + notification + .subscription + .vapid + .as_ref() + .map(|vapid| vapid.vapid.claims().map(|c| c.sub).unwrap_or_default()), + )), + notification.subscription.vapid.clone(), + ) + }) } /// Remove the node ID from a user. This is done if the user is no longer @@ -250,3 +278,42 @@ impl WebPushRouter { } } } + +#[cfg(test)] +mod test { + use std::boxed::Box; + use std::sync::Arc; + + use reqwest; + + use crate::extractors::subscription::tests::{make_vapid, PUB_KEY}; + use crate::headers::vapid::VapidClaims; + use autopush_common::errors::ReportableError; + + use super::*; + use autopush_common::db::mock::MockDbClient; + + fn make_router(db: Box<dyn DbClient>) -> WebPushRouter { + WebPushRouter { + db, + metrics: Arc::new(StatsdClient::from_sink("autopush", cadence::NopMetricSink)), + http: reqwest::Client::new(), + endpoint_url: Url::parse("http://localhost:8080/").unwrap(), + } + } + + #[tokio::test] + async fn pass_extras() { + let router = make_router(Box::new(MockDbClient::new())); + let sub = "foo@example.com"; + let vapid = make_vapid( + sub, + "https://push.services.mozilla.org", + VapidClaims::default_exp(), + PUB_KEY.to_owned(), + ); + + let err = router.handle_error(ApiErrorKind::LogCheck, Some(vapid)); + assert!(err.extras().contains(&("sub", sub.to_owned()))); + } +} diff --git
a/autoendpoint/src/routes/health.rs b/autoendpoint/src/routes/health.rs index 49657cde5..249c5dcd6 100644 --- a/autoendpoint/src/routes/health.rs +++ b/autoendpoint/src/routes/health.rs @@ -1,4 +1,5 @@ //! Health and Dockerflow routes +use std::collections::HashMap; use std::thread; use actix_web::{ @@ -17,18 +18,18 @@ use crate::server::AppState; pub async fn health_route(state: Data) -> Json { let router_health = interpret_table_health(state.db.router_table_exists().await); let message_health = interpret_table_health(state.db.message_table_exists().await); + let mut routers: HashMap<&str, bool> = HashMap::new(); + routers.insert("apns", state.apns_router.active()); + routers.insert("fcm", state.fcm_router.active()); - Json(json!({ - "status": "OK", - "version": env!("CARGO_PKG_VERSION"), - "router_table": router_health, - "message_table": message_health, - "routers": { - "adm": state.adm_router.active(), - "apns": state.apns_router.active(), - "fcm": state.fcm_router.active(), - } - })) + let health = json!({ + "status": "OK", + "version": env!("CARGO_PKG_VERSION"), + "router_table": router_health, + "message_table": message_health, + "routers": routers}); + + Json(health) } /// Convert the result of a DB health check to JSON diff --git a/autoendpoint/src/routes/registration.rs b/autoendpoint/src/routes/registration.rs index a7edb90c7..c93c3f49f 100644 --- a/autoendpoint/src/routes/registration.rs +++ b/autoendpoint/src/routes/registration.rs @@ -1,6 +1,6 @@ use actix_web::web::{Data, Json}; use actix_web::{HttpRequest, HttpResponse}; -use cadence::{CountedExt, StatsdClient}; +use cadence::{CountedExt, Histogrammed, StatsdClient}; use uuid::Uuid; use crate::error::{ApiErrorKind, ApiResult}; @@ -35,12 +35,11 @@ pub async fn register_uaid_route( incr_metric("ua.command.register", &app_state.metrics, &request); // Register user and channel in database - let user = User { - router_type: path_args.router_type.to_string(), - router_data: Some(router_data), - 
current_month: app_state.db.rotating_message_table().map(str::to_owned), - ..Default::default() - }; + let user = User::builder() + .router_type(path_args.router_type.to_string()) + .router_data(router_data) + .build() + .map_err(|e| ApiErrorKind::General(format!("User::builder error: {e}")))?; let channel_id = router_data_input.channel_id.unwrap_or_else(Uuid::new_v4); trace!("🌍 Creating user with UAID {}", user.uaid); trace!("🌍 user = {:?}", user); @@ -158,14 +157,46 @@ pub async fn new_channel_route( } /// Handle the `GET /v1/{router_type}/{app_id}/registration/{uaid}` route +/// Since this is called daily by a mobile device, it can serve as part of +/// a liveliness check for the device. This is more authoritative than +/// relying on the bridge service to return a "success", since the bridge +/// may retain "inactive" devices, but will immediately drop devices +/// where Firefox has been uninstalled or that have been factory reset. pub async fn get_channels_route( - _auth: AuthorizationCheck, + auth: AuthorizationCheck, path_args: RegistrationPathArgsWithUaid, app_state: Data, ) -> ApiResult { let uaid = path_args.user.uaid; + let db = &app_state.db; debug!("🌍 Getting channel IDs for UAID {uaid}"); - let channel_ids = app_state.db.get_channels(&uaid).await?; + // + if let Some(mut user) = db.get_user(&uaid).await? { + db.update_user(&mut user).await?; + // report the rough user agent so we know what clients are actively pinging us. + let os = auth.user_agent.metrics_os.clone(); + let browser = auth.user_agent.metrics_browser.clone(); + // use the "real" version here. (these are less normalized) + info!("Mobile client check"; + "os" => auth.user_agent.os, + "os_version" => auth.user_agent.os_version, + "browser" => auth.user_agent.browser_name, + "browser_version" => auth.user_agent.browser_version); + // use the "metrics" version since we need to consider cardinality. 
+ app_state + .metrics + .incr_with_tags("ua.connection.check") + .with_tag("os", &os) + .with_tag("browser", &browser) + .send(); + } + let channel_ids = db.get_channels(&uaid).await?; + + app_state + .metrics + .histogram_with_tags("ua.connection.channel_count", channel_ids.len() as u64) + .with_tag_value("mobile") + .send(); Ok(HttpResponse::Ok().json(serde_json::json!({ "uaid": uaid, diff --git a/autoendpoint/src/server.rs b/autoendpoint/src/server.rs index 75a9f40d1..bebbc9a8e 100644 --- a/autoendpoint/src/server.rs +++ b/autoendpoint/src/server.rs @@ -13,18 +13,15 @@ use serde_json::json; #[cfg(feature = "bigtable")] use autopush_common::db::bigtable::BigTableClientImpl; -#[cfg(feature = "dual")] -use autopush_common::db::dual::DualClientImpl; -#[cfg(feature = "dynamodb")] -use autopush_common::db::dynamodb::DdbClientImpl; use autopush_common::{ db::{client::DbClient, spawn_pool_periodic_reporter, DbSettings, StorageType}, middleware::sentry::SentryWrapper, }; -use crate::error::{ApiError, ApiErrorKind, ApiResult}; use crate::metrics; -use crate::routers::{adm::router::AdmRouter, apns::router::ApnsRouter, fcm::router::FcmRouter}; +#[cfg(feature = "stub")] +use crate::routers::stub::router::StubRouter; +use crate::routers::{apns::router::ApnsRouter, fcm::router::FcmRouter}; use crate::routes::{ health::{health_route, lb_heartbeat_route, log_check, status_route, version_route}, registration::{ @@ -34,6 +31,10 @@ use crate::routes::{ webpush::{delete_notification_route, webpush_route}, }; use crate::settings::Settings; +use crate::{ + error::{ApiError, ApiErrorKind, ApiResult}, + settings::VapidTracker, +}; #[derive(Clone)] pub struct AppState { @@ -45,7 +46,9 @@ pub struct AppState { pub http: reqwest::Client, pub fcm_router: Arc, pub apns_router: Arc, - pub adm_router: Arc, + #[cfg(feature = "stub")] + pub stub_router: Arc, + pub reliability: Arc, } pub struct Server; @@ -67,11 +70,6 @@ impl Server { }, }; let db: Box = match 
StorageType::from_dsn(&db_settings.dsn) { - #[cfg(feature = "dynamodb")] - StorageType::DynamoDb => { - debug!("Using Dynamodb"); - Box::new(DdbClientImpl::new(metrics.clone(), &db_settings)?) - } #[cfg(feature = "bigtable")] StorageType::BigTable => { debug!("Using BigTable"); @@ -79,12 +77,6 @@ impl Server { client.spawn_sweeper(Duration::from_secs(30)); Box::new(client) } - #[cfg(all(feature = "bigtable", feature = "dual"))] - StorageType::Dual => { - let client = DualClientImpl::new(metrics.clone(), &db_settings)?; - client.spawn_sweeper(Duration::from_secs(30)); - Box::new(client) - } _ => { debug!("No idea what {:?} is", &db_settings.dsn); return Err(ApiErrorKind::General( @@ -117,13 +109,9 @@ impl Server { ) .await?, ); - let adm_router = Arc::new(AdmRouter::new( - settings.adm.clone(), - endpoint_url, - http.clone(), - metrics.clone(), - db.clone(), - )?); + let reliability = Arc::new(VapidTracker(settings.tracking_keys())); + #[cfg(feature = "stub")] + let stub_router = Arc::new(StubRouter::new(settings.stub.clone())?); let app_state = AppState { metrics: metrics.clone(), settings, @@ -132,7 +120,9 @@ impl Server { http, fcm_router, apns_router, - adm_router, + #[cfg(feature = "stub")] + stub_router, + reliability, }; spawn_pool_periodic_reporter( diff --git a/autoendpoint/src/settings.rs b/autoendpoint/src/settings.rs index f3fa34659..e519ac3ef 100644 --- a/autoendpoint/src/settings.rs +++ b/autoendpoint/src/settings.rs @@ -1,13 +1,16 @@ //! 
Application settings +use actix_http::header::HeaderMap; use config::{Config, ConfigError, Environment, File}; use fernet::{Fernet, MultiFernet}; use serde::Deserialize; use url::Url; -use crate::routers::adm::settings::AdmSettings; +use crate::headers::vapid::VapidHeaderWithKey; use crate::routers::apns::settings::ApnsSettings; use crate::routers::fcm::settings::FcmSettings; +#[cfg(feature = "stub")] +use crate::routers::stub::settings::StubSettings; pub const ENV_PREFIX: &str = "autoend"; @@ -28,6 +31,16 @@ pub struct Settings { pub router_table_name: String, pub message_table_name: String, + pub vapid_aud: Vec, + + /// A stringified JSON list of VAPID public keys which should be tracked internally. + /// This should ONLY include Mozilla generated and consumed messages (e.g. "SendToTab", etc.) + /// These keys should be specified in stripped, b64encoded, X962 format (e.g. a single line of + /// base64 encoded data without padding). + /// You can use `scripts/convert_pem_to_x962.py` to easily convert EC Public keys stored in + /// PEM format into appropriate x962 format. + pub tracking_keys: String, + pub max_data_bytes: usize, pub crypto_keys: String, pub auth_keys: String, @@ -42,7 +55,8 @@ pub struct Settings { pub fcm: FcmSettings, pub apns: ApnsSettings, - pub adm: AdmSettings, + #[cfg(feature = "stub")] + pub stub: StubSettings, } impl Default for Settings { @@ -56,6 +70,10 @@ impl Default for Settings { db_settings: "".to_owned(), router_table_name: "router".to_string(), message_table_name: "message".to_string(), + vapid_aud: vec![ + "https://push.services.mozilla.org".to_string(), + "http://127.0.0.1:9160".to_string(), + ], // max data is a bit hard to figure out, due to encryption. Using something // like pywebpush, if you encode a block of 4096 bytes, you'll get a // 4216 byte data block. 
Since we're going to be receiving this, we have to @@ -63,6 +81,7 @@ impl Default for Settings { max_data_bytes: 5630, crypto_keys: format!("[{}]", Fernet::generate_key()), auth_keys: r#"["AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB="]"#.to_string(), + tracking_keys: r#"[]"#.to_string(), human_logs: false, connection_timeout_millis: 1000, request_timeout_millis: 3000, @@ -71,7 +90,8 @@ impl Default for Settings { statsd_label: "autoendpoint".to_string(), fcm: FcmSettings::default(), apns: ApnsSettings::default(), - adm: AdmSettings::default(), + #[cfg(feature = "stub")] + stub: StubSettings::default(), } } } @@ -91,11 +111,8 @@ impl Settings { // down to the sub structures. config = config.add_source(Environment::with_prefix(ENV_PREFIX).separator("__")); - let built = config.build()?; - - built - .try_deserialize::() - .map_err(|error| match error { + let built: Self = config.build()?.try_deserialize::().map_err(|error| { + match error { // Configuration errors are not very sysop friendly, Try to make them // a bit more 3AM useful. ConfigError::Message(error_msg) => { @@ -112,7 +129,10 @@ impl Settings { error!("Configuration error: Other: {:?}", &error); error } - }) + } + })?; + + Ok(built) } /// Convert a string like `[item1,item2]` into a iterator over `item1` and `item2`. @@ -149,6 +169,17 @@ impl Settings { .collect() } + /// Get the list of tracking public keys + // TODO: this should return a Vec<[u8]> so that key formatting errors do not cause + // false rejections. This is not a problem now since we have access to the source + // public key, but that may not always be true. 
+ pub fn tracking_keys(&self) -> Vec<String> { + let keys = &self.tracking_keys.replace(['"', ' '], ""); + Self::read_list_from_str(keys, "Invalid AUTOEND_TRACKING_KEYS") + .map(|v| v.to_owned()) + .collect() + } + /// Get the URL for this endpoint server pub fn endpoint_url(&self) -> Url { let endpoint = if self.endpoint_url.is_empty() { @@ -160,10 +191,42 @@ impl Settings { } } +#[derive(Clone, Debug)] +pub struct VapidTracker(pub Vec<String>); +impl VapidTracker { + /// Very simple string check to see if the Public Key specified in the Vapid header + /// matches the set of trackable keys. + pub fn is_trackable(&self, vapid: &VapidHeaderWithKey) -> bool { + // ideally, [Settings.with_env_and_config_file()] does the work of pre-populating + // the Settings.tracking_vapid_pubs cache, but we can't rely on that. + self.0.contains(&vapid.public_key) + } + + /// Extract the message Id from the headers (if present), otherwise just make one up. + pub fn get_tracking_id(&self, headers: &HeaderMap) -> String { + headers + .get("X-MessageId") + .and_then(|v| + // TODO: we should convert the public key string to a bitarray + // this would prevent any formatting errors from falsely rejecting + // the key. We're ok with comparing strings because we currently + // have access to the same public key value string that is being + // used, but that may not always be the case.
+ v.to_str().ok()) + .map(|v| v.to_owned()) + .unwrap_or_else(|| uuid::Uuid::new_v4().as_simple().to_string()) + } +} + #[cfg(test)] mod tests { - use super::Settings; - use crate::error::ApiResult; + use actix_http::header::{HeaderMap, HeaderName, HeaderValue}; + + use super::{Settings, VapidTracker}; + use crate::{ + error::ApiResult, + headers::vapid::{VapidHeader, VapidHeaderWithKey}, + }; #[test] fn test_auth_keys() -> ApiResult<()> { @@ -243,4 +306,49 @@ mod tests { env::remove_var(&timeout); } } + + #[test] + fn test_tracking_keys() -> ApiResult<()> { + let settings = Settings{ + tracking_keys: r#"["BLMymkOqvT6OZ1o9etCqV4jGPkvOXNz5FdBjsAR9zR5oeCV1x5CBKuSLTlHon-H_boHTzMtMoNHsAGDlDB6X7vI"]"#.to_owned(), + ..Default::default() + }; + + let test_header = VapidHeaderWithKey { + vapid: VapidHeader { + scheme: "".to_owned(), + token: "".to_owned(), + version_data: crate::headers::vapid::VapidVersionData::Version1, + }, + public_key: "BLMymkOqvT6OZ1o9etCqV4jGPkvOXNz5FdBjsAR9zR5oeCV1x5CBKuSLTlHon-H_boHTzMtMoNHsAGDlDB6X7vI".to_owned() + }; + + let key_set = settings.tracking_keys(); + assert!(!key_set.is_empty()); + + let reliability = VapidTracker(key_set); + assert!(reliability.is_trackable(&test_header)); + + Ok(()) + } + + #[test] + fn test_tracking_id() -> ApiResult<()> { + let mut headers = HeaderMap::new(); + let keys = Vec::new(); + let reliability = VapidTracker(keys); + + let key = reliability.get_tracking_id(&headers); + assert!(!key.is_empty()); + + headers.insert( + HeaderName::from_lowercase(b"x-messageid").unwrap(), + HeaderValue::from_static("123foobar456"), + ); + + let key = reliability.get_tracking_id(&headers); + assert_eq!(key, "123foobar456".to_owned()); + + Ok(()) + } } diff --git a/autopush-common/Cargo.toml b/autopush-common/Cargo.toml index 4a999d086..053681e2d 100644 --- a/autopush-common/Cargo.toml +++ b/autopush-common/Cargo.toml @@ -10,7 +10,6 @@ name = "autopush_common" [dependencies] actix-web.workspace = true -actix-http.workspace = 
true backtrace.workspace = true base64.workspace = true cadence.workspace = true @@ -22,23 +21,16 @@ futures.workspace = true futures-util.workspace = true hex.workspace = true httparse.workspace = true -hyper.workspace = true lazy_static.workspace = true -log.workspace = true mockall.workspace = true openssl.workspace = true rand.workspace = true regex.workspace = true reqwest.workspace = true -rusoto_core = { workspace = true, optional = true } -rusoto_credential = { workspace = true, optional = true } -rusoto_dynamodb = { workspace = true, optional = true } sentry-backtrace.workspace = true sentry.workspace = true -sentry-core.workspace = true serde.workspace = true serde_derive.workspace = true -serde_dynamodb.workspace = true serde_json.workspace = true slog.workspace = true slog-async.workspace = true @@ -47,18 +39,14 @@ slog-mozlog-json.workspace = true slog-scope.workspace = true slog-stdlog.workspace = true slog-term.workspace = true -tokio.workspace = true -tokio-core.workspace = true -# tokio-postgres.workspace = true thiserror.workspace = true -tungstenite.workspace = true uuid.workspace = true url.workspace = true again = "0.1" async-trait = "0.1" +derive_builder = "0.20" gethostname = "0.4" -futures-backoff = "0.1.0" num_cpus = "1.16" woothee = "0.13" @@ -75,12 +63,13 @@ form_urlencoded = { version = "1.2", optional = true } [dev-dependencies] mockito = "0.31" tempfile = "3.2.0" -tokio = { version = "0.2", features = ["macros"] } +tokio = { workspace = true, features = ["macros"] } actix-rt = "2.8" [features] # NOTE: Do not set a `default` here, rather specify them in the calling library. # This is to reduce complexity around feature specification. 
+ bigtable = [ "dep:google-cloud-rust-raw", "dep:grpcio", @@ -88,9 +77,6 @@ bigtable = [ "dep:protobuf", "dep:form_urlencoded", ] -dynamodb = ["dep:rusoto_core", "dep:rusoto_credential", "dep:rusoto_dynamodb"] -dual = ["dynamodb", "bigtable"] -aws = [] emulator = [ "bigtable", ] # used for testing big table, requires an external bigtable emulator running. diff --git a/autopush-common/build.rs b/autopush-common/build.rs index d2a3f84c1..06440274b 100644 --- a/autopush-common/build.rs +++ b/autopush-common/build.rs @@ -1,5 +1,5 @@ pub fn main() { - if !(cfg!(feature = "dynamodb") || cfg!(feature = "bigtable")) { - panic!("No database defined! Please compile with either `features=dynamodb` or `features=bigtable`"); + if !cfg!(feature = "bigtable") { + panic!("No database defined! Please compile with `features=bigtable`"); } } diff --git a/autopush-common/src/db/bigtable/bigtable_client/error.rs b/autopush-common/src/db/bigtable/bigtable_client/error.rs index 25f3a5035..a15bed701 100644 --- a/autopush-common/src/db/bigtable/bigtable_client/error.rs +++ b/autopush-common/src/db/bigtable/bigtable_client/error.rs @@ -1,7 +1,6 @@ use std::fmt::{self, Display}; use actix_web::http::StatusCode; -use backtrace::Backtrace; use deadpool::managed::{PoolError, TimeoutType}; use thiserror::Error; @@ -149,14 +148,6 @@ impl BigTableError { } impl ReportableError for BigTableError { - fn reportable_source(&self) -> Option<&(dyn ReportableError + 'static)> { - None - } - - fn backtrace(&self) -> Option<&Backtrace> { - None - } - fn is_sentry_event(&self) -> bool { #[allow(clippy::match_like_matches_macro)] match self { diff --git a/autopush-common/src/db/bigtable/bigtable_client/merge.rs b/autopush-common/src/db/bigtable/bigtable_client/merge.rs index af129c56c..c0bac46a1 100644 --- a/autopush-common/src/db/bigtable/bigtable_client/merge.rs +++ b/autopush-common/src/db/bigtable/bigtable_client/merge.rs @@ -191,7 +191,10 @@ impl RowMerger { let cell = &mut row.cell_in_progress; if 
chunk.has_family_name() { - cell.family = chunk.take_family_name().get_value().to_owned(); + chunk + .take_family_name() + .get_value() + .clone_into(&mut cell.family); } else { if self.last_seen_cell_family.is_none() { return Err(BigTableError::InvalidChunk( @@ -294,7 +297,7 @@ impl RowMerger { if row_in_progress.last_family != cell_in_progress.family { family_changed = true; let cip_family = cell_in_progress.family.clone(); - row_in_progress.last_family = cip_family.clone(); + row_in_progress.last_family.clone_from(&cip_family); // append the cell in progress to the completed cells for this family in the row. // @@ -314,7 +317,7 @@ impl RowMerger { // If the family changed, or the cell name changed if family_changed || row_in_progress.last_qualifier != cell_in_progress.qualifier { let qualifier = cell_in_progress.qualifier.clone(); - row_in_progress.last_qualifier = qualifier.clone(); + row_in_progress.last_qualifier.clone_from(&qualifier); let qualifier_cells = vec![Cell { family: cell_in_progress.family.clone(), timestamp: cell_in_progress.timestamp, diff --git a/autopush-common/src/db/bigtable/bigtable_client/mod.rs b/autopush-common/src/db/bigtable/bigtable_client/mod.rs index a1155302f..2e84aaffa 100644 --- a/autopush-common/src/db/bigtable/bigtable_client/mod.rs +++ b/autopush-common/src/db/bigtable/bigtable_client/mod.rs @@ -1,3 +1,4 @@ +use std::borrow::Cow; use std::collections::{BTreeMap, HashMap, HashSet}; use std::fmt; use std::fmt::Display; @@ -23,12 +24,11 @@ use uuid::Uuid; use crate::db::{ client::{DbClient, FetchMessageResponse}, error::{DbError, DbResult}, - DbSettings, Notification, NotificationRecord, User, MAX_CHANNEL_TTL, MAX_ROUTER_TTL, - USER_RECORD_VERSION, + DbSettings, Notification, NotificationRecord, User, MAX_ROUTER_TTL, USER_RECORD_VERSION, }; pub use self::metadata::MetadataBuilder; -use self::row::Row; +use self::row::{Row, RowCells}; use super::pool::BigTablePool; use super::BigTableDbSettings; @@ -169,9 +169,14 @@ fn 
filter_chain(filters: impl Into>) -> RowFilter { } /// Return a ReadRowsRequest against table for a given row key -fn read_row_request(table_name: &str, row_key: &str) -> bigtable::ReadRowsRequest { +fn read_row_request( + table_name: &str, + app_profile_id: &str, + row_key: &str, +) -> bigtable::ReadRowsRequest { let mut req = bigtable::ReadRowsRequest::default(); req.set_table_name(table_name.to_owned()); + req.set_app_profile_id(app_profile_id.to_owned()); let mut row_keys = RepeatedField::default(); row_keys.push(row_key.as_bytes().to_vec()); @@ -196,6 +201,46 @@ fn to_string(value: Vec, name: &str) -> Result { }) } +/// Parse the "set" (see [DbClient::add_channels]) of channel ids in a bigtable Row. +/// +/// Cells should solely contain the set of channels otherwise an Error is returned. +fn channels_from_cells(cells: &RowCells) -> DbResult> { + let mut result = HashSet::new(); + for cells in cells.values() { + let Some(cell) = cells.last() else { + continue; + }; + let Some((_, chid)) = cell.qualifier.split_once("chid:") else { + return Err(DbError::Integrity( + "get_channels expected: chid:".to_owned(), + None, + )); + }; + result.insert(Uuid::from_str(chid).map_err(|e| DbError::General(e.to_string()))?); + } + Ok(result) +} + +/// Convert the [HashSet] of channel ids to cell entries for a bigtable Row +fn channels_to_cells(channels: Cow>, expiry: SystemTime) -> Vec { + let channels = channels.into_owned(); + let mut cells = Vec::with_capacity(channels.len().min(100_000)); + for (i, channel_id) in channels.into_iter().enumerate() { + // There is a limit of 100,000 mutations per batch for bigtable. + // https://cloud.google.com/bigtable/quotas + // If you have 100,000 channels, you have too many. 
+ if i >= 100_000 { + break; + } + cells.push(cell::Cell { + qualifier: format!("chid:{}", channel_id.as_hyphenated()), + timestamp: expiry, + ..Default::default() + }); + } + cells +} + pub fn retry_policy(max: usize) -> RetryPolicy { RetryPolicy::default() .with_max_retries(max) @@ -269,14 +314,14 @@ pub fn retryable_error(metrics: Arc) -> impl Fn(&grpcio::Error) -> /// They can be incomplete for a couple reasons: /// /// 1) A migration code bug caused a few incomplete migrations where -/// `add_channels` and `increment_storage` calls occurred when the migration's -/// initial `add_user` was never completed: -/// https://github.com/mozilla-services/autopush-rs/pull/640 +/// `add_channels` and `increment_storage` calls occurred when the migration's +/// initial `add_user` was never completed: +/// https://github.com/mozilla-services/autopush-rs/pull/640 /// /// 2) When router TTLs are eventually enabled: `add_channel` and -/// `increment_storage` can write cells with later expiry times than the other -/// router cells -fn is_incomplete_router_record(cells: &HashMap>) -> bool { +/// `increment_storage` can write cells with later expiry times than the other +/// router cells +fn is_incomplete_router_record(cells: &RowCells) -> bool { cells .keys() .all(|k| ["current_timestamp", "version"].contains(&k.as_str()) || k.starts_with("chid:")) @@ -337,13 +382,18 @@ impl BigTableClientImpl { /// Return a ReadRowsRequest for a given row key fn read_row_request(&self, row_key: &str) -> bigtable::ReadRowsRequest { - read_row_request(&self.settings.table_name, row_key) + read_row_request( + &self.settings.table_name, + &self.settings.app_profile_id, + row_key, + ) } /// Return a MutateRowRequest for a given row key fn mutate_row_request(&self, row_key: &str) -> bigtable::MutateRowRequest { let mut req = bigtable::MutateRowRequest::default(); req.set_table_name(self.settings.table_name.clone()); + req.set_app_profile_id(self.settings.app_profile_id.clone()); 
req.set_row_key(row_key.as_bytes().to_vec()); req } @@ -352,6 +402,7 @@ impl BigTableClientImpl { fn check_and_mutate_row_request(&self, row_key: &str) -> bigtable::CheckAndMutateRowRequest { let mut req = bigtable::CheckAndMutateRowRequest::default(); req.set_table_name(self.settings.table_name.clone()); + req.set_app_profile_id(self.settings.app_profile_id.clone()); req.set_row_key(row_key.as_bytes().to_vec()); req } @@ -498,7 +549,7 @@ impl BigTableClientImpl { .timestamp .duration_since(SystemTime::UNIX_EPOCH) .map_err(error::BigTableError::WriteTime)?; - set_cell.family_name = family_id.clone(); + set_cell.family_name.clone_from(&family_id); set_cell.set_column_qualifier(cell.qualifier.clone().into_bytes()); set_cell.set_value(cell.value); // Yes, this is passing milli bounded time as a micro. Otherwise I get @@ -759,6 +810,11 @@ impl BigTableClientImpl { }); }; + cells.extend(channels_to_cells( + Cow::Borrowed(&user.priv_channels), + expiry, + )); + row.add_cells(ROUTER_FAMILY, cells); row } @@ -787,7 +843,8 @@ impl BigtableDb { /// pub async fn health_check( &mut self, - metrics: Arc, + metrics: &Arc, + app_profile_id: &str, ) -> Result { // It is recommended that we pick a random key to perform the health check. Selecting // a single key for all health checks causes a "hot tablet" to arise. The `PingAndWarm` @@ -796,7 +853,7 @@ impl BigtableDb { // This health check is to see if the database is present, the response is not important // other than it does not return an error. let random_uaid = Uuid::new_v4().simple().to_string(); - let mut req = read_row_request(&self.table_name, &random_uaid); + let mut req = read_row_request(&self.table_name, app_profile_id, &random_uaid); let mut filter = data::RowFilter::default(); filter.set_block_all_filter(true); req.set_filter(filter); @@ -842,6 +899,18 @@ impl DbClient for BigTableClientImpl { /// BigTable doesn't really have the concept of an "update". 
You simply write the data and /// the individual cells create a new version. Depending on the garbage collection rules for /// the family, these can either persist or be automatically deleted. + /// + /// NOTE: This function updates the key ROUTER records for a given UAID. It does this by + /// calling [BigTableClientImpl::user_to_row] which creates a new row with new `cell.timestamp` values set + /// to now + `MAX_ROUTER_TTL`. This function is called by mobile during the daily + /// [autoendpoint::routes::update_token_route] handling, and by desktop + /// [autoconnect-ws-sm::get_or_create_user]` which is called + /// during the `HELLO` handler. This should be enough to ensure that the ROUTER records + /// are properly refreshed for "lively" clients. + /// + /// NOTE: There is some, very small, potential risk that a desktop client that can + /// somehow remain connected the duration of MAX_ROUTER_TTL, may be dropped as not being + /// "lively". async fn update_user(&self, user: &mut User) -> DbResult { let Some(ref version) = user.version else { return Err(DbError::General( @@ -930,6 +999,9 @@ impl DbClient for BigTableClientImpl { result.current_timestamp = Some(to_u64(cell.value, "current_timestamp")?) } + // Read the channels last, after removal of all non channel cells + result.priv_channels = channels_from_cells(&row.cells)?; + Ok(Some(result)) } @@ -962,26 +1034,15 @@ impl DbClient for BigTableClientImpl { // easy/efficient let row_key = uaid.simple().to_string(); let mut row = Row::new(row_key); - let expiry = std::time::SystemTime::now() + Duration::from_secs(MAX_CHANNEL_TTL); - - let mut cells = Vec::with_capacity(channels.len().min(100_000)); - for (i, channel_id) in channels.into_iter().enumerate() { - // There is a limit of 100,000 mutations per batch for bigtable. - // https://cloud.google.com/bigtable/quotas - // If you have 100,000 channels, you have too many. 
- if i >= 100_000 { - break; - } - cells.push(cell::Cell { - qualifier: format!("chid:{}", channel_id.as_hyphenated()), - timestamp: expiry, - ..Default::default() - }); - } + let expiry = std::time::SystemTime::now() + Duration::from_secs(MAX_ROUTER_TTL); + // Note: updating the version column isn't necessary here because this // write only adds a new (or updates an existing) column with a 0 byte // value - row.add_cells(ROUTER_FAMILY, cells); + row.add_cells( + ROUTER_FAMILY, + channels_to_cells(Cow::Owned(channels), expiry), + ); self.write_row(row).await?; Ok(()) @@ -999,23 +1060,10 @@ impl DbClient for BigTableClientImpl { cq_filter, ])); - let mut result = HashSet::new(); - if let Some(record) = self.read_row(req).await? { - for mut cells in record.cells.into_values() { - let Some(cell) = cells.pop() else { - continue; - }; - let Some((_, chid)) = cell.qualifier.split_once("chid:") else { - return Err(DbError::Integrity( - "get_channels expected: chid:".to_owned(), - None, - )); - }; - result.insert(Uuid::from_str(chid).map_err(|e| DbError::General(e.to_string()))?); - } - } - - Ok(result) + let Some(row) = self.read_row(req).await? else { + return Ok(Default::default()); + }; + channels_from_cells(&row.cells) } /// Delete the channel. Does not delete its associated pending messages. 
@@ -1178,8 +1226,9 @@ impl DbClient for BigTableClientImpl { &row_key, timestamp.to_be_bytes().to_vec() ); - let mut row = Row::new(row_key); let expiry = std::time::SystemTime::now() + Duration::from_secs(MAX_ROUTER_TTL); + let mut row = Row::new(row_key.clone()); + row.cells.insert( ROUTER_FAMILY.to_owned(), vec![ @@ -1192,7 +1241,9 @@ impl DbClient for BigTableClientImpl { new_version_cell(expiry), ], ); + self.write_row(row).await?; + Ok(()) } @@ -1221,6 +1272,7 @@ impl DbClient for BigTableClientImpl { ) -> DbResult { let mut req = ReadRowsRequest::default(); req.set_table_name(self.settings.table_name.clone()); + req.set_app_profile_id(self.settings.app_profile_id.clone()); let start_key = format!("{}#01:", uaid.simple()); let end_key = format!("{}#02:", uaid.simple()); @@ -1249,10 +1301,9 @@ impl DbClient for BigTableClientImpl { ); let messages = self.rows_to_notifications(rows)?; - // Note: Bigtable always returns a timestamp of None here whereas - // DynamoDB returns the `current_timestamp` read from its meta - // record. Under Bigtable `current_timestamp` is instead initially read - // from [get_user] + // Note: Bigtable always returns a timestamp of None. + // Under Bigtable `current_timestamp` is instead initially read + // from [get_user]. Ok(FetchMessageResponse { messages, timestamp: None, @@ -1269,6 +1320,7 @@ impl DbClient for BigTableClientImpl { ) -> DbResult { let mut req = ReadRowsRequest::default(); req.set_table_name(self.settings.table_name.clone()); + req.set_app_profile_id(self.settings.app_profile_id.clone()); let mut rows = data::RowSet::default(); let mut row_range = data::RowRange::default(); @@ -1331,7 +1383,7 @@ impl DbClient for BigTableClientImpl { .pool .get() .await? - .health_check(self.metrics.clone()) + .health_check(&self.metrics.clone(), &self.settings.app_profile_id) .await?) 
} @@ -1347,11 +1399,6 @@ impl DbClient for BigTableClientImpl { Ok(true) } - /// BigTable does not support message table rotation - fn rotating_message_table(&self) -> Option<&str> { - None - } - fn box_clone(&self) -> Box { Box::new(self.clone()) } @@ -1459,7 +1506,6 @@ mod tests { router_type: "webpush".to_owned(), connected_at, router_data: None, - last_connect: Some(connected_at), node_id: Some(node_id.clone()), ..Default::default() }; @@ -1762,4 +1808,84 @@ mod tests { client.remove_user(&uaid).await.unwrap(); } + + #[actix_rt::test] + async fn channel_and_current_timestamp_ttl_updates() { + let client = new_client().unwrap(); + let uaid = gen_test_uaid(); + let chid = Uuid::parse_str(TEST_CHID).unwrap(); + client.remove_user(&uaid).await.unwrap(); + + // Setup a user with some channels and a current_timestamp + let user = User { + uaid, + ..Default::default() + }; + client.add_user(&user).await.unwrap(); + + client.add_channel(&uaid, &chid).await.unwrap(); + client + .add_channel(&uaid, &uuid::Uuid::new_v4()) + .await + .unwrap(); + + client + .increment_storage( + &uaid, + SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .unwrap() + .as_secs(), + ) + .await + .unwrap(); + + let req = client.read_row_request(&uaid.as_simple().to_string()); + let Some(mut row) = client.read_row(req).await.unwrap() else { + panic!("Expected row"); + }; + + // Ensure the initial cell expiry (timestamp) of all the cells + // in the row has been updated + let ca_expiry = row.take_required_cell("connected_at").unwrap().timestamp; + for mut cells in row.cells.into_values() { + let Some(cell) = cells.pop() else { + continue; + }; + assert!( + cell.timestamp >= ca_expiry, + "{} cell timestamp should >= connected_at's", + cell.qualifier + ); + } + + let mut user = client.get_user(&uaid).await.unwrap().unwrap(); + + // Quick nap to make sure that the ca_expiry values are different. 
+ tokio::time::sleep(Duration::from_secs_f32(0.2)).await; + client.update_user(&mut user).await.unwrap(); + + // Ensure update_user updated the expiry (timestamp) of every cell in the row + let req = client.read_row_request(&uaid.as_simple().to_string()); + let Some(mut row) = client.read_row(req).await.unwrap() else { + panic!("Expected row"); + }; + + let ca_expiry2 = row.take_required_cell("connected_at").unwrap().timestamp; + + assert!(ca_expiry2 > ca_expiry); + + for mut cells in row.cells.into_values() { + let Some(cell) = cells.pop() else { + continue; + }; + assert!( + cell.timestamp >= ca_expiry2, + "{} cell timestamp expiry should exceed connected_at's", + cell.qualifier + ); + } + + client.remove_user(&uaid).await.unwrap(); + } } diff --git a/autopush-common/src/db/bigtable/bigtable_client/row.rs b/autopush-common/src/db/bigtable/bigtable_client/row.rs index 2ace5d8c8..2f5bbd417 100644 --- a/autopush-common/src/db/bigtable/bigtable_client/row.rs +++ b/autopush-common/src/db/bigtable/bigtable_client/row.rs @@ -4,6 +4,8 @@ use crate::db::error::{DbError, DbResult}; use super::{cell::Cell, RowKey}; +pub type RowCells = HashMap>; + /// A Bigtable storage row. Bigtable stores by Family ID which isn't /// very useful for us later, so we overload this structure a bit. /// When we read data back out of Bigtable, we index cells by @@ -19,7 +21,7 @@ pub struct Row { pub row_key: RowKey, /// The row's collection of cells, indexed by either the /// FamilyID (for write) or Qualifier (for read). - pub cells: HashMap>, + pub cells: RowCells, } impl Row { diff --git a/autopush-common/src/db/bigtable/mod.rs b/autopush-common/src/db/bigtable/mod.rs index d5e64867b..ce0403c86 100644 --- a/autopush-common/src/db/bigtable/mod.rs +++ b/autopush-common/src/db/bigtable/mod.rs @@ -48,6 +48,10 @@ pub struct BigTableDbSettings { /// bigtable. #[serde(default)] pub table_name: String, + /// Routing replication profile id. 
+ /// Should be used everywhere we set `table_name` when creating requests + #[serde(default)] + pub app_profile_id: String, #[serde(default)] pub router_family: String, #[serde(default)] @@ -102,6 +106,7 @@ impl Default for BigTableDbSettings { database_pool_max_idle: Default::default(), route_to_leader: Default::default(), retry_count: Default::default(), + app_profile_id: Default::default(), } } } @@ -149,7 +154,7 @@ impl BigTableDbSettings { impl TryFrom<&str> for BigTableDbSettings { type Error = DbError; fn try_from(setting_string: &str) -> Result { - let me: Self = serde_json::from_str(setting_string) + let mut me: Self = serde_json::from_str(setting_string) .map_err(|e| DbError::General(format!("Could not parse DdbSettings: {:?}", e)))?; if me.table_name.starts_with('/') { @@ -158,6 +163,13 @@ impl TryFrom<&str> for BigTableDbSettings { )); }; + // specify the default string "default" if it's not specified. + // There's a small chance that this could be reported as "unspecified", so this + // removes that confusion. + if me.app_profile_id.is_empty() { + "default".clone_into(&mut me.app_profile_id); + } + Ok(me) } } diff --git a/autopush-common/src/db/bigtable/pool.rs b/autopush-common/src/db/bigtable/pool.rs index c216107de..6b14f20e0 100644 --- a/autopush-common/src/db/bigtable/pool.rs +++ b/autopush-common/src/db/bigtable/pool.rs @@ -216,7 +216,7 @@ impl Manager for BigtableClientManager { } if !client - .health_check(self.metrics.clone()) + .health_check(&self.metrics.clone(), &self.settings.app_profile_id) .await .inspect_err(|e| debug!("🏊 Recycle requested (health). {:?}", e))? 
{ diff --git a/autopush-common/src/db/client.rs b/autopush-common/src/db/client.rs index 477b11401..f447b26ea 100644 --- a/autopush-common/src/db/client.rs +++ b/autopush-common/src/db/client.rs @@ -97,20 +97,8 @@ pub trait DbClient: Send + Sync { /// Perform the health check on this data store async fn health_check(&self) -> DbResult; - /// Return the DynamoDB current message table name - /// - /// DynamoDB tables were previously rotated to new tables on a monthly - /// basis. The current table name is used to validate the DynamoDB specific - /// legacy `User::current_month` value. - /// - /// N/A to BigTable (returns `None`). - // #[automock] requires an explicit 'a lifetime here which is otherwise - // unnecessary and rejected by clippy - #[allow(clippy::needless_lifetimes)] - fn rotating_message_table<'a>(&'a self) -> Option<&'a str>; - /// Provide the module name. - /// This was added for simple dual mode testing, but may be useful in + /// This was added for simple dual mode testing (legacy), but may be useful in /// other situations. fn name(&self) -> String; diff --git a/autopush-common/src/db/dual/mod.rs b/autopush-common/src/db/dual/mod.rs deleted file mode 100644 index 023a33bb2..000000000 --- a/autopush-common/src/db/dual/mod.rs +++ /dev/null @@ -1,601 +0,0 @@ -//! Dual data store. -//! -//! This uses two data stores, a primary and a secondary, and if a -//! read operation fails for the primary, the secondary is automatically -//! used. All write operations ONLY go to the primary. -//! -//! This requires both the `dynamodb` and `bigtable` features. -//! 
-use std::{collections::HashSet, sync::Arc, time::Duration}; - -use async_trait::async_trait; -use cadence::{CountedExt, StatsdClient, Timed}; -use serde::Deserialize; -use serde_json::from_str; -use uuid::Uuid; - -use crate::db::{ - bigtable::BigTableClientImpl, - client::{DbClient, FetchMessageResponse}, - dynamodb::DdbClientImpl, - error::{DbError, DbResult}, - DbSettings, Notification, User, -}; - -use super::StorageType; - -#[derive(Clone)] -pub struct DualClientImpl { - /// The primary data store, which will always be Bigtable. - primary: BigTableClientImpl, - /// The secondary data store, which will always be DynamoDB. - secondary: DdbClientImpl, - /// Write changes to the secondary, including messages and updates - /// as well as account and channel additions/deletions. - write_to_secondary: bool, - /// Hex value to use to specify the first byte of the median offset. - /// e.g. "0a" will start from include all UUIDs upto and including "0a" - median: Option, - /// Ending octet to use for more distributed account migration - end_median: Option, - metrics: Arc, -} - -fn default_true() -> bool { - true -} - -#[derive(Clone, Debug, Deserialize)] -pub struct DualDbSettings { - /// The primary data store, which will always be Bigtable. - primary: DbSettings, - /// The secondary data store, which will always be DynamoDB. - secondary: DbSettings, - /// Write changes to the secondary, including messages and updates - /// as well as account and channel additions/deletions. - #[serde(default = "default_true")] - write_to_secondary: bool, - /// Hex value to specify the first byte of the median offset. - /// e.g. "0a" will start from include all UUIDs upto and including "0a" - #[serde(default)] - median: Option, - /// Hex value to specify the last byte of the median offset to include. 
- /// this value is "OR"ed withe "median" to produce a more distributed set of - /// uaids to migrate - #[serde(default)] - end_median: Option, -} - -impl DualClientImpl { - pub fn new(metrics: Arc, settings: &DbSettings) -> DbResult { - // Not really sure we need the dsn here. - info!("⚖ Trying: {:?}", settings.db_settings); - let db_settings: DualDbSettings = from_str(&settings.db_settings).map_err(|e| { - DbError::General(format!("Could not parse DualDBSettings string {:?}", e)) - })?; - info!("⚖ {:?}", &db_settings); - if StorageType::from_dsn(&db_settings.primary.dsn) != StorageType::BigTable { - return Err(DbError::General( - "Invalid primary DSN specified (must be BigTable type)".to_owned(), - )); - } - if StorageType::from_dsn(&db_settings.secondary.dsn) != StorageType::DynamoDb { - return Err(DbError::General( - "Invalid secondary DSN specified (must be DynamoDB type)".to_owned(), - )); - } - // determine which uaids to move based on the first byte of their UAID, which (hopefully) - // should be sufficiently random based on it being a UUID4. - let median = if let Some(median) = db_settings.median { - let median = hex::decode(median).map_err(|e| { - DbError::General(format!( - "Could not parse median string. Please use a valid Hex identifier: {:?}", - e, - )) - })?[0]; - debug!( - "⚖ Setting median to {:02} ({})", - hex::encode([median]), - &median - ); - Some(median) - } else { - None - }; - // determine which uaids to move based on the last byte of their UAID. - // This should reduce the hot table problem. - let end_median = if let Some(end_median) = db_settings.end_median { - let end_median = hex::decode(end_median).map_err(|e| { - DbError::General(format!( - "Could not parse end_median string. 
Please use a valid Hex identifier: {:?}", - e, - )) - })?[0]; - debug!( - "⚖ Setting end_median to {:02} ({})", - hex::encode([end_median]), - &end_median - ); - Some(end_median) - } else { - None - }; - - let primary = BigTableClientImpl::new(metrics.clone(), &db_settings.primary)?; - let secondary = DdbClientImpl::new(metrics.clone(), &db_settings.secondary)?; - debug!("⚖ Got primary and secondary"); - metrics - .incr_with_tags("database.dual.allot") - .with_tag( - "median", - &median.map_or_else(|| "None".to_owned(), |m| m.to_string()), - ) - .with_tag( - "end_median", - &end_median.map_or_else(|| "None".to_owned(), |m| m.to_string()), - ) - .send(); - Ok(Self { - primary, - secondary: secondary.clone(), - median, - end_median, - write_to_secondary: db_settings.write_to_secondary, - metrics, - }) - } - - /// Spawn a task to periodically evict idle Bigtable connections - pub fn spawn_sweeper(&self, interval: Duration) { - self.primary.spawn_sweeper(interval); - } -} - -/// Wrapper functions to allow us to change which data store system actually manages the -/// user allocation routing table. -impl DualClientImpl { - fn should_migrate(&self, uaid: &Uuid) -> bool { - let bytes = uaid.as_bytes(); - let mut result: bool = false; - if let Some(median) = self.median { - result |= bytes.first() <= Some(&median); - }; - if let Some(end_median) = self.end_median { - result |= bytes.last() <= Some(&end_median); - } - result - } - /// Route and assign a user to the appropriate back end based on the defined - /// allowance - /// Returns the dbclient to use and whether or not it's the primary database. 
- async fn allot<'a>(&'a self, uaid: &Uuid) -> DbResult<(Box<&'a dyn DbClient>, bool)> { - let target: (Box<&'a dyn DbClient>, bool) = if self.should_migrate(uaid) { - debug!("⚖ Routing user to Bigtable"); - (Box::new(&self.primary), true) - } else { - (Box::new(&self.secondary), false) - }; - debug!("⚖ alloting to {}", target.0.name()); - Ok(target) - } -} - -#[async_trait] -impl DbClient for DualClientImpl { - async fn add_user(&self, user: &User) -> DbResult<()> { - let (target, is_primary) = self.allot(&user.uaid).await?; - debug!("⚖ adding user to {}...", target.name()); - let result = target.add_user(user).await?; - if is_primary && self.write_to_secondary { - let _ = self.secondary.add_user(user).await.map_err(|e| { - error!("⚖ Error: {:?}", e); - self.metrics - .incr_with_tags("database.dual.error") - .with_tag("func", "add_user") - .send(); - e - }); - } - debug!("⚖ User added..."); - Ok(result) - } - - async fn update_user(&self, user: &mut User) -> DbResult { - // If the UAID is in the allowance, move them to the new data store - let (target, is_primary) = self.allot(&user.uaid).await?; - let result = target.update_user(user).await?; - if is_primary && self.write_to_secondary { - let _ = self.secondary.update_user(user).await.map_err(|e| { - error!("⚡ Error: {:?}", e); - self.metrics - .incr_with_tags("database.dual.error") - .with_tag("func", "update_user") - .send(); - e - }); - } - Ok(result) - } - - async fn get_user(&self, uaid: &Uuid) -> DbResult> { - let (target, is_primary) = self.allot(uaid).await?; - match target.get_user(uaid).await { - Ok(Some(user)) => Ok(Some(user)), - Ok(None) => { - if is_primary { - // The user wasn't in the current primary, so fetch them from the secondary. - let start = std::time::Instant::now(); - if let Ok(Some(mut user)) = self.secondary.get_user(uaid).await { - // copy the user record over to the new data store. 
- debug!("⚖ Found user record in secondary, moving to primary"); - // Users read from DynamoDB lack a version field needed - // for Bigtable - debug_assert!(user.version.is_none()); - user.version = Some(Uuid::new_v4()); - if let Err(e) = self.primary.add_user(&user).await { - if !matches!(e, DbError::Conditional) { - return Err(e); - } - // User is being migrated underneath us. Try - // fetching the record from primary again, and back - // off if still not there. - let user = self.primary.get_user(uaid).await?; - // Possibly a higher number of these occur than - // expected, so sanity check that a user now exists - self.metrics - .incr_with_tags("database.already_migrated") - .with_tag("exists", &user.is_some().to_string()) - .send(); - if user.is_none() { - return Err(DbError::Backoff("Move in progress".to_owned())); - }; - return Ok(user); - }; - self.metrics.incr_with_tags("database.migrate").send(); - let channels = self.secondary.get_channels(uaid).await?; - if !channels.is_empty() { - // NOTE: add_channels doesn't write a new version: - // user.version is still valid - self.primary.add_channels(uaid, channels).await?; - } - self.metrics - .time_with_tags( - "database.migrate.time", - (std::time::Instant::now() - start).as_millis() as u64, - ) - .send(); - return Ok(Some(user)); - } - } - Ok(None) - } - Err(err) => Err(err), - } - } - - async fn remove_user(&self, uaid: &Uuid) -> DbResult<()> { - let (target, is_primary) = self.allot(uaid).await?; - let result = target.remove_user(uaid).await?; - if is_primary { - // try removing the user from the old store, just in case. - // leaving them could cause false reporting later. 
- let _ = self.secondary.remove_user(uaid).await.map_err(|e| { - debug!("⚖ Secondary remove_user error {:?}", e); - self.metrics - .incr_with_tags("database.dual.error") - .with_tag("func", "remove_user") - .send(); - e - }); - } - Ok(result) - } - - async fn add_channel(&self, uaid: &Uuid, channel_id: &Uuid) -> DbResult<()> { - debug!("⚖ getting target"); - let (target, is_primary) = self.allot(uaid).await?; - debug!("⚖ Adding channel to {}", target.name()); - let result = target.add_channel(uaid, channel_id).await; - if is_primary && self.write_to_secondary { - let _ = self - .secondary - .add_channel(uaid, channel_id) - .await - .map_err(|e| { - self.metrics - .incr_with_tags("database.dual.error") - .with_tag("func", "add_channel") - .send(); - e - }); - } - result - } - - async fn add_channels(&self, uaid: &Uuid, channels: HashSet) -> DbResult<()> { - let (target, is_primary) = self.allot(uaid).await?; - let result = target.add_channels(uaid, channels.clone()).await; - if is_primary && self.write_to_secondary { - let _ = self - .secondary - .add_channels(uaid, channels) - .await - .map_err(|e| { - self.metrics - .incr_with_tags("database.dual.error") - .with_tag("func", "add_channels") - .send(); - e - }); - } - result - } - - async fn get_channels(&self, uaid: &Uuid) -> DbResult> { - let (target, _is_primary) = self.allot(uaid).await?; - target.get_channels(uaid).await - } - - async fn remove_channel(&self, uaid: &Uuid, channel_id: &Uuid) -> DbResult { - let (target, is_primary) = self.allot(uaid).await?; - let result = target.remove_channel(uaid, channel_id).await?; - // Always remove the channel - if is_primary { - let _ = self - .secondary - .remove_channel(uaid, channel_id) - .await - .map_err(|e| { - debug!("⚖ Secondary remove_channel error: {:?}", e); - self.metrics - .incr_with_tags("database.dual.error") - .with_tag("func", "remove_channel") - .send(); - - e - }); - } - Ok(result) - } - - async fn remove_node_id( - &self, - uaid: &Uuid, - node_id: 
&str, - connected_at: u64, - version: &Option, - ) -> DbResult { - let (target, is_primary) = self.allot(uaid).await?; - let mut result = target - .remove_node_id(uaid, node_id, connected_at, version) - .await?; - // Always remove the node_id. - if is_primary { - result = self - .secondary - .remove_node_id(uaid, node_id, connected_at, version) - .await - .unwrap_or_else(|e| { - debug!("⚖ Secondary remove_node_id error: {:?}", e); - self.metrics - .incr_with_tags("database.dual.error") - .with_tag("func", "remove_node_id") - .send(); - - false - }) - || result; - } - Ok(result) - } - - async fn save_message(&self, uaid: &Uuid, message: Notification) -> DbResult<()> { - let (target, is_primary) = self.allot(uaid).await?; - if is_primary && self.write_to_secondary { - let _ = self.secondary.save_message(uaid, message.clone()).await?; - } - target.save_message(uaid, message).await - } - - async fn remove_message(&self, uaid: &Uuid, sort_key: &str) -> DbResult<()> { - let (target, is_primary) = self.allot(uaid).await?; - let result = target.remove_message(uaid, sort_key).await?; - // Always remove the message - if is_primary { - // this will be increasingly chatty as we wind down dynamodb. 
- let _ = self - .secondary - .remove_message(uaid, sort_key) - .await - .map_err(|e| { - debug!("⚖ Secondary remove_message error: {:?}", e); - self.metrics - .incr_with_tags("database.dual.error") - .with_tag("func", "remove_message") - .send(); - e - }); - } - Ok(result) - } - - async fn fetch_topic_messages( - &self, - uaid: &Uuid, - limit: usize, - ) -> DbResult { - let (target, is_primary) = self.allot(uaid).await?; - let result = target.fetch_topic_messages(uaid, limit).await?; - if result.messages.is_empty() && is_primary { - return self.secondary.fetch_topic_messages(uaid, limit).await; - } - return Ok(result); - } - - async fn fetch_timestamp_messages( - &self, - uaid: &Uuid, - timestamp: Option, - limit: usize, - ) -> DbResult { - let (target, is_primary) = self.allot(uaid).await?; - let result = target - .fetch_timestamp_messages(uaid, timestamp, limit) - .await?; - if result.messages.is_empty() && is_primary { - return self - .secondary - .fetch_timestamp_messages(uaid, timestamp, limit) - .await; - } - return Ok(result); - } - - async fn save_messages(&self, uaid: &Uuid, messages: Vec) -> DbResult<()> { - let (target, is_primary) = self.allot(uaid).await?; - if is_primary && self.write_to_secondary { - let _ = self.secondary.save_messages(uaid, messages.clone()).await?; - } - target.save_messages(uaid, messages).await - } - - async fn increment_storage(&self, uaid: &Uuid, timestamp: u64) -> DbResult<()> { - let (target, is_primary) = self.allot(uaid).await?; - if is_primary { - let _ = self - .secondary - .increment_storage(uaid, timestamp) - .await - .map_err(|e| { - debug!("⚖ Secondary increment_storage error: {:?}", e); - self.metrics - .incr_with_tags("database.dual.error") - .with_tag("func", "increment_storage") - .send(); - e - }); - } - target.increment_storage(uaid, timestamp).await - } - - async fn health_check(&self) -> DbResult { - Ok(self.primary.health_check().await? && self.secondary.health_check().await?) 
- } - - async fn router_table_exists(&self) -> DbResult { - self.primary.router_table_exists().await - } - - async fn message_table_exists(&self) -> DbResult { - self.primary.message_table_exists().await - } - - fn rotating_message_table(&self) -> Option<&str> { - None - } - - fn box_clone(&self) -> Box { - Box::new(self.clone()) - } - - fn name(&self) -> String { - "Dual".to_owned() - } - - fn pool_status(&self) -> Option { - self.primary.pool_status() - } -} - -#[cfg(all(test, feature = "bigtable", feature = "dynamodb"))] -mod test { - use super::*; - use cadence::{NopMetricSink, StatsdClient}; - use serde_json::json; - use std::str::FromStr; - - fn test_args(median: Option<&str>, end_median: Option<&str>) -> String { - json!({ - "primary": { - "dsn": "grpc://bigtable.googleapis.com", // Note that this is the general endpoint. - "db_settings": json!({ - "table_name": "projects/some-project/instances/some-instance/tables/some-table", // Note the full path. - "message_family": "messageFamily", - "router_family": "routerFamily", - }).to_string(), - }, - "secondary": { - "dsn": "http://localhost:8000/", - "db_settings": json!({ - "router_table": "test_router", - "message_table": "test_message", - }).to_string(), - }, - "median": median.to_owned(), - "end_median": end_median.to_owned(), - "write_to_secondary": false, - }) - .to_string() - } - - /// This test checks the dual parser, but also serves as a bit of - /// documentation for how the db_settings argument should be structured - #[test] - fn arg_parsing() -> DbResult<()> { - let arg_str = test_args(None, None); - // the output string looks like: - /* - 
"{\"primary\":{\"db_settings\":\"{\\\"message_family\\\":\\\"message\\\",\\\"router_family\\\":\\\"router\\\",\\\"table_name\\\":\\\"projects/some-project/instances/some-instance/tables/some-table\\\"}\",\"dsn\":\"grpc://bigtable.googleapis.com\"},\"secondary\":{\"db_settings\":\"{\\\"message_table\\\":\\\"test_message\\\",\\\"router_table\\\":\\\"test_router\\\"}\",\"dsn\":\"http://localhost:8000/\"}}" - */ - // some additional escaping may be required to encode this as an environment variable. - // not all variables are required, feel free to liberally use fall-back defaults. - - // dbg!(&arg_str); - - let dual_settings = DbSettings { - dsn: Some("dual".to_owned()), - db_settings: arg_str, - }; - let metrics = Arc::new(StatsdClient::builder("", NopMetricSink).build()); - - // this is the actual test. It should create `dual` without raising an error - // Specify "BIGTABLE_EMULATOR_HOST" to skip credential check for emulator. - let dual = DualClientImpl::new(metrics, &dual_settings)?; - - // for sanity sake, make sure the passed message_family isn't the default value. 
- assert_eq!(&dual.primary.settings.message_family, "messageFamily"); - - Ok(()) - } - - #[actix_rt::test] - async fn allocation() -> DbResult<()> { - let arg_str = test_args(Some("0A"), Some("88")); - let metrics = Arc::new(StatsdClient::builder("", NopMetricSink).build()); - let dual_settings = DbSettings { - dsn: Some("dual".to_owned()), - db_settings: arg_str, - }; - let dual = DualClientImpl::new(metrics, &dual_settings)?; - - // Should be included (Note: high end median) - let low_uaid = Uuid::from_str("04DDDDDD-1234-1234-1234-0000000000CC").unwrap(); - let (result, is_primary) = dual.allot(&low_uaid).await?; - assert_eq!(result.name(), dual.primary.name()); - assert!(is_primary); - - // Should be excluded (Note: high end_median) - let hi_uaid = Uuid::from_str("0BDDDDDD-1234-1234-1234-0000000000CC").unwrap(); - let (result, is_primary) = dual.allot(&hi_uaid).await?; - assert_eq!(result.name(), dual.secondary.name()); - assert!(!is_primary); - - // Should be included (Note: high median with low end median) - let hi_end_uaid = Uuid::from_str("0BDDDDDD-1234-1234-1234-000000000080").unwrap(); - let (result, is_primary) = dual.allot(&hi_end_uaid).await?; - assert_eq!(result.name(), dual.primary.name()); - assert!(is_primary); - - Ok(()) - } -} diff --git a/autopush-common/src/db/dynamodb/macros.rs b/autopush-common/src/db/dynamodb/macros.rs deleted file mode 100644 index bb944e0f2..000000000 --- a/autopush-common/src/db/dynamodb/macros.rs +++ /dev/null @@ -1,113 +0,0 @@ -/// A bunch of macro helpers from rusoto_helpers code, which they pulled from crates.io because -/// they were waiting for rusuto to hit 1.0.0 or something. For sanity, they are instead accumulated -/// here for our use. -#[allow(unused_macros)] -macro_rules! 
attributes { - ($($val:expr => $attr_type:expr),*) => { - { - let mut temp_vec = Vec::new(); - $( - temp_vec.push(AttributeDefinition { - attribute_name: String::from($val), - attribute_type: String::from($attr_type) - }); - )* - temp_vec - } - } -} - -#[allow(unused_macros)] -macro_rules! key_schema { - ($($name:expr => $key_type:expr),*) => { - { - let mut temp_vec = Vec::new(); - $( - temp_vec.push(KeySchemaElement { - key_type: String::from($key_type), - attribute_name: String::from($name) - }); - )* - temp_vec - } - } -} - -#[macro_export] -macro_rules! val { - (B => $val:expr) => { - AttributeValue { - b: Some($val), - ..Default::default() - } - }; - (S => $val:expr) => { - AttributeValue { - s: Some($val.to_string()), - ..Default::default() - } - }; - (SS => $val:expr) => { - AttributeValue { - ss: Some($val.iter().map(|v| v.to_string()).collect()), - ..Default::default() - } - }; - (N => $val:expr) => { - AttributeValue { - n: Some($val.to_string()), - ..Default::default() - } - }; -} - -/// Create a **HashMap** from a list of key-value pairs -/// -/// ## Example -/// -/// ```ignore -/// use autopush_common::hashmap; -/// -/// let map = hashmap!{ -/// "a" => 1, -/// "b" => 2, -/// }; -/// assert_eq!(map["a"], 1); -/// assert_eq!(map["b"], 2); -/// assert_eq!(map.get("c"), None); -/// ``` -#[macro_export] -macro_rules! hashmap { - (@single $($x:tt)*) => (()); - (@count $($rest:expr),*) => (<[()]>::len(&[$($crate::hashmap!(@single $rest)),*])); - - ($($key:expr => $value:expr,)+) => { $crate::hashmap!($($key => $value),+) }; - ($($key:expr => $value:expr),*) => { - { - let _cap = $crate::hashmap!(@count $($key),*); - let mut _map = ::std::collections::HashMap::with_capacity(_cap); - $( - _map.insert($key, $value); - )* - _map - } - }; -} - -/// Shorthand for specifying a dynamodb item -#[macro_export] -macro_rules! 
ddb_item { - ($($p:tt: $t:tt => $x:expr),*) => { - { - use rusoto_dynamodb::AttributeValue; - $crate::hashmap!{ - $( - String::from(stringify!($p)) => AttributeValue { - $t: Some($x), - ..Default::default() - }, - )* - } - } - } -} diff --git a/autopush-common/src/db/dynamodb/mod.rs b/autopush-common/src/db/dynamodb/mod.rs deleted file mode 100644 index 261c24158..000000000 --- a/autopush-common/src/db/dynamodb/mod.rs +++ /dev/null @@ -1,662 +0,0 @@ -use std::collections::HashSet; -use std::env; -use std::fmt::{Debug, Display}; -use std::result::Result as StdResult; -use std::sync::Arc; - -use crate::db::client::DbClient; -use crate::db::dynamodb::retry::{ - retry_policy, retryable_batchwriteitem_error, retryable_delete_error, - retryable_describe_table_error, retryable_getitem_error, retryable_putitem_error, - retryable_query_error, retryable_updateitem_error, -}; -use crate::db::error::{DbError, DbResult}; -use crate::db::{ - client::FetchMessageResponse, DbSettings, NotificationRecord, User, MAX_CHANNEL_TTL, MAX_EXPIRY, -}; -use crate::notification::Notification; -use crate::util::sec_since_epoch; - -use async_trait::async_trait; -use cadence::{CountedExt, StatsdClient}; -use chrono::Utc; -use rusoto_core::credential::StaticProvider; -use rusoto_core::{HttpClient, Region, RusotoError}; -use rusoto_dynamodb::{ - AttributeValue, BatchWriteItemInput, DeleteItemInput, DescribeTableError, DescribeTableInput, - DynamoDb, DynamoDbClient, GetItemInput, ListTablesInput, PutItemInput, PutRequest, QueryInput, - UpdateItemError, UpdateItemInput, WriteRequest, -}; -use serde::{Deserialize, Serialize}; -use uuid::Uuid; - -#[macro_use] -pub mod macros; -pub mod retry; - -#[derive(Clone, Debug, Deserialize, Serialize)] -pub struct DynamoDbSettings { - #[serde(default)] - pub router_table: String, - #[serde(default)] - pub message_table: String, - #[serde(default)] - pub db_routing_table: Option, -} - -impl TryFrom<&str> for DynamoDbSettings { - type Error = DbError; - fn 
try_from(setting_string: &str) -> Result { - serde_json::from_str(setting_string) - .map_err(|e| DbError::General(format!("Could not parse DdbSettings: {:?}", e))) - } -} - -#[derive(Clone)] -pub struct DdbClientImpl { - db_client: DynamoDbClient, - metrics: Arc, - settings: DynamoDbSettings, -} - -impl DdbClientImpl { - pub fn new(metrics: Arc, db_settings: &DbSettings) -> DbResult { - debug!("🛢️DynamoDB Settings {:?}", db_settings); - let db_client = if let Ok(endpoint) = env::var("AWS_LOCAL_DYNAMODB") { - DynamoDbClient::new_with( - HttpClient::new().expect("TLS initialization error"), - StaticProvider::new_minimal("BogusKey".to_string(), "BogusKey".to_string()), - Region::Custom { - name: "us-east-1".to_string(), - endpoint, - }, - ) - } else { - DynamoDbClient::new(Region::default()) - }; - - let settings = DynamoDbSettings::try_from(db_settings.db_settings.as_ref())?; - Ok(Self { - db_client, - metrics, - settings, - }) - } - - /// Check if a table exists - async fn table_exists(&self, table_name: String) -> DbResult { - let input = DescribeTableInput { table_name }; - - let output = match retry_policy() - .retry_if( - || self.db_client.describe_table(input.clone()), - retryable_describe_table_error(self.metrics.clone()), - ) - .await - { - Ok(output) => output, - Err(RusotoError::Service(DescribeTableError::ResourceNotFound(_))) => { - return Ok(false); - } - Err(e) => return Err(e.into()), - }; - - let status = output - .table - .and_then(|table| table.table_status) - .ok_or(DbError::TableStatusUnknown)? 
- .to_uppercase(); - - Ok(["CREATING", "UPDATING", "ACTIVE"].contains(&status.as_str())) - } -} - -/// Like Result::ok, convert from Result to Option but applying a -/// function to the Err value -fn ok_or_inspect(result: StdResult, op: F) -> Option -where - F: FnOnce(E), -{ - match result { - Ok(t) => Some(t), - Err(e) => { - op(e); - None - } - } -} - -/// Log/metric errors during conversions to Notification -fn conversion_err(metrics: &StatsdClient, err: E, item: F, name: &'static str) -where - E: Display, - F: Debug, -{ - error!("Failed {}, item: {:?}, conversion: {}", name, item, err); - metrics - .incr_with_tags("ua.notification_read.error") - .with_tag("conversion", name) - .send(); -} - -#[allow(clippy::field_reassign_with_default)] -#[async_trait] -impl DbClient for DdbClientImpl { - async fn add_user(&self, user: &User) -> DbResult<()> { - let input = PutItemInput { - table_name: self.settings.router_table.clone(), - item: serde_dynamodb::to_hashmap(user)?, - condition_expression: Some("attribute_not_exists(uaid)".to_string()), - ..Default::default() - }; - - retry_policy() - .retry_if( - || self.db_client.put_item(input.clone()), - retryable_putitem_error(self.metrics.clone()), - ) - .await?; - Ok(()) - } - - async fn update_user(&self, user: &mut User) -> DbResult { - let mut user_map = serde_dynamodb::to_hashmap(&user)?; - user_map.remove("uaid"); - let input = UpdateItemInput { - table_name: self.settings.router_table.clone(), - key: ddb_item! 
{ uaid: s => user.uaid.simple().to_string() }, - update_expression: Some(format!( - "SET {}", - user_map - .keys() - .map(|key| format!("{0}=:{0}", key)) - .collect::>() - .join(", ") - )), - expression_attribute_values: Some( - user_map - .into_iter() - .map(|(key, value)| (format!(":{}", key), value)) - .collect(), - ), - condition_expression: Some( - "attribute_exists(uaid) and ( - attribute_not_exists(router_type) or - (router_type = :router_type) - ) and ( - attribute_not_exists(node_id) or - (connected_at < :connected_at) - )" - .to_string(), - ), - ..Default::default() - }; - - let result = retry_policy() - .retry_if( - || self.db_client.update_item(input.clone()), - retryable_updateitem_error(self.metrics.clone()), - ) - .await; - match result { - Ok(_) => Ok(true), - Err(RusotoError::Service(UpdateItemError::ConditionalCheckFailed(_))) => Ok(false), - Err(e) => Err(e.into()), - } - } - - async fn get_user(&self, uaid: &Uuid) -> DbResult> { - let input = GetItemInput { - table_name: self.settings.router_table.clone(), - consistent_read: Some(true), - key: ddb_item! { uaid: s => uaid.simple().to_string() }, - ..Default::default() - }; - - retry_policy() - .retry_if( - || self.db_client.get_item(input.clone()), - retryable_getitem_error(self.metrics.clone()), - ) - .await? - .item - .map(serde_dynamodb::from_hashmap) - .transpose() - .map_err(|e| { - error!("DbClient::get_user: {:?}", e.to_string()); - DbError::from(e) - }) - } - - async fn remove_user(&self, uaid: &Uuid) -> DbResult<()> { - let input = DeleteItemInput { - table_name: self.settings.router_table.clone(), - key: ddb_item! 
{ uaid: s => uaid.simple().to_string() }, - ..Default::default() - }; - - retry_policy() - .retry_if( - || self.db_client.delete_item(input.clone()), - retryable_delete_error(self.metrics.clone()), - ) - .await?; - Ok(()) - } - - async fn add_channel(&self, uaid: &Uuid, channel_id: &Uuid) -> DbResult<()> { - let input = UpdateItemInput { - table_name: self.settings.message_table.clone(), - key: ddb_item! { - uaid: s => uaid.simple().to_string(), - chidmessageid: s => " ".to_string() - }, - update_expression: Some("ADD chids :channel_id SET expiry = :expiry".to_string()), - expression_attribute_values: Some(hashmap! { - ":channel_id".to_string() => val!(SS => Some(channel_id)), - ":expiry".to_string() => val!(N => sec_since_epoch() + MAX_CHANNEL_TTL) - }), - ..Default::default() - }; - - retry_policy() - .retry_if( - || self.db_client.update_item(input.clone()), - retryable_updateitem_error(self.metrics.clone()), - ) - .await?; - Ok(()) - } - - /// Hopefully, this is never called. It is provided for completion sake. - async fn add_channels(&self, uaid: &Uuid, channels: HashSet) -> DbResult<()> { - for channel_id in channels { - self.add_channel(uaid, &channel_id).await?; - } - Ok(()) - } - - async fn get_channels(&self, uaid: &Uuid) -> DbResult> { - // Channel IDs are stored in a special row in the message table, where - // chidmessageid = " " - let input = GetItemInput { - table_name: self.settings.message_table.clone(), - consistent_read: Some(true), - key: ddb_item! { - uaid: s => uaid.simple().to_string(), - chidmessageid: s => " ".to_string() - }, - ..Default::default() - }; - - let output = retry_policy() - .retry_if( - || self.db_client.get_item(input.clone()), - retryable_getitem_error(self.metrics.clone()), - ) - .await?; - - // The channel IDs are in the notification's `chids` field - let channels = output - .item - // Deserialize the notification - .map(serde_dynamodb::from_hashmap::) - .transpose()? 
- // Extract the channel IDs - .and_then(|n| n.chids) - .unwrap_or_default(); - - // Convert the IDs from String to Uuid - let channels = channels - .into_iter() - .filter_map(|s| Uuid::parse_str(&s).ok()) - .collect(); - - Ok(channels) - } - - async fn remove_channel(&self, uaid: &Uuid, channel_id: &Uuid) -> DbResult { - let input = UpdateItemInput { - table_name: self.settings.message_table.clone(), - key: ddb_item! { - uaid: s => uaid.simple().to_string(), - chidmessageid: s => " ".to_string() - }, - update_expression: Some("DELETE chids :channel_id SET expiry = :expiry".to_string()), - expression_attribute_values: Some(hashmap! { - ":channel_id".to_string() => val!(SS => Some(channel_id)), - ":expiry".to_string() => val!(N => sec_since_epoch() + MAX_CHANNEL_TTL) - }), - return_values: Some("UPDATED_OLD".to_string()), - ..Default::default() - }; - - let output = retry_policy() - .retry_if( - || self.db_client.update_item(input.clone()), - retryable_updateitem_error(self.metrics.clone()), - ) - .await?; - - // Check if the old channel IDs contain the removed channel - Ok(output - .attributes - .as_ref() - .and_then(|map| map.get("chids")) - .and_then(|item| item.ss.as_ref()) - .map(|channel_ids| channel_ids.contains(&channel_id.to_string())) - .unwrap_or(false)) - } - - async fn remove_node_id( - &self, - uaid: &Uuid, - node_id: &str, - connected_at: u64, - _version: &Option, - ) -> DbResult { - let input = UpdateItemInput { - key: ddb_item! { uaid: s => uaid.simple().to_string() }, - update_expression: Some("REMOVE node_id".to_string()), - condition_expression: Some("(node_id = :node) and (connected_at = :conn)".to_string()), - expression_attribute_values: Some(hashmap! 
{ - ":node".to_string() => val!(S => node_id), - ":conn".to_string() => val!(N => connected_at.to_string()) - }), - table_name: self.settings.router_table.clone(), - ..Default::default() - }; - - let result = retry_policy() - .retry_if( - || self.db_client.update_item(input.clone()), - retryable_updateitem_error(self.metrics.clone()), - ) - .await; - match result { - Ok(_) => Ok(true), - Err(RusotoError::Service(UpdateItemError::ConditionalCheckFailed(_))) => Ok(false), - Err(e) => Err(e.into()), - } - } - - async fn fetch_topic_messages( - &self, - uaid: &Uuid, - limit: usize, - ) -> DbResult { - // from commands::fetch_topic_messages() - let attr_values = hashmap! { - ":uaid".to_string() => val!(S => uaid.simple().to_string()), - ":cmi".to_string() => val!(S => "02"), - }; - let input = QueryInput { - key_condition_expression: Some("uaid = :uaid AND chidmessageid < :cmi".to_string()), - expression_attribute_values: Some(attr_values), - table_name: self.settings.message_table.to_string(), - consistent_read: Some(true), - limit: Some(limit as i64), - ..Default::default() - }; - - let output = retry_policy() - .retry_if( - || self.db_client.query(input.clone()), - retryable_query_error(self.metrics.clone()), - ) - .await?; - - let mut notifs: Vec = output.items.map_or_else(Vec::new, |items| { - debug!("Got response of: {:?}", items); - items - .into_iter() - .inspect(|i| debug!("Item: {:?}", i)) - .filter_map(|item| { - let item2 = item.clone(); - ok_or_inspect(serde_dynamodb::from_hashmap(item), |e| { - conversion_err(&self.metrics, e, item2, "serde_dynamodb_from_hashmap") - }) - }) - .collect() - }); - if notifs.is_empty() { - return Ok(Default::default()); - } - - // Load the current_timestamp from the subscription registry entry which is - // the first DynamoDbNotification and remove it from the vec. 
- let timestamp = notifs.remove(0).current_timestamp; - // Convert any remaining DynamoDbNotifications to Notification's - let messages = notifs - .into_iter() - .filter_map(|ddb_notif| { - let ddb_notif2 = ddb_notif.clone(); - ok_or_inspect(ddb_notif.into_notif(), |e| { - conversion_err(&self.metrics, e, ddb_notif2, "into_notif") - }) - }) - .collect(); - Ok(FetchMessageResponse { - timestamp, - messages, - }) - } - - async fn fetch_timestamp_messages( - &self, - uaid: &Uuid, - timestamp: Option, - limit: usize, - ) -> DbResult { - // Specify the minimum value to look for as a channelmessageid - let range_key = if let Some(ts) = timestamp { - format!("02:{}:z", ts) - } else { - // fun ascii tricks? because ':' comes before ';', look for any non-topic? - "01;".to_string() - }; - let attr_values = hashmap! { - ":uaid".to_string() => val!(S => uaid.simple().to_string()), - ":cmi".to_string() => val!(S => range_key), - }; - let input = QueryInput { - key_condition_expression: Some("uaid = :uaid AND chidmessageid > :cmi".to_string()), - expression_attribute_values: Some(attr_values), - table_name: self.settings.message_table.to_string(), - consistent_read: Some(true), - limit: Some(limit as i64), - ..Default::default() - }; - - let output = retry_policy() - .retry_if( - || self.db_client.query(input.clone()), - retryable_query_error(self.metrics.clone()), - ) - .await?; - - let messages = output.items.map_or_else(Vec::new, |items| { - debug!("Got response of: {:?}", items); - items - .into_iter() - .filter_map(|item| { - let item2 = item.clone(); - ok_or_inspect(serde_dynamodb::from_hashmap(item), |e| { - conversion_err(&self.metrics, e, item2, "serde_dynamodb_from_hashmap") - }) - }) - .filter_map(|ddb_notif: NotificationRecord| { - let ddb_notif2 = ddb_notif.clone(); - ok_or_inspect(ddb_notif.into_notif(), |e| { - conversion_err(&self.metrics, e, ddb_notif2, "into_notif") - }) - }) - .collect() - }); - let timestamp = messages.iter().filter_map(|m| 
m.sortkey_timestamp).max(); - Ok(FetchMessageResponse { - timestamp, - messages, - }) - } - - async fn increment_storage(&self, uaid: &Uuid, timestamp: u64) -> DbResult<()> { - let expiry = sec_since_epoch() + 2 * MAX_EXPIRY; - let attr_values = hashmap! { - ":timestamp".to_string() => val!(N => timestamp.to_string()), - ":expiry".to_string() => val!(N => expiry), - }; - let update_input = UpdateItemInput { - key: ddb_item! { - uaid: s => uaid.as_simple().to_string(), - chidmessageid: s => " ".to_string() - }, - update_expression: Some( - "SET current_timestamp = :timestamp, expiry = :expiry".to_string(), - ), - expression_attribute_values: Some(attr_values), - table_name: self.settings.message_table.clone(), - ..Default::default() - }; - - retry_policy() - .retry_if( - || self.db_client.update_item(update_input.clone()), - retryable_updateitem_error(self.metrics.clone()), - ) - .await?; - - Ok(()) - } - - async fn save_message(&self, uaid: &Uuid, message: Notification) -> DbResult<()> { - let topic = message.topic.is_some().to_string(); - let input = PutItemInput { - item: serde_dynamodb::to_hashmap(&NotificationRecord::from_notif(uaid, message))?, - table_name: self.settings.message_table.clone(), - ..Default::default() - }; - - retry_policy() - .retry_if( - || self.db_client.put_item(input.clone()), - retryable_putitem_error(self.metrics.clone()), - ) - .await?; - - self.metrics - .incr_with_tags("notification.message.stored") - .with_tag("topic", &topic) - .with_tag("database", &self.name()) - .send(); - Ok(()) - } - - async fn save_messages(&self, uaid: &Uuid, messages: Vec) -> DbResult<()> { - let put_items: Vec = messages - .into_iter() - .filter_map(|n| { - // eventually include `internal` if `meta` defined. 
- self.metrics - .incr_with_tags("notification.message.stored") - .with_tag("topic", &n.topic.is_some().to_string()) - .with_tag("database", &self.name()) - .send(); - serde_dynamodb::to_hashmap(&NotificationRecord::from_notif(uaid, n)) - .ok() - .map(|hm| WriteRequest { - put_request: Some(PutRequest { item: hm }), - delete_request: None, - }) - }) - .collect(); - let batch_input = BatchWriteItemInput { - request_items: hashmap! { self.settings.message_table.clone() => put_items }, - ..Default::default() - }; - - retry_policy() - .retry_if( - || self.db_client.batch_write_item(batch_input.clone()), - retryable_batchwriteitem_error(self.metrics.clone()), - ) - .await?; - Ok(()) - } - - async fn remove_message(&self, uaid: &Uuid, sort_key: &str) -> DbResult<()> { - let input = DeleteItemInput { - table_name: self.settings.message_table.clone(), - key: ddb_item! { - uaid: s => uaid.simple().to_string(), - chidmessageid: s => sort_key.to_owned() - }, - ..Default::default() - }; - - retry_policy() - .retry_if( - || self.db_client.delete_item(input.clone()), - retryable_delete_error(self.metrics.clone()), - ) - .await?; - self.metrics - .incr_with_tags("notification.message.deleted") - .with_tag("database", &self.name()) - .send(); - Ok(()) - } - - async fn router_table_exists(&self) -> DbResult { - self.table_exists(self.settings.router_table.clone()).await - } - - async fn message_table_exists(&self) -> DbResult { - self.table_exists(self.settings.message_table.clone()).await - } - - fn rotating_message_table(&self) -> Option<&str> { - trace!("ddb message table {:?}", &self.settings.message_table); - Some(&self.settings.message_table) - } - - /// Perform a simple health check to make sure that the database is there. - /// This is called by __health__, so it should be reasonably light weight. 
- async fn health_check(&self) -> DbResult { - let input = ListTablesInput { - exclusive_start_table_name: Some(self.settings.message_table.clone()), - ..Default::default() - }; - // if we can't connect, that's a fail. - let result = self - .db_client - .list_tables(input) - .await - .map_err(|e| DbError::General(format!("DynamoDB health check failure: {:?}", e)))?; - if let Some(names) = result.table_names { - // We found at least one table that matches the message_table - debug!("dynamodb ok"); - return Ok(!names.is_empty()); - } - // Huh, we couldn't find a message table? That's a failure. - return Err(DbError::General( - "DynamoDB health check failure: No message table found".to_owned(), - )); - } - - fn box_clone(&self) -> Box { - Box::new(self.clone()) - } - - fn name(&self) -> String { - "DynamoDb".to_owned() - } -} - -/// Indicate whether this last_connect falls in the current month -pub(crate) fn has_connected_this_month(user: &User) -> bool { - user.last_connect.map_or(false, |v| { - let pat = Utc::now().format("%Y%m").to_string(); - v.to_string().starts_with(&pat) - }) -} diff --git a/autopush-common/src/db/dynamodb/retry.rs b/autopush-common/src/db/dynamodb/retry.rs deleted file mode 100644 index ffcc3807e..000000000 --- a/autopush-common/src/db/dynamodb/retry.rs +++ /dev/null @@ -1,61 +0,0 @@ -use again::RetryPolicy; -use cadence::{CountedExt, StatsdClient}; -use rusoto_core::RusotoError; -use rusoto_dynamodb::{ - BatchWriteItemError, DeleteItemError, DescribeTableError, GetItemError, PutItemError, - QueryError, UpdateItemError, -}; -use std::sync::Arc; - -/// Create a retry function for the given error -macro_rules! 
retryable_error { - ($name:ident, $error:tt, $error_tag:expr) => { - pub fn $name(metrics: Arc) -> impl Fn(&RusotoError<$error>) -> bool { - move |err| match err { - RusotoError::HttpDispatch(_) - | RusotoError::Service($error::InternalServerError(_)) - | RusotoError::Service($error::ProvisionedThroughputExceeded(_)) => { - debug!("retryable {} {:?}", $error_tag, &err); - metrics - .incr_with_tags("database.retry") - .with_tag("error", $error_tag) - .send(); - true - } - _ => false, - } - } - }; -} - -retryable_error!(retryable_query_error, QueryError, "query"); -retryable_error!(retryable_getitem_error, GetItemError, "get_item"); -retryable_error!(retryable_updateitem_error, UpdateItemError, "update_item"); -retryable_error!(retryable_putitem_error, PutItemError, "put_item"); -retryable_error!(retryable_delete_error, DeleteItemError, "delete_item"); -retryable_error!( - retryable_batchwriteitem_error, - BatchWriteItemError, - "batch_write_item" -); - -// DescribeTableError does not have a ProvisionedThroughputExceeded variant -pub fn retryable_describe_table_error( - metrics: Arc, -) -> impl Fn(&RusotoError) -> bool { - move |err| match err { - RusotoError::Service(DescribeTableError::InternalServerError(_)) => { - metrics - .incr_with_tags("database.retry") - .with_tag("error", "describe_table_error") - .send(); - true - } - _ => false, - } -} - -/// Build an exponential retry policy -pub fn retry_policy() -> RetryPolicy { - RetryPolicy::default().with_jitter(true) -} diff --git a/autopush-common/src/db/error.rs b/autopush-common/src/db/error.rs index 373b8986c..a30dc915b 100644 --- a/autopush-common/src/db/error.rs +++ b/autopush-common/src/db/error.rs @@ -1,13 +1,5 @@ use actix_web::http::StatusCode; -use backtrace::Backtrace; -#[cfg(feature = "dynamodb")] -use rusoto_core::RusotoError; -#[cfg(feature = "dynamodb")] -use rusoto_dynamodb::{ - BatchWriteItemError, DeleteItemError, DescribeTableError, GetItemError, PutItemError, - QueryError, UpdateItemError, -}; 
use thiserror::Error; #[cfg(feature = "bigtable")] @@ -18,38 +10,6 @@ pub type DbResult = Result; #[derive(Debug, Error)] pub enum DbError { - #[cfg(feature = "dynamodb")] - #[error("Database error while performing GetItem")] - DdbGetItem(#[from] RusotoError), - - #[cfg(feature = "dynamodb")] - #[error("Database error while performing UpdateItem")] - DdbUpdateItem(#[from] RusotoError), - - #[cfg(feature = "dynamodb")] - #[error("Database error while performing PutItem")] - DdbPutItem(#[from] RusotoError), - - #[cfg(feature = "dynamodb")] - #[error("Database error while performing DeleteItem")] - DdbDeleteItem(#[from] RusotoError), - - #[cfg(feature = "dynamodb")] - #[error("Database error while performing BatchWriteItem")] - DdbBatchWriteItem(#[from] RusotoError), - - #[cfg(feature = "dynamodb")] - #[error("Database error while performing DescribeTable")] - DdbDescribeTable(#[from] RusotoError), - - #[cfg(feature = "dynamodb")] - #[error("Database error while performing Query")] - DdbQuery(#[from] RusotoError), - - #[cfg(feature = "dynamodb")] - #[error("Error while performing DynamoDB (de)serialization: {0}")] - DdbSerialization(#[from] serde_dynamodb::Error), - #[error("Error while performing (de)serialization: {0}")] Serialization(String), @@ -103,10 +63,6 @@ impl ReportableError for DbError { } } - fn backtrace(&self) -> Option<&Backtrace> { - None - } - fn is_sentry_event(&self) -> bool { match &self { #[cfg(feature = "bigtable")] diff --git a/autopush-common/src/db/mock.rs b/autopush-common/src/db/mock.rs index eedf52905..a3d5db7b6 100644 --- a/autopush-common/src/db/mock.rs +++ b/autopush-common/src/db/mock.rs @@ -103,10 +103,6 @@ impl DbClient for Arc { Arc::as_ref(self).message_table_exists().await } - fn rotating_message_table(&self) -> Option<&str> { - Arc::as_ref(self).rotating_message_table() - } - async fn health_check(&self) -> DbResult { Arc::as_ref(self).health_check().await } diff --git a/autopush-common/src/db/mod.rs 
b/autopush-common/src/db/mod.rs index 3eb7a0064..3b1d2f11b 100644 --- a/autopush-common/src/db/mod.rs +++ b/autopush-common/src/db/mod.rs @@ -12,28 +12,20 @@ use std::cmp::min; use std::collections::{HashMap, HashSet}; use std::result::Result as StdResult; +use derive_builder::Builder; use lazy_static::lazy_static; use regex::RegexSet; use serde::Serializer; use serde_derive::{Deserialize, Serialize}; use uuid::Uuid; -#[cfg(feature = "dynamodb")] -use crate::db::dynamodb::has_connected_this_month; -use util::generate_last_connect; - #[cfg(feature = "bigtable")] pub mod bigtable; pub mod client; -#[cfg(all(feature = "bigtable", feature = "dynamodb"))] -pub mod dual; -#[cfg(feature = "dynamodb")] -pub mod dynamodb; pub mod error; pub mod models; pub mod reporter; pub mod routing; -mod util; // used by integration testing pub mod mock; @@ -43,25 +35,16 @@ pub use reporter::spawn_pool_periodic_reporter; use crate::errors::{ApcErrorKind, Result}; use crate::notification::{Notification, STANDARD_NOTIFICATION_PREFIX, TOPIC_NOTIFICATION_PREFIX}; use crate::util::timing::{ms_since_epoch, sec_since_epoch}; +use crate::{MAX_NOTIFICATION_TTL, MAX_ROUTER_TTL}; use models::{NotificationHeaders, RangeKey}; -const MAX_EXPIRY: u64 = 2_592_000; pub const USER_RECORD_VERSION: u64 = 1; -/// The maximum TTL for channels, 30 days -pub const MAX_CHANNEL_TTL: u64 = 30 * 24 * 60 * 60; -/// The maximum TTL for router records, 30 days -pub const MAX_ROUTER_TTL: u64 = MAX_CHANNEL_TTL; #[derive(Eq, Debug, PartialEq)] pub enum StorageType { INVALID, #[cfg(feature = "bigtable")] BigTable, - #[cfg(feature = "dynamodb")] - DynamoDb, - #[cfg(all(feature = "bigtable", feature = "dynamodb"))] - Dual, - // Postgres, } impl From<&str> for StorageType { @@ -69,10 +52,6 @@ impl From<&str> for StorageType { match name.to_lowercase().as_str() { #[cfg(feature = "bigtable")] "bigtable" => Self::BigTable, - #[cfg(feature = "dual")] - "dual" => Self::Dual, - #[cfg(feature = "dynamodb")] - "dynamodb" => 
Self::DynamoDb, _ => Self::INVALID, } } @@ -84,12 +63,8 @@ impl StorageType { fn available<'a>() -> Vec<&'a str> { #[allow(unused_mut)] let mut result: Vec<&str> = Vec::new(); - #[cfg(feature = "dynamodb")] - result.push("DynamoDB"); #[cfg(feature = "bigtable")] result.push("Bigtable"); - #[cfg(all(feature = "bigtable", feature = "dynamodb"))] - result.push("Dual"); result } @@ -101,14 +76,7 @@ impl StorageType { info!("No DSN specified, failing over to old default dsn: {default}"); return Self::from(default); } - let dsn = dsn - .clone() - .unwrap_or(std::env::var("AWS_LOCAL_DYNAMODB").unwrap_or_default()); - #[cfg(feature = "dynamodb")] - if dsn.starts_with("http") { - trace!("Found http"); - return Self::DynamoDb; - } + let dsn = dsn.clone().unwrap_or_default(); #[cfg(feature = "bigtable")] if dsn.starts_with("grpc") { trace!("Found grpc"); @@ -122,11 +90,6 @@ impl StorageType { } return Self::BigTable; } - #[cfg(all(feature = "bigtable", feature = "dynamodb"))] - if dsn.to_lowercase() == "dual" { - trace!("Found Dual mode"); - return Self::Dual; - } Self::INVALID } } @@ -139,9 +102,8 @@ pub struct DbSettings { pub dsn: Option, /// A JSON formatted dictionary containing Database settings that /// are specific to the type of Data storage specified in the `dsn` - /// See the respective settings structures for - /// [crate::db::dynamodb::DynamoDbSettings] - /// and [crate::db::bigtable::BigTableDbSettings] + /// See the respective settings structure for + /// [crate::db::bigtable::BigTableDbSettings] pub db_settings: String, } //TODO: add `From for DbSettings`? @@ -170,11 +132,11 @@ pub struct CheckStorageResponse { } /// A user data record. -#[derive(Deserialize, PartialEq, Debug, Clone, Serialize)] +#[derive(Deserialize, PartialEq, Debug, Clone, Serialize, Builder)] +#[builder(default, setter(strip_option))] pub struct User { /// The UAID. This is generally a UUID4. It needs to be globally /// unique. 
- // DynamoDB #[serde(serialize_with = "uuid_serializer")] pub uaid: Uuid, /// Time in milliseconds that the user last connected at @@ -183,20 +145,12 @@ pub struct User { pub router_type: String, /// Router-specific data pub router_data: Option>, - /// Keyed time in a month the user last connected at with limited - /// key range for indexing - // [ed. --sigh. don't use custom timestamps kids.] - #[serde(skip_serializing_if = "Option::is_none")] - pub last_connect: Option, /// Last node/port the client was or may be connected to #[serde(skip_serializing_if = "Option::is_none")] pub node_id: Option, /// Record version #[serde(skip_serializing_if = "Option::is_none")] pub record_version: Option, - /// LEGACY: Current month table in the database the user is on - #[serde(skip_serializing_if = "Option::is_none")] - pub current_month: Option, /// the timestamp of the last notification sent to the user /// This field is exclusive to the Bigtable data scheme //TODO: rename this to `last_notification_timestamp` @@ -205,6 +159,19 @@ pub struct User { /// UUID4 version number for optimistic locking of updates on Bigtable #[serde(skip_serializing)] pub version: Option, + /// Set of user's channel ids. These are stored in router (user) record's + /// row in Bigtable. They are read along with the rest of the user record + /// so that they, along with every other field in the router record, will + /// automatically have their TTL (cell timestamp) reset during + /// [DbClient::update_user]. + /// + /// This is solely used for the sake of that update, thus private. + /// [DbClient::get_channels] is preferred for reading the latest version of + /// the channel ids (partly due to historical purposes but also is a more + /// flexible API that might benefit different, non Bigtable [DbClient] + /// backends that don't necessarily store the channel ids in the router + /// record). 
+ priv_channels: HashSet, } impl Default for User { @@ -216,24 +183,23 @@ impl Default for User { connected_at: ms_since_epoch(), router_type: "webpush".to_string(), router_data: None, - last_connect: Some(generate_last_connect()), node_id: None, record_version: Some(USER_RECORD_VERSION), - current_month: None, current_timestamp: None, version: Some(Uuid::new_v4()), + priv_channels: HashSet::new(), } } } impl User { - #[cfg(feature = "dynamodb")] - pub fn set_last_connect(&mut self) { - self.last_connect = if has_connected_this_month(self) { - None - } else { - Some(generate_last_connect()) - } + /// Return a new [UserBuilder] (generated from [derive_builder::Builder]) + pub fn builder() -> UserBuilder { + UserBuilder::default() + } + + pub fn channel_count(&self) -> usize { + self.priv_channels.len() } } @@ -243,10 +209,8 @@ impl User { #[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)] pub struct NotificationRecord { /// The UserAgent Identifier (UAID) - // DynamoDB #[serde(serialize_with = "uuid_serializer")] uaid: Uuid, - // DynamoDB // Format: // Topic Messages: // {TOPIC_NOTIFICATION_PREFIX}:{channel id}:{topic} @@ -264,8 +228,7 @@ pub struct NotificationRecord { /// Time in seconds from epoch #[serde(skip_serializing_if = "Option::is_none")] timestamp: Option, - /// DynamoDB expiration timestamp per - /// + /// Expiration timestamp expiry: u64, /// TTL value provided by application server for the message #[serde(skip_serializing_if = "Option::is_none")] @@ -379,7 +342,7 @@ impl NotificationRecord { uaid: *uaid, chidmessageid: val.chidmessageid(), timestamp: Some(val.timestamp), - expiry: sec_since_epoch() + min(val.ttl, MAX_EXPIRY), + expiry: sec_since_epoch() + min(val.ttl, MAX_NOTIFICATION_TTL), ttl: Some(val.ttl), data: val.data, headers: val.headers.map(|h| h.into()), @@ -388,3 +351,16 @@ impl NotificationRecord { } } } + +#[cfg(test)] +mod tests { + use super::{User, USER_RECORD_VERSION}; + + #[test] + fn user_defaults() { + let user = 
User::builder().current_timestamp(22).build().unwrap(); + assert_eq!(user.current_timestamp, Some(22)); + assert_eq!(user.router_type, "webpush".to_owned()); + assert_eq!(user.record_version, Some(USER_RECORD_VERSION)); + } +} diff --git a/autopush-common/src/db/routing.rs b/autopush-common/src/db/routing.rs index 6cbda70a5..7999552aa 100644 --- a/autopush-common/src/db/routing.rs +++ b/autopush-common/src/db/routing.rs @@ -1,15 +1,12 @@ #[derive(Clone, Eq, PartialEq, Debug)] pub(crate) enum StorageType { - DynamoDB, BigTable, None, } impl Default for StorageType { fn default() -> StorageType { - if cfg!(feature = "dynamodb") { - StorageType::DynamoDB - } else if cfg!(feature = "bigtable") { + if cfg!(feature = "bigtable") { StorageType::BigTable } else { StorageType::None @@ -20,7 +17,6 @@ impl Default for StorageType { impl From<&str> for StorageType { fn from(str: &str) -> StorageType { match str.to_lowercase().as_str() { - "dynamodb" => StorageType::DynamoDB, "bigtable" => StorageType::BigTable, _ => { warn!("Using default StorageType for {str}"); diff --git a/autopush-common/src/db/util.rs b/autopush-common/src/db/util.rs deleted file mode 100644 index 05c28e0dc..000000000 --- a/autopush-common/src/db/util.rs +++ /dev/null @@ -1,15 +0,0 @@ -use chrono::Utc; -use rand::{thread_rng, Rng}; - -/// Generate a last_connect -/// -/// This intentionally generates a limited set of keys for each month in a -/// known sequence. For each month, there's 24 hours * 10 random numbers for -/// a total of 240 keys per month depending on when the user migrates forward. 
-pub fn generate_last_connect() -> u64 { - let today = Utc::now(); - let mut rng = thread_rng(); - let num = rng.gen_range(0..10); - let val = format!("{}{:04}", today.format("%Y%m%H"), num); - val.parse::().unwrap() -} diff --git a/autopush-common/src/errors.rs b/autopush-common/src/errors.rs index 5424fbb60..4402dcd6d 100644 --- a/autopush-common/src/errors.rs +++ b/autopush-common/src/errors.rs @@ -1,6 +1,5 @@ //! Error handling for common autopush functions -use std::any::Any; use std::fmt::{self, Display}; use std::io; use std::num; @@ -90,17 +89,9 @@ impl Serialize for ApcError { #[derive(Error, Debug)] pub enum ApcErrorKind { - #[error(transparent)] - Ws(#[from] tungstenite::Error), #[error(transparent)] Io(#[from] io::Error), #[error(transparent)] - Json(#[from] serde_json::Error), - #[error(transparent)] - Httparse(#[from] httparse::Error), - #[error(transparent)] - MetricError(#[from] cadence::MetricError), - #[error(transparent)] UuidError(#[from] uuid::Error), #[error(transparent)] ParseIntError(#[from] num::ParseIntError), @@ -108,49 +99,19 @@ pub enum ApcErrorKind { ParseUrlError(#[from] url::ParseError), #[error(transparent)] ConfigError(#[from] config::ConfigError), - #[error("Error while validating token")] - TokenHashValidation(#[source] openssl::error::ErrorStack), - #[error("Error while creating secret")] - RegistrationSecretHash(#[source] openssl::error::ErrorStack), - - #[error("thread panicked")] - Thread(Box), - #[error("websocket pong timeout")] - PongTimeout, - #[error("repeat uaid disconnect")] - RepeatUaidDisconnect, - #[error("invalid state transition, from: {0}, to: {1}")] - InvalidStateTransition(String, String), - #[error("invalid json: {0}")] - InvalidClientMessage(String), - #[error("server error fetching messages")] - MessageFetch, - #[error("unable to send to client")] - SendError, - #[error("client sent too many pings")] - ExcessivePing, - #[error("Broadcast Error: {0}")] BroadcastError(String), #[error("Payload Error: {0}")] 
PayloadError(String), #[error("General Error: {0}")] GeneralError(String), - #[error("Database Error: {0}")] - DatabaseError(String), - - // TODO: option this. - #[error("Rusoto Error: {0}")] - RusotoError(String), } impl ApcErrorKind { /// Get the associated HTTP status code pub fn status(&self) -> StatusCode { match self { - Self::Json(_) | Self::ParseIntError(_) | Self::ParseUrlError(_) | Self::Httparse(_) => { - StatusCode::BAD_REQUEST - } + Self::ParseIntError(_) | Self::ParseUrlError(_) => StatusCode::BAD_REQUEST, _ => StatusCode::INTERNAL_SERVER_ERROR, } } @@ -158,7 +119,6 @@ impl ApcErrorKind { pub fn is_sentry_event(&self) -> bool { match self { // TODO: Add additional messages to ignore here. - Self::PongTimeout | Self::ExcessivePing => false, // Non-actionable Endpoint errors Self::PayloadError(_) => false, _ => true, @@ -167,13 +127,10 @@ impl ApcErrorKind { pub fn metric_label(&self) -> Option<&'static str> { // TODO: add labels for skipped stuff - let label = match self { - Self::PongTimeout => "pong_timeout", - Self::ExcessivePing => "excessive_ping", - Self::PayloadError(_) => "payload", - _ => return None, - }; - Some(label) + match self { + Self::PayloadError(_) => Some("payload"), + _ => None, + } } } @@ -188,14 +145,19 @@ pub trait ReportableError: std::error::Error { } /// Return a `Backtrace` for this Error if one was captured - fn backtrace(&self) -> Option<&Backtrace>; - + fn backtrace(&self) -> Option<&Backtrace> { + None + } /// Whether this error is reported to Sentry - fn is_sentry_event(&self) -> bool; + fn is_sentry_event(&self) -> bool { + true + } /// Errors that don't emit Sentry events (!is_sentry_event()) emit an /// increment metric instead with this label - fn metric_label(&self) -> Option<&'static str>; + fn metric_label(&self) -> Option<&'static str> { + None + } /// Experimental: return tag key value pairs for metrics and Sentry fn tags(&self) -> Vec<(&str, String)> { diff --git a/autopush-common/src/lib.rs 
b/autopush-common/src/lib.rs index a96df47b5..8656e814c 100644 --- a/autopush-common/src/lib.rs +++ b/autopush-common/src/lib.rs @@ -19,3 +19,32 @@ pub mod test_support; #[macro_use] pub mod util; + +/// Define some global TTLs. +/// +/// [RFC8030 notes](https://datatracker.ietf.org/doc/html/rfc8030#section-5.2) that +/// technically these are u32 values, but we should be kind and use u64. The RFC also +/// does not define a maximum TTL duration. Traditionally, Autopush has capped this +/// to 60 days, partly because we used to require monthly message table rotation. +/// (The TTL was given an extra 30 days grace in order to handle dates near the +/// turn of the month, when we might need to look in two tables for the data.) +/// Since we now have automatically applied garbage collection, we are at a bit of +/// liberty about how long these should be. +/// +/// That gets back to the concept that Push messages are supposed to be "timely". +/// A user may not appreciate that they have an undelivered calendar reminder from +/// 58 days ago, nor should they be interested in a meeting alert that happened last +/// month. When a User Agent (UA) connects, it receives all pending messages. If +/// a user has not used the User Agent in more than +/// [60 days](https://searchfox.org/mozilla-central/search?q=OFFER_PROFILE_RESET_INTERVAL_MS), +/// the User Agent suggests "refreshing Firefox", which essentially throws away one's +/// current profile. This would include all subscriptions a user may have had. +/// +/// To that end, messages left unread for more than 30 days should be considered +/// "abandoned" and any router info assigned to a User Agent that has not contacted +/// Autopush in 60 days can be discarded.
+/// +/// The maximum TTL for notifications, 30 days in seconds +pub const MAX_NOTIFICATION_TTL: u64 = 30 * 24 * 60 * 60; +/// The maximum TTL for router records, 60 days in seconds +pub const MAX_ROUTER_TTL: u64 = 2 * MAX_NOTIFICATION_TTL; diff --git a/autopush-common/src/logging.rs b/autopush-common/src/logging.rs index 32fefe881..52ed08a97 100644 --- a/autopush-common/src/logging.rs +++ b/autopush-common/src/logging.rs @@ -1,6 +1,4 @@ use std::io; -#[cfg(feature = "aws")] -use std::{sync::OnceLock, time::Duration}; use gethostname::gethostname; use slog::{self, Drain}; @@ -8,18 +6,10 @@ use slog_mozlog_json::MozLogJson; use crate::errors::Result; -#[cfg(feature = "aws")] -static EC2_INSTANCE_ID: OnceLock> = OnceLock::new(); - pub fn init_logging(json: bool, name: &str, version: &str) -> Result<()> { let logger = if json { - let hostname = { - #[cfg(feature = "aws")] - let result = ec2_instance_id(); - #[cfg(not(feature = "aws"))] - let result = gethostname().to_string_lossy().to_string(); - result - }; + let hostname = gethostname().to_string_lossy().to_string(); + let drain = MozLogJson::new(io::stdout()) .logger_name(format!("{}-{}", name, version)) .msg_type(format!("{}:log", name)) @@ -62,28 +52,6 @@ pub fn init_test_logging() { slog_stdlog::init().ok(); } -#[cfg(feature = "aws")] -fn ec2_instance_id() -> String { - let ec2_instance_id = EC2_INSTANCE_ID.get_or_init(|| get_ec2_instance_id().ok()); - ec2_instance_id - .clone() - .unwrap_or_else(|| gethostname().to_string_lossy().to_string()) -} -/// Fetch the EC2 instance-id -/// -/// NOTE: This issues a blocking web request -#[cfg(feature = "aws")] -fn get_ec2_instance_id() -> reqwest::Result { - let client = reqwest::blocking::Client::builder() - .timeout(Duration::from_secs(1)) - .build()?; - client - .get("http://169.254.169.254/latest/meta-data/instance-id") - .send()? - .error_for_status()? 
- .text() -} - /// Return parallelism/number of CPU information to log at startup pub fn parallelism_banner() -> String { format!( diff --git a/autopush-common/src/util/mod.rs b/autopush-common/src/util/mod.rs index 54ab444b3..9daa199a1 100644 --- a/autopush-common/src/util/mod.rs +++ b/autopush-common/src/util/mod.rs @@ -9,7 +9,9 @@ use serde::{Deserialize, Deserializer}; pub mod timing; pub mod user_agent; -pub use self::timing::{ms_since_epoch, sec_since_epoch, us_since_epoch}; +pub use self::timing::{ms_since_epoch, ms_utc_midnight, sec_since_epoch, us_since_epoch}; + +pub const ONE_DAY_IN_SECONDS: u64 = 60 * 60 * 24; pub trait InsertOpt { /// Insert an item only if it exists diff --git a/autopush-common/src/util/timing.rs b/autopush-common/src/util/timing.rs index 7c7a7e472..6c452776a 100644 --- a/autopush-common/src/util/timing.rs +++ b/autopush-common/src/util/timing.rs @@ -10,9 +10,36 @@ pub fn ms_since_epoch() -> u64 { Utc::now().timestamp_millis() as u64 } +/// Return UTC midnight for the current day. +pub fn ms_utc_midnight() -> u64 { + let now = Utc::now(); + Utc.with_ymd_and_hms(now.year(), now.month(), now.day(), 0, 0, 0) + .single() + .expect("Clock error") // Something had to go horribly wrong. Panic is ok. 
+ .timestamp_millis() as u64 +} + /// Get the time since the UNIX epoch in microseconds #[allow(dead_code)] pub fn us_since_epoch() -> u64 { let now = Utc::now(); (now.timestamp() as u64) * 1_000_000 + (now.timestamp_subsec_micros() as u64) } + +/// Display a formatted date-time string from a SystemTime +/// +/// (This is useful in dev/debugging) +#[allow(dead_code)] +pub fn date_string_from_systemtime(ts: std::time::SystemTime) -> String { + let dt: chrono::DateTime = ts.into(); + dt.format("%Y-%m-%d %H:%M:%S.%f").to_string() +} + +/// Display a formatted date-time string from a UTC offset in millis +/// +/// (This is useful in dev/debugging) +#[allow(dead_code)] +pub fn date_string_from_utc_ms(offset: u64) -> String { + let utc = std::time::SystemTime::UNIX_EPOCH + std::time::Duration::from_millis(offset); + date_string_from_systemtime(utc) +} diff --git a/autopush-common/src/util/user_agent.rs b/autopush-common/src/util/user_agent.rs index b69c825f2..3477fc7c4 100644 --- a/autopush-common/src/util/user_agent.rs +++ b/autopush-common/src/util/user_agent.rs @@ -21,6 +21,8 @@ pub struct UserAgentInfo { pub metrics_os: String, pub os_version: String, pub os: String, + // Note, Woothee can determine if a user agent is mobile if the + // ["smartphone", "mobilephone"].contains(category) } impl From<&str> for UserAgentInfo { @@ -55,6 +57,16 @@ impl From<&str> for UserAgentInfo { } } +impl From<&actix_web::HttpRequest> for UserAgentInfo { + fn from(req: &actix_web::HttpRequest) -> UserAgentInfo { + if let Some(header) = req.headers().get(&actix_web::http::header::USER_AGENT) { + Self::from(header.to_str().unwrap_or("UNKNOWN")) + } else { + UserAgentInfo::default() + } + } +} + #[cfg(test)] mod tests { use super::UserAgentInfo; diff --git a/autopush/Cargo.toml b/autopush/Cargo.toml deleted file mode 100644 index c428ce824..000000000 --- a/autopush/Cargo.toml +++ /dev/null @@ -1,60 +0,0 @@ -[package] -name = "autopush" -version.workspace = true -authors.workspace = true 
-edition.workspace = true - -[[bin]] -name = "autopush_rs" -path = "src/main.rs" - -[dependencies] -base64.workspace = true -cadence.workspace = true -chrono.workspace = true -config.workspace = true -docopt.workspace = true -fernet.workspace = true -hex.workspace = true -httparse.workspace = true -lazy_static.workspace = true -log.workspace = true -mozsvc-common.workspace = true -openssl.workspace = true -rand.workspace = true -regex.workspace = true -sentry.workspace = true -serde.workspace = true -serde_derive.workspace = true -serde_json.workspace = true -slog.workspace = true -slog-async.workspace = true -slog-mozlog-json.workspace = true -slog-scope.workspace = true -slog-stdlog.workspace = true -slog-term.workspace = true -tungstenite.workspace = true -uuid.workspace = true - -autopush_common = { path = "../autopush-common", features = ["dynamodb"] } -bytes = "0.4" # XXX: pin to < 0.5 for hyper 0.12 -crossbeam-channel = "0.5" -env_logger = "0.11" -thiserror = "1.0" -futures = "0.1.29" # XXX: pin to 0.1 until likely hyper 0.13 -futures-backoff = "0.1.0" -futures-locks = "0.5" # pin to 0.5 until futures update -hyper = "^0.12.36" # pin to hyper 0.12 for now: 0.13 has many changes.. 
-reqwest = "0.9.24" # XXX: pin to < 0.10 until futures 0.3 -rusoto_core = "0.42.0" # 0.46+ requires futures 0.3+ -rusoto_credential = "0.42.0" # 0.46+ requires futures 0.3+ -rusoto_dynamodb = "0.42.0" # XXX: pin to 0.42 until futures 0.3 -serde_dynamodb = "0.4" # 0.7+ requires carg 0.46+ -signal-hook = "0.3" -# state_machine_future = { version = "0.1.6", features = ["debug_code_generation"] } -state_machine_future = "0.2.0" -tokio-core = "0.1" -tokio-io = "0.1.13" -tokio-openssl = "0.3.0" # XXX: pin to < 0.4 until hyper 0.13 -tokio-tungstenite = { version = "0.9.0", default-features = false } # 0.10+ requires tokio 0.3+ -woothee = "0.13" diff --git a/autopush/README.md b/autopush/README.md deleted file mode 100644 index fe8ac27b4..000000000 --- a/autopush/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# Autopush Legacy - -This app is the legacy Autopush websocket server. It will be replaced by -[autoconnect](../autoconnect/), which is still in development. - -The function of this app should be maintained until it is fully replaced. diff --git a/autopush/src/client.rs b/autopush/src/client.rs deleted file mode 100644 index ec9d80afd..000000000 --- a/autopush/src/client.rs +++ /dev/null @@ -1,1411 +0,0 @@ -//! 
Management of connected clients to a WebPush server -#![allow(dead_code)] - -use cadence::{prelude::*, StatsdClient}; -use futures::future::Either; -use futures::sync::mpsc; -use futures::sync::oneshot::Receiver; -use futures::AsyncSink; -use futures::{future, try_ready}; -use futures::{Async, Future, Poll, Sink, Stream}; -use reqwest::r#async::Client as AsyncClient; -use rusoto_dynamodb::UpdateItemOutput; -use state_machine_future::{transition, RentToOwn, StateMachineFuture}; -use std::cell::RefCell; -use std::mem; -use std::rc::Rc; -use std::time::Duration; -use tokio_core::reactor::Timeout; -use uuid::Uuid; - -use crate::db::{CheckStorageResponse, DynamoDbUser, HelloResponse, RegisterResponse}; -use autopush_common::endpoint::make_endpoint; -use autopush_common::errors::{ApcError, ApcErrorKind}; -use autopush_common::notification::Notification; -use autopush_common::util::{ms_since_epoch, sec_since_epoch, user_agent::UserAgentInfo}; - -use crate::megaphone::{Broadcast, BroadcastSubs}; -use crate::server::protocol::{ClientMessage, ServerMessage, ServerNotification}; -use crate::server::Server; -use crate::MyFuture; - -/// Created and handed to the AutopushServer -pub struct RegisteredClient { - /// The User Agent ID (assigned to the remote UserAgent by the server) - pub uaid: Uuid, - /// Internally defined User ID - pub uid: Uuid, - /// The inbound channel for delivery of locally routed Push Notifications - pub tx: mpsc::UnboundedSender, -} - -/// Websocket connector client handler -pub struct Client -where - T: Stream - + Sink - + 'static, -{ - /// The current state machine's state - state_machine: UnAuthClientStateFuture, - /// Handle back to the autoconnect server - srv: Rc, - /// List of interested Broadcast/Megaphone subscriptions for this User Agent - broadcast_subs: Rc>, - /// local webpush router state command request channel. 
- tx: mpsc::UnboundedSender, -} - -impl Client -where - T: Stream - + Sink - + 'static, -{ - /// Spins up a new client communicating over the websocket `ws` specified. - /// - /// The `ws` specified already has ping/pong parts of the websocket - /// protocol managed elsewhere, and this struct is only expected to deal - /// with webpush-specific messages. - /// - /// The `srv` argument is the server that this client is attached to and - /// the various state behind the server. This provides transitive access to - /// various configuration options of the server as well as the ability to - /// call back into Python. - pub fn new(ws: T, srv: &Rc, mut uarx: Receiver) -> Client { - let srv = srv.clone(); - let timeout = - Timeout::new(srv.app_state.open_handshake_timeout.unwrap(), &srv.handle).unwrap(); - let (tx, rx) = mpsc::unbounded(); - - // Pull out the user-agent, which we should have by now - let uastr = match uarx.poll() { - Ok(Async::Ready(ua)) => ua, - Ok(Async::NotReady) => { - error!("Failed to parse the user-agent"); - String::from("") - } - Err(_) => { - error!("Failed to receive a value"); - String::from("") - } - }; - - let broadcast_subs = Rc::new(RefCell::new(Default::default())); - // Initialize the state machine. (UAs start off as Unauthorized ) - let machine_state = UnAuthClientState::start( - UnAuthClientData { - srv: srv.clone(), - ws, - user_agent: uastr, - broadcast_subs: broadcast_subs.clone(), - }, - timeout, - tx.clone(), - rx, - ); - - Self { - state_machine: machine_state, - srv, - broadcast_subs, - tx, - } - } - - /// Determine the difference between the User Agents list of broadcast IDs and the ones - /// currently known by the server - pub fn broadcast_delta(&mut self) -> Option> { - let mut broadcast_subs = self.broadcast_subs.borrow_mut(); - self.srv.broadcast_delta(&mut broadcast_subs) - } - - /// Terminate all connections before shutting down the server. 
- pub fn shutdown(&mut self) { - let _result = self.tx.unbounded_send(ServerNotification::Disconnect); - } -} - -impl Future for Client -where - T: Stream - + Sink - + 'static, -{ - type Item = (); - type Error = ApcError; - - fn poll(&mut self) -> Poll<(), ApcError> { - self.state_machine.poll() - } -} - -// Websocket session statistics -#[derive(Clone, Default)] -struct SessionStatistics { - // User data - uaid: String, - uaid_reset: bool, - existing_uaid: bool, - connection_type: String, - - // Usage data - direct_acked: i32, - direct_storage: i32, - stored_retrieved: i32, - stored_acked: i32, - nacks: i32, - unregisters: i32, - registers: i32, -} - -// Represent the state for a valid WebPush client that is authenticated -pub struct WebPushClient { - uaid: Uuid, - uid: Uuid, - rx: mpsc::UnboundedReceiver, - flags: ClientFlags, - message_month: String, - unacked_direct_notifs: Vec, - unacked_stored_notifs: Vec, - // Highest version from stored, retained for use with increment - // when all the unacked storeds are ack'd - unacked_stored_highest: Option, - connected_at: u64, - sent_from_storage: u32, - last_ping: u64, - stats: SessionStatistics, - deferred_user_registration: Option, - ua_info: UserAgentInfo, -} - -impl Default for WebPushClient { - fn default() -> Self { - let (_, rx) = mpsc::unbounded(); - Self { - uaid: Default::default(), - uid: Default::default(), - rx, - flags: Default::default(), - message_month: Default::default(), - unacked_direct_notifs: Default::default(), - unacked_stored_notifs: Default::default(), - unacked_stored_highest: Default::default(), - connected_at: Default::default(), - sent_from_storage: Default::default(), - last_ping: Default::default(), - stats: Default::default(), - deferred_user_registration: Default::default(), - ua_info: Default::default(), - } - } -} - -impl WebPushClient { - fn unacked_messages(&self) -> bool { - !self.unacked_stored_notifs.is_empty() || !self.unacked_direct_notifs.is_empty() - } -} - 
-#[derive(Default)] -pub struct ClientFlags { - /// Whether check_storage queries for topic (not "timestamped") messages - include_topic: bool, - /// Flags the need to increment the last read for timestamp for timestamped messages - increment_storage: bool, - /// Whether this client needs to check storage for messages - check: bool, - /// Flags the need to drop the user record if the User record_version is less - /// than the USER_RECORD_VERSION constant. The reset is done after all existing - /// message traffic is sent over. - old_record_version: bool, - rotate_message_table: bool, -} - -impl ClientFlags { - fn new() -> Self { - Self { - include_topic: true, - increment_storage: false, - check: false, - old_record_version: false, - rotate_message_table: false, - } - } -} - -pub struct UnAuthClientData { - srv: Rc, - ws: T, - user_agent: String, - broadcast_subs: Rc>, -} - -impl UnAuthClientData -where - T: Stream - + Sink - + 'static, -{ - fn input_with_timeout(&mut self, timeout: &mut Timeout) -> Poll { - let item = match timeout.poll()? { - Async::Ready(_) => { - return Err(ApcErrorKind::GeneralError("Client timed out".into()).into()) - } - Async::NotReady => match self.ws.poll()? { - Async::Ready(None) => { - return Err(ApcErrorKind::GeneralError("Client dropped".into()).into()) - } - Async::Ready(Some(msg)) => Async::Ready(msg), - Async::NotReady => Async::NotReady, - }, - }; - Ok(item) - } -} - -pub struct AuthClientData { - srv: Rc, - ws: T, - webpush: Rc>, - broadcast_subs: Rc>, -} - -impl AuthClientData -where - T: Stream - + Sink - + 'static, -{ - fn input_or_notif(&mut self) -> Poll, ApcError> { - let mut webpush = self.webpush.borrow_mut(); - let item = match webpush.rx.poll() { - Ok(Async::Ready(Some(notif))) => Either::B(notif), - Ok(Async::Ready(None)) => { - return Err(ApcErrorKind::GeneralError("Sending side dropped".into()).into()) - } - Ok(Async::NotReady) => match self.ws.poll()? 
{ - Async::Ready(None) => { - return Err(ApcErrorKind::GeneralError("Client dropped".into()).into()) - } - Async::Ready(Some(msg)) => Either::A(msg), - Async::NotReady => return Ok(Async::NotReady), - }, - Err(_) => return Err(ApcErrorKind::GeneralError("Unexpected error".into()).into()), - }; - Ok(Async::Ready(item)) - } -} - -/* -STATE MACHINE -*/ -#[derive(StateMachineFuture)] -pub enum UnAuthClientState -where - T: Stream - + Sink - + 'static, -{ - #[state_machine_future(start, transitions(AwaitProcessHello))] - AwaitHello { - data: UnAuthClientData, - timeout: Timeout, - tx: mpsc::UnboundedSender, - rx: mpsc::UnboundedReceiver, - }, - - #[state_machine_future(transitions(AwaitRegistryConnect))] - AwaitProcessHello { - response: MyFuture, - data: UnAuthClientData, - desired_broadcasts: Vec, - tx: mpsc::UnboundedSender, - rx: mpsc::UnboundedReceiver, - }, - - #[state_machine_future(transitions(AwaitSessionComplete))] - AwaitRegistryConnect { - response: MyFuture, - srv: Rc, - ws: T, - user_agent: String, - webpush: Rc>, - broadcast_subs: Rc>, - }, - - #[state_machine_future(transitions(AwaitRegistryDisconnect))] - AwaitSessionComplete { - auth_state_machine: AuthClientStateFuture, - srv: Rc, - webpush: Rc>, - }, - - #[state_machine_future(transitions(UnAuthDone))] - AwaitRegistryDisconnect { - response: MyFuture<()>, - srv: Rc, - webpush: Rc>, - error: Option, - }, - - #[state_machine_future(ready)] - UnAuthDone(()), - - #[state_machine_future(error)] - GeneralUnauthClientError(ApcError), -} - -impl PollUnAuthClientState for UnAuthClientState -where - T: Stream - + Sink - + 'static, -{ - fn poll_await_hello<'a>( - hello: &'a mut RentToOwn<'a, AwaitHello>, - ) -> Poll, ApcError> { - trace!("State: AwaitHello"); - let (uaid, desired_broadcasts) = { - let AwaitHello { - ref mut data, - ref mut timeout, - .. - } = **hello; - match try_ready!(data.input_with_timeout(timeout)) { - ClientMessage::Hello { - uaid, - use_webpush: Some(true), - broadcasts, - .. 
- } => ( - uaid.and_then(|uaid| Uuid::parse_str(uaid.as_str()).ok()), - Broadcast::from_hashmap(broadcasts.unwrap_or_default()), - ), - _ => { - return Err(ApcErrorKind::BroadcastError( - "Invalid message, must be hello".into(), - ) - .into()) - } - } - }; - - let AwaitHello { data, tx, rx, .. } = hello.take(); - let connected_at = ms_since_epoch(); - trace!("❓ AwaitHello UAID: {:?}", uaid); - // Defer registration (don't write the user to the router table yet) - // when no uaid was specified. We'll get back a pending DynamoDbUser - // from the HelloResponse. It'll be potentially written to the db later - // whenever the user first subscribes to a channel_id - // (ClientMessage::Register). - let defer_registration = uaid.is_none(); - let response = Box::new(data.srv.ddb.hello( - connected_at, - uaid.as_ref(), - &data.srv.app_state.router_url, - defer_registration, - )); - transition!(AwaitProcessHello { - response, - data, - desired_broadcasts, - tx, - rx, - }) - } - - fn poll_await_process_hello<'a>( - process_hello: &'a mut RentToOwn<'a, AwaitProcessHello>, - ) -> Poll, ApcError> { - trace!("State: AwaitProcessHello"); - let ( - uaid, - message_month, - check_storage, - old_record_version, - rotate_message_table, - connected_at, - deferred_user_registration, - ) = { - let res = try_ready!(process_hello.response.poll()); - match res { - HelloResponse { - uaid: Some(uaid), - message_month, - check_storage, - old_record_version, - rotate_message_table, - connected_at, - deferred_user_registration, - } => { - trace!("❓ AfterAwaitProcessHello: uaid = {:?}", uaid); - ( - uaid, - message_month, - check_storage, - old_record_version, - rotate_message_table, - connected_at, - deferred_user_registration, - ) - } - HelloResponse { uaid: None, .. 
} => { - trace!("UAID undefined"); - return Err( - ApcErrorKind::GeneralError("Already connected elsewhere".into()).into(), - ); - } - } - }; - - trace!("❓ post hello: {:?}", &uaid); - - let AwaitProcessHello { - data, - desired_broadcasts, - tx, - rx, - .. - } = process_hello.take(); - let user_is_registered = deferred_user_registration.is_none(); - trace!( - "💬 Taken hello. user_is_registered: {}, {:?}", - user_is_registered, - uaid - ); - data.srv.metrics.incr("ua.command.hello").ok(); - - let UnAuthClientData { - srv, - ws, - user_agent, - broadcast_subs, - } = data; - - // Setup the objects and such needed for a WebPushClient - let mut flags = ClientFlags::new(); - flags.check = check_storage; - flags.old_record_version = old_record_version; - flags.rotate_message_table = rotate_message_table; - let (initialized_subs, broadcasts) = srv.broadcast_init(&desired_broadcasts); - broadcast_subs.replace(initialized_subs); - let uid = Uuid::new_v4(); - let webpush = Rc::new(RefCell::new(WebPushClient { - uaid, - uid, - flags, - rx, - message_month, - connected_at, - stats: SessionStatistics { - uaid: uaid.as_simple().to_string(), - uaid_reset: old_record_version, - existing_uaid: check_storage, - connection_type: String::from("webpush"), - ..Default::default() - }, - deferred_user_registration, - ua_info: UserAgentInfo::from(user_agent.as_ref()), - ..Default::default() - })); - - let response = Box::new( - srv.clients - .connect(RegisteredClient { uaid, uid, tx }) - .and_then(move |_| { - // generate the response message back to the client. 
- Ok(ServerMessage::Hello { - uaid: uaid.as_simple().to_string(), - status: 200, - use_webpush: Some(true), - broadcasts, - }) - }), - ); - - transition!(AwaitRegistryConnect { - response, - srv, - ws, - user_agent, - webpush, - broadcast_subs, - }) - } - - fn poll_await_registry_connect<'a>( - registry_connect: &'a mut RentToOwn<'a, AwaitRegistryConnect>, - ) -> Poll, ApcError> { - trace!("State: AwaitRegistryConnect"); - let hello_response = try_ready!(registry_connect.response.poll()); - - let AwaitRegistryConnect { - srv, - ws, - webpush, - broadcast_subs, - .. - } = registry_connect.take(); - - let auth_state_machine = AuthClientState::start( - vec![hello_response], - AuthClientData { - srv: srv.clone(), - ws, - webpush: webpush.clone(), - broadcast_subs, - }, - ); - - transition!(AwaitSessionComplete { - auth_state_machine, - srv, - webpush, - }) - } - - fn poll_await_session_complete<'a>( - session_complete: &'a mut RentToOwn<'a, AwaitSessionComplete>, - ) -> Poll { - trace!("State: AwaitSessionComplete"); - let error = { - match session_complete.auth_state_machine.poll() { - Ok(Async::NotReady) => return Ok(Async::NotReady), - Ok(Async::Ready(_)) => None, - - Err(e) => match e.kind { - ApcErrorKind::Ws(_) - | ApcErrorKind::Io(_) - | ApcErrorKind::PongTimeout - | ApcErrorKind::RepeatUaidDisconnect - | ApcErrorKind::ExcessivePing - | ApcErrorKind::InvalidStateTransition(_, _) - | ApcErrorKind::InvalidClientMessage(_) - | ApcErrorKind::SendError => None, - _ => Some(e), - }, - } - }; - - let AwaitSessionComplete { srv, webpush, .. 
} = session_complete.take(); - - let response = srv - .clients - .disconnect(&webpush.borrow().uaid, &webpush.borrow().uid); - - transition!(AwaitRegistryDisconnect { - response, - srv, - webpush, - error, - }) - } - - fn poll_await_registry_disconnect<'a>( - registry_disconnect: &'a mut RentToOwn<'a, AwaitRegistryDisconnect>, - ) -> Poll { - trace!("State: AwaitRegistryDisconnect"); - try_ready!(registry_disconnect.response.poll()); - - let AwaitRegistryDisconnect { - srv, - webpush, - error, - .. - } = registry_disconnect.take(); - - let mut webpush = webpush.borrow_mut(); - // If there's any notifications in the queue, move them to our unacked direct notifs - webpush.rx.close(); - while let Ok(Async::Ready(Some(msg))) = webpush.rx.poll() { - match msg { - ServerNotification::CheckStorage | ServerNotification::Disconnect => continue, - ServerNotification::Notification(notif) => { - webpush.unacked_direct_notifs.push(notif) - } - } - } - let now = ms_since_epoch(); - let elapsed = (now - webpush.connected_at) / 1_000; - let ua_info = webpush.ua_info.clone(); - // dogstatsd doesn't support timers: use histogram instead - srv.metrics - .time_with_tags("ua.connection.lifespan", elapsed) - .with_tag("ua_os_family", &ua_info.metrics_os) - .with_tag("ua_browser_family", &ua_info.metrics_browser) - .send(); - - // Log out the sentry message if applicable and convert to error msg - let error = if let Some(ref err) = error { - let ua_info = ua_info.clone(); - let mut event = sentry::event_from_error(err); - event.exception.last_mut().unwrap().stacktrace = - sentry::integrations::backtrace::backtrace_to_stacktrace(&err.backtrace); - - event.user = Some(sentry::User { - id: Some(webpush.uaid.as_simple().to_string()), - ..Default::default() - }); - event - .tags - .insert("ua_name".to_string(), ua_info.browser_name); - event - .tags - .insert("ua_os_family".to_string(), ua_info.metrics_os); - event - .tags - .insert("ua_os_ver".to_string(), ua_info.os_version); - event - 
.tags - .insert("ua_browser_family".to_string(), ua_info.metrics_browser); - event - .tags - .insert("ua_browser_ver".to_string(), ua_info.browser_version); - sentry::capture_event(event); - err.to_string() - } else { - "".to_string() - }; - // If there's direct unack'd messages, they need to be saved out without blocking - // here - let mut stats = webpush.stats.clone(); - let unacked_direct_notifs = webpush.unacked_direct_notifs.len(); - if unacked_direct_notifs > 0 { - debug!("Writing direct notifications to storage"); - stats.direct_storage += unacked_direct_notifs as i32; - let mut notifs = mem::take(&mut webpush.unacked_direct_notifs); - // Ensure we don't store these as legacy by setting a 0 as the sortkey_timestamp - // That will ensure the Python side doesn't mark it as legacy during conversion and - // still get the correct default us_time when saving. - for notif in &mut notifs { - notif.sortkey_timestamp = Some(0); - } - save_and_notify_undelivered_messages(&webpush, srv, notifs); - } - - // Log out the final stats message - info!("Session"; - "uaid_hash" => &stats.uaid, - "uaid_reset" => stats.uaid_reset, - "existing_uaid" => stats.existing_uaid, - "connection_type" => &stats.connection_type, - "ua_name" => ua_info.browser_name, - "ua_os_family" => ua_info.metrics_os, - "ua_os_ver" => ua_info.os_version, - "ua_browser_family" => ua_info.metrics_browser, - "ua_browser_ver" => ua_info.browser_version, - "ua_category" => ua_info.category, - "connection_time" => elapsed, - "direct_acked" => stats.direct_acked, - "direct_storage" => stats.direct_storage, - "stored_retrieved" => stats.stored_retrieved, - "stored_acked" => stats.stored_acked, - "nacks" => stats.nacks, - "registers" => stats.registers, - "unregisters" => stats.unregisters, - "disconnect_reason" => error, - ); - transition!(UnAuthDone(())) - } -} - -fn save_and_notify_undelivered_messages( - webpush: &WebPushClient, - srv: Rc, - notifs: Vec, -) { - let srv2 = srv.clone(); - let uaid = 
webpush.uaid; - let connected_at = webpush.connected_at; - srv.handle.spawn( - srv.ddb - .store_messages(&webpush.uaid, &webpush.message_month, notifs) - .and_then(move |_| { - debug!("Finished saving unacked direct notifications, checking for reconnect"); - srv2.ddb.get_user(&uaid) - }) - .and_then(move |user| { - let user = match user.ok_or_else(|| { - ApcErrorKind::DatabaseError("No user record found".into()).into() - }) { - Ok(user) => user, - Err(e) => return future::err(e), - }; - - // Return an err to stop processing if the user hasn't reconnected yet, otherwise - // attempt to construct a client to make the request - if user.connected_at == connected_at { - future::err(ApcErrorKind::GeneralError("No notify needed".into()).into()) - } else if let Some(node_id) = user.node_id { - let result = AsyncClient::builder() - .timeout(Duration::from_secs(1)) - .build(); - if let Ok(client) = result { - future::ok((client, user.uaid, node_id)) - } else { - future::err( - ApcErrorKind::GeneralError("Unable to build http client".into()).into(), - ) - } - } else { - future::err( - ApcErrorKind::GeneralError("No new node_id, notify not needed".into()) - .into(), - ) - } - }) - .and_then(|(client, uaid, node_id)| { - // Send the notify to the user - let notify_url = format!("{}/notif/{}", node_id, uaid.as_simple()); - client - .put(¬ify_url) - .send() - .map_err(|_| ApcErrorKind::GeneralError("Failed to send".into()).into()) - }) - .then(|_| { - debug!("Finished cleanup"); - Ok(()) - }), - ); -} - -/* -STATE MACHINE: Main engine -*/ -#[derive(StateMachineFuture)] -pub enum AuthClientState -where - T: Stream - + Sink - + 'static, -{ - /// Send one or more locally routed notification messages - #[state_machine_future(start, transitions(AwaitSend, DetermineAck))] - Send { - smessages: Vec, - data: AuthClientData, - }, - - #[state_machine_future(transitions(DetermineAck, Send, AwaitDropUser))] - AwaitSend { - smessages: Vec, - data: AuthClientData, - }, - - 
#[state_machine_future(transitions( - IncrementStorage, - CheckStorage, - AwaitDropUser, - AwaitMigrateUser, - AwaitInput - ))] - DetermineAck { data: AuthClientData }, - - #[state_machine_future(transitions( - DetermineAck, - Send, - AwaitInput, - AwaitRegister, - AwaitUnregister, - AwaitDelete - ))] - AwaitInput { data: AuthClientData }, - - #[state_machine_future(transitions(AwaitIncrementStorage))] - IncrementStorage { data: AuthClientData }, - - #[state_machine_future(transitions(DetermineAck))] - AwaitIncrementStorage { - response: MyFuture, - data: AuthClientData, - }, - - #[state_machine_future(transitions(AwaitCheckStorage))] - CheckStorage { data: AuthClientData }, - - #[state_machine_future(transitions(Send, DetermineAck))] - AwaitCheckStorage { - response: MyFuture, - data: AuthClientData, - }, - - #[state_machine_future(transitions(DetermineAck))] - AwaitMigrateUser { - response: MyFuture<()>, - data: AuthClientData, - }, - - #[state_machine_future(transitions(AuthDone))] - AwaitDropUser { - response: MyFuture<()>, - data: AuthClientData, - }, - - #[state_machine_future(transitions(Send))] - AwaitRegister { - channel_id: Uuid, - response: MyFuture, - data: AuthClientData, - }, - - #[state_machine_future(transitions(Send))] - AwaitUnregister { - channel_id: Uuid, - code: u32, - response: MyFuture, - data: AuthClientData, - }, - - #[state_machine_future(transitions(DetermineAck))] - AwaitDelete { - response: MyFuture<()>, - data: AuthClientData, - }, - - #[state_machine_future(ready)] - AuthDone(()), - - #[state_machine_future(error)] - GeneralAuthClientStateError(ApcError), -} - -impl PollAuthClientState for AuthClientState -where - T: Stream - + Sink - + 'static, -{ - fn poll_send<'a>(send: &'a mut RentToOwn<'a, Send>) -> Poll, ApcError> { - trace!("State: Send"); - let sent = { - let Send { - ref mut smessages, - ref mut data, - .. 
- } = **send; - if !smessages.is_empty() { - trace!("🚟 Sending {} msgs: {:#?}", smessages.len(), smessages); - let item = smessages.remove(0); - let ret = data - .ws - .start_send(item) - .map_err(|_e| ApcErrorKind::SendError)?; - match ret { - AsyncSink::Ready => true, - AsyncSink::NotReady(returned) => { - smessages.insert(0, returned); - return Ok(Async::NotReady); - } - } - } else { - false - } - }; - - let Send { smessages, data } = send.take(); - if sent { - transition!(AwaitSend { smessages, data }); - } - transition!(DetermineAck { data }) - } - - fn poll_await_send<'a>( - await_send: &'a mut RentToOwn<'a, AwaitSend>, - ) -> Poll, ApcError> { - trace!("State: AwaitSend"); - try_ready!(await_send.data.ws.poll_complete()); - - let AwaitSend { smessages, data } = await_send.take(); - let webpush_rc = data.webpush.clone(); - let webpush = webpush_rc.borrow(); - if webpush.sent_from_storage > data.srv.app_state.msg_limit { - // Exceeded the max limit of stored messages: drop the user to trigger a - // re-register - debug!("Dropping user: exceeded msg_limit"); - let response = Box::new(data.srv.ddb.drop_uaid(&webpush.uaid)); - transition!(AwaitDropUser { response, data }); - } else if !smessages.is_empty() { - transition!(Send { smessages, data }); - } - transition!(DetermineAck { data }) - } - - fn poll_determine_ack<'a>( - detack: &'a mut RentToOwn<'a, DetermineAck>, - ) -> Poll, ApcError> { - let DetermineAck { data } = detack.take(); - let webpush_rc = data.webpush.clone(); - let webpush = webpush_rc.borrow(); - let all_acked = !webpush.unacked_messages(); - if all_acked && webpush.flags.check && webpush.flags.increment_storage { - transition!(IncrementStorage { data }); - } else if all_acked && webpush.flags.check { - transition!(CheckStorage { data }); - } else if all_acked && webpush.flags.rotate_message_table { - debug!("Triggering migration"); - data.srv.metrics.incr("ua.rotate_message_table").ok(); - let response = Box::new( - data.srv - .ddb - 
.migrate_user(&webpush.uaid, &webpush.message_month), - ); - transition!(AwaitMigrateUser { response, data }); - } else if all_acked && webpush.flags.old_record_version { - debug!("Dropping user: flagged old_record_version"); - let response = Box::new(data.srv.ddb.drop_uaid(&webpush.uaid)); - transition!(AwaitDropUser { response, data }); - } - transition!(AwaitInput { data }) - } - - fn poll_await_input<'a>( - r#await: &'a mut RentToOwn<'a, AwaitInput>, - ) -> Poll, ApcError> { - trace!("State: AwaitInput"); - // The following is a blocking call. No action is taken until we either get a - // websocket data packet or there's an incoming notification. - let input = try_ready!(r#await.data.input_or_notif()); - let AwaitInput { data } = r#await.take(); - let webpush_rc = data.webpush.clone(); - let mut webpush = webpush_rc.borrow_mut(); - match input { - Either::A(ClientMessage::Hello { .. }) => Err(ApcErrorKind::InvalidStateTransition( - "AwaitInput".to_string(), - "Hello".to_string(), - ) - .into()), - Either::A(ClientMessage::BroadcastSubscribe { broadcasts }) => { - let broadcast_delta = { - let mut broadcast_subs = data.broadcast_subs.borrow_mut(); - data.srv.process_broadcasts( - &mut broadcast_subs, - &Broadcast::from_hashmap(broadcasts), - ) - }; - - if let Some(response) = broadcast_delta { - transition!(Send { - smessages: vec![ServerMessage::Broadcast { - broadcasts: response, - }], - data, - }); - } else { - transition!(AwaitInput { data }); - } - } - Either::A(ClientMessage::Register { - channel_id: channel_id_str, - key, - }) => { - debug!("Got a register command"; - "uaid" => &webpush.uaid.to_string(), - "channel_id" => &channel_id_str, - ); - let channel_id = Uuid::parse_str(&channel_id_str).map_err(|_e| { - ApcErrorKind::InvalidClientMessage(format!( - "Invalid channelID: {channel_id_str}" - )) - })?; - if channel_id.as_hyphenated().to_string() != channel_id_str { - return Err(ApcErrorKind::InvalidClientMessage(format!( - "Invalid UUID format, not 
lower-case/dashed: {channel_id}", - )) - .into()); - } - - let uaid = webpush.uaid; - let message_month = webpush.message_month.clone(); - let srv = &data.srv; - let fut = match make_endpoint( - &uaid, - &channel_id, - key.as_deref(), - &srv.app_state.endpoint_url, - &srv.app_state.fernet, - ) { - Ok(endpoint) => srv.ddb.register_channel( - &uaid, - &channel_id, - &message_month, - &endpoint, - webpush.deferred_user_registration.as_ref(), - ), - Err(e) => { - error!("make_endpoint: {:?}", e); - Box::new(future::ok(RegisterResponse::Error { - error_msg: "Failed to generate endpoint".to_string(), - status: 400, - })) - } - }; - transition!(AwaitRegister { - channel_id, - response: fut, - data, - }); - } - Either::A(ClientMessage::Unregister { channel_id, code }) => { - debug!("Got a unregister command"); - // XXX: unregister should check the format of channel_id like - // register does - let uaid = webpush.uaid; - let message_month = webpush.message_month.clone(); - let response = Box::new(data.srv.ddb.unregister_channel( - &uaid, - &channel_id, - &message_month, - )); - transition!(AwaitUnregister { - channel_id, - code: code.unwrap_or(200), - response, - data, - }); - } - Either::A(ClientMessage::Nack { code, .. 
}) => { - // only metric codes expected from the client (or 0) - let mcode = code - .and_then(|code| { - if (301..=303).contains(&code) { - Some(code) - } else { - None - } - }) - .unwrap_or(0); - data.srv - .metrics - .incr_with_tags("ua.command.nack") - .with_tag("code", &mcode.to_string()) - .send(); - webpush.stats.nacks += 1; - transition!(AwaitInput { data }); - } - Either::A(ClientMessage::Ack { updates }) => { - data.srv.metrics.incr("ua.command.ack").ok(); - let mut fut: Option> = None; - for notif in &updates { - if let Some(pos) = webpush.unacked_direct_notifs.iter().position(|v| { - v.channel_id == notif.channel_id && v.version == notif.version - }) { - webpush.stats.direct_acked += 1; - webpush.unacked_direct_notifs.remove(pos); - continue; - }; - if let Some(pos) = webpush.unacked_stored_notifs.iter().position(|v| { - v.channel_id == notif.channel_id && v.version == notif.version - }) { - webpush.stats.stored_acked += 1; - let message_month = webpush.message_month.clone(); - let n = webpush.unacked_stored_notifs.remove(pos); - // Topic/legacy messages have no sortkey_timestamp - if n.sortkey_timestamp.is_none() { - fut = if let Some(call) = fut { - let my_fut = - data.srv - .ddb - .delete_message(&message_month, &webpush.uaid, &n); - Some(Box::new(call.and_then(move |_| my_fut))) - } else { - Some(Box::new(data.srv.ddb.delete_message( - &message_month, - &webpush.uaid, - &n, - ))) - } - } - continue; - }; - } - if let Some(my_fut) = fut { - transition!(AwaitDelete { - response: my_fut, - data, - }); - } else { - transition!(DetermineAck { data }); - } - } - Either::A(ClientMessage::Ping) => { - // Clients shouldn't ping > than once per minute or we - // disconnect them - if sec_since_epoch() - webpush.last_ping >= 45 { - trace!("🏓 Got a ping, sending pong"); - webpush.last_ping = sec_since_epoch(); - transition!(Send { - smessages: vec![ServerMessage::Ping], - data, - }) - } else { - trace!("🏓 Got a ping too quickly, disconnecting"); - 
Err(ApcErrorKind::ExcessivePing.into()) - } - } - Either::B(ServerNotification::Notification(notif)) => { - if notif.ttl != 0 { - webpush.unacked_direct_notifs.push(notif.clone()); - } - debug!("Got a notification to send, sending!"); - emit_metrics_for_send(&data.srv.metrics, ¬if, "Direct", &webpush.ua_info); - transition!(Send { - smessages: vec![ServerMessage::Notification(notif)], - data, - }); - } - Either::B(ServerNotification::CheckStorage) => { - webpush.flags.include_topic = true; - webpush.flags.check = true; - transition!(DetermineAck { data }); - } - Either::B(ServerNotification::Disconnect) => { - debug!("Got told to disconnect, connecting client has our uaid"); - Err(ApcErrorKind::RepeatUaidDisconnect.into()) - } - } - } - - fn poll_increment_storage<'a>( - increment_storage: &'a mut RentToOwn<'a, IncrementStorage>, - ) -> Poll, ApcError> { - trace!("State: IncrementStorage"); - let webpush_rc = increment_storage.data.webpush.clone(); - let webpush = webpush_rc.borrow(); - let timestamp = webpush - .unacked_stored_highest - .ok_or_else(|| ApcErrorKind::GeneralError("unacked_stored_highest unset".into()))? - .to_string(); - let response = Box::new(increment_storage.data.srv.ddb.increment_storage( - &webpush.message_month, - &webpush.uaid, - ×tamp, - )); - transition!(AwaitIncrementStorage { - response, - data: increment_storage.take().data, - }) - } - - fn poll_await_increment_storage<'a>( - await_increment_storage: &'a mut RentToOwn<'a, AwaitIncrementStorage>, - ) -> Poll, ApcError> { - trace!("State: AwaitIncrementStorage"); - try_ready!(await_increment_storage.response.poll()); - let AwaitIncrementStorage { data, .. 
} = await_increment_storage.take(); - let webpush = data.webpush.clone(); - webpush.borrow_mut().flags.increment_storage = false; - transition!(DetermineAck { data }) - } - - fn poll_check_storage<'a>( - check_storage: &'a mut RentToOwn<'a, CheckStorage>, - ) -> Poll, ApcError> { - trace!("State: CheckStorage"); - let CheckStorage { data } = check_storage.take(); - let response = Box::new({ - let webpush = data.webpush.borrow(); - data.srv.ddb.check_storage( - &webpush.message_month.clone(), - &webpush.uaid, - webpush.flags.include_topic, - webpush.unacked_stored_highest, - ) - }); - transition!(AwaitCheckStorage { response, data }) - } - - fn poll_await_check_storage<'a>( - await_check_storage: &'a mut RentToOwn<'a, AwaitCheckStorage>, - ) -> Poll, ApcError> { - trace!("State: AwaitCheckStorage"); - let CheckStorageResponse { - include_topic, - mut messages, - timestamp, - } = try_ready!(await_check_storage.response.poll()); - debug!("Got checkstorage response"); - - let AwaitCheckStorage { data, .. 
} = await_check_storage.take(); - let webpush_rc = data.webpush.clone(); - let mut webpush = webpush_rc.borrow_mut(); - webpush.flags.include_topic = include_topic; - debug!("Setting unacked stored highest to {:?}", timestamp); - webpush.unacked_stored_highest = timestamp; - if messages.is_empty() { - webpush.flags.check = false; - webpush.sent_from_storage = 0; - transition!(DetermineAck { data }); - } - - // Filter out TTL expired messages - let now = sec_since_epoch(); - let srv = data.srv.clone(); - messages.retain(|n| { - if !n.expired(now) { - return true; - } - if n.sortkey_timestamp.is_none() { - srv.handle.spawn( - srv.ddb - .delete_message(&webpush.message_month, &webpush.uaid, n) - .then(|_| { - debug!("Deleting expired message without sortkey_timestamp"); - Ok(()) - }), - ); - } - false - }); - webpush.flags.increment_storage = !include_topic && timestamp.is_some(); - // If there's still messages send them out - if !messages.is_empty() { - webpush - .unacked_stored_notifs - .extend(messages.iter().cloned()); - let smessages: Vec<_> = messages - .into_iter() - .inspect(|msg| { - emit_metrics_for_send(&data.srv.metrics, msg, "Stored", &webpush.ua_info) - }) - .map(ServerMessage::Notification) - .collect(); - webpush.sent_from_storage += smessages.len() as u32; - transition!(Send { smessages, data }) - } else { - // No messages remaining - transition!(DetermineAck { data }) - } - } - - fn poll_await_migrate_user<'a>( - await_migrate_user: &'a mut RentToOwn<'a, AwaitMigrateUser>, - ) -> Poll, ApcError> { - trace!("State: AwaitMigrateUser"); - try_ready!(await_migrate_user.response.poll()); - let AwaitMigrateUser { data, .. 
} = await_migrate_user.take(); - { - let mut webpush = data.webpush.borrow_mut(); - webpush.message_month = data.srv.ddb.current_message_month.clone(); - webpush.flags.rotate_message_table = false; - } - transition!(DetermineAck { data }) - } - - fn poll_await_drop_user<'a>( - await_drop_user: &'a mut RentToOwn<'a, AwaitDropUser>, - ) -> Poll { - trace!("State: AwaitDropUser"); - try_ready!(await_drop_user.response.poll()); - transition!(AuthDone(())) - } - - fn poll_await_register<'a>( - await_register: &'a mut RentToOwn<'a, AwaitRegister>, - ) -> Poll, ApcError> { - trace!("State: AwaitRegister"); - let msg = match try_ready!(await_register.response.poll()) { - RegisterResponse::Success { endpoint } => { - let mut webpush = await_register.data.webpush.borrow_mut(); - await_register - .data - .srv - .metrics - .incr("ua.command.register") - .ok(); - webpush.stats.registers += 1; - ServerMessage::Register { - channel_id: await_register.channel_id, - status: 200, - push_endpoint: endpoint, - } - } - RegisterResponse::Error { error_msg, status } => { - debug!("Got unregister fail, error: {}", error_msg); - ServerMessage::Register { - channel_id: await_register.channel_id, - status, - push_endpoint: "".into(), - } - } - }; - - let data = await_register.take().data; - { - let mut webpush = data.webpush.borrow_mut(); - // If we completed a deferred user registration during a channel - // subscription (Client::Register), we're now all done with it - webpush.deferred_user_registration = None; - } - - transition!(Send { - smessages: vec![msg], - data, - }) - } - - fn poll_await_unregister<'a>( - await_unregister: &'a mut RentToOwn<'a, AwaitUnregister>, - ) -> Poll, ApcError> { - trace!("State: AwaitUnRegister"); - let msg = if try_ready!(await_unregister.response.poll()) { - debug!("Got the unregister response"); - let mut webpush = await_unregister.data.webpush.borrow_mut(); - webpush.stats.unregisters += 1; - ServerMessage::Unregister { - channel_id: 
await_unregister.channel_id, - status: 200, - } - } else { - debug!("Got unregister fail"); - ServerMessage::Unregister { - channel_id: await_unregister.channel_id, - status: 500, - } - }; - - let AwaitUnregister { code, data, .. } = await_unregister.take(); - data.srv - .metrics - .incr_with_tags("ua.command.unregister") - .with_tag("code", &code.to_string()) - .send(); - transition!(Send { - smessages: vec![msg], - data, - }) - } - - fn poll_await_delete<'a>( - await_delete: &'a mut RentToOwn<'a, AwaitDelete>, - ) -> Poll, ApcError> { - trace!("State: AwaitDelete"); - try_ready!(await_delete.response.poll()); - transition!(DetermineAck { - data: await_delete.take().data, - }) - } -} - -fn emit_metrics_for_send( - metrics: &StatsdClient, - notif: &Notification, - source: &'static str, - user_agent_info: &UserAgentInfo, -) { - metrics - .incr_with_tags("ua.notification.sent") - .with_tag("source", source) - .with_tag("topic", ¬if.topic.is_some().to_string()) - .with_tag("os", &user_agent_info.metrics_os) - // TODO: include `internal` if meta is set - .send(); - metrics - .count_with_tags( - "ua.message_data", - notif.data.as_ref().map_or(0, |data| data.len() as i64), - ) - .with_tag("source", source) - .with_tag("os", &user_agent_info.metrics_os) - .send(); -} diff --git a/autopush/src/db/commands.rs b/autopush/src/db/commands.rs deleted file mode 100644 index 2305ac88f..000000000 --- a/autopush/src/db/commands.rs +++ /dev/null @@ -1,527 +0,0 @@ -use std::collections::HashSet; -use std::fmt::{Debug, Display}; -use std::result::Result as StdResult; -use std::sync::Arc; -use uuid::Uuid; - -use cadence::{CountedExt, StatsdClient}; -use chrono::Utc; -use futures::{future, Future}; -use futures_backoff::retry_if; -use rusoto_core::RusotoError; -use rusoto_dynamodb::{ - AttributeValue, BatchWriteItemError, DeleteItemError, DeleteItemInput, DeleteItemOutput, - DynamoDb, DynamoDbClient, GetItemError, GetItemInput, GetItemOutput, ListTablesInput, - ListTablesOutput, 
PutItemError, PutItemInput, PutItemOutput, QueryError, QueryInput, - UpdateItemError, UpdateItemInput, UpdateItemOutput, -}; - -use autopush_common::errors::{ApcError, ApcErrorKind, Result}; -use autopush_common::notification::Notification; -use autopush_common::util::timing::sec_since_epoch; - -use super::models::{DynamoDbNotification, DynamoDbUser}; -use super::util::generate_last_connect; -use super::{HelloResponse, MAX_EXPIRY, USER_RECORD_VERSION}; -use crate::MyFuture; - -macro_rules! retryable_error { - ($name:ident, $type:ty, $property:ident) => { - pub fn $name(err: &RusotoError<$type>) -> bool { - match err { - RusotoError::Service($property::InternalServerError(_)) - | RusotoError::Service($property::ProvisionedThroughputExceeded(_)) => true, - _ => false, - } - } - }; -} - -retryable_error!( - retryable_batchwriteitem_error, - BatchWriteItemError, - BatchWriteItemError -); -retryable_error!(retryable_query_error, QueryError, QueryError); -retryable_error!(retryable_delete_error, DeleteItemError, DeleteItemError); -retryable_error!(retryable_getitem_error, GetItemError, GetItemError); -retryable_error!(retryable_putitem_error, PutItemError, PutItemError); -retryable_error!(retryable_updateitem_error, UpdateItemError, UpdateItemError); - -#[derive(Default)] -pub struct FetchMessageResponse { - pub timestamp: Option, - pub messages: Vec, -} - -/// Indicate whether this last_connect falls in the current month -fn has_connected_this_month(user: &DynamoDbUser) -> bool { - user.last_connect.map_or(false, |v| { - let pat = Utc::now().format("%Y%m").to_string(); - v.to_string().starts_with(&pat) - }) -} - -/// A blocking list_tables call only called during initialization -/// (prior to an any active tokio executor) -pub fn list_tables_sync( - ddb: &DynamoDbClient, - start_key: Option, -) -> Result { - let input = ListTablesInput { - exclusive_start_table_name: start_key, - limit: Some(100), - }; - ddb.list_tables(input) - .sync() - .map_err(|_| 
ApcErrorKind::DatabaseError("Unable to list tables".into()).into()) -} - -/// Pull all pending messages for the user from storage -pub fn fetch_topic_messages( - ddb: DynamoDbClient, - metrics: Arc, - table_name: &str, - uaid: &Uuid, - limit: u32, -) -> impl Future { - let attr_values = hashmap! { - ":uaid".to_string() => val!(S => uaid.as_simple().to_string()), - ":cmi".to_string() => val!(S => "02"), - }; - let input = QueryInput { - key_condition_expression: Some("uaid = :uaid AND chidmessageid < :cmi".to_string()), - expression_attribute_values: Some(attr_values), - table_name: table_name.to_string(), - consistent_read: Some(true), - limit: Some(limit as i64), - ..Default::default() - }; - - retry_if(move || ddb.query(input.clone()), retryable_query_error) - .map_err(|_| ApcErrorKind::MessageFetch.into()) - .and_then(move |output| { - let mut notifs: Vec = - output.items.map_or_else(Vec::new, |items| { - debug!("Got response of: {:?}", items); - items - .into_iter() - .inspect(|i| debug!("Item: {:?}", i)) - .filter_map(|item| { - let item2 = item.clone(); - ok_or_inspect(serde_dynamodb::from_hashmap(item), |e| { - conversion_err(&metrics, e, item2, "serde_dynamodb_from_hashmap") - }) - }) - .collect() - }); - if notifs.is_empty() { - return Ok(Default::default()); - } - - // Load the current_timestamp from the subscription registry entry which is - // the first DynamoDbNotification and remove it from the vec. - let timestamp = notifs.remove(0).current_timestamp; - // Convert any remaining DynamoDbNotifications to Notification's - let messages = notifs - .into_iter() - .filter_map(|ddb_notif| { - let ddb_notif2 = ddb_notif.clone(); - ok_or_inspect(ddb_notif.into_notif(), |e| { - conversion_err(&metrics, e, ddb_notif2, "into_notif") - }) - }) - .collect(); - Ok(FetchMessageResponse { - timestamp, - messages, - }) - }) -} - -/// Pull messages older than a given timestamp for a given user. -/// This also returns the latest message timestamp. 
-pub fn fetch_timestamp_messages( - ddb: DynamoDbClient, - metrics: Arc, - table_name: &str, - uaid: &Uuid, - timestamp: Option, - limit: usize, -) -> impl Future { - let range_key = if let Some(ts) = timestamp { - format!("02:{ts}:z") - } else { - "01;".to_string() - }; - let attr_values = hashmap! { - ":uaid".to_string() => val!(S => uaid.as_simple().to_string()), - ":cmi".to_string() => val!(S => range_key), - }; - let input = QueryInput { - key_condition_expression: Some("uaid = :uaid AND chidmessageid > :cmi".to_string()), - expression_attribute_values: Some(attr_values), - table_name: table_name.to_string(), - consistent_read: Some(true), - limit: Some(limit as i64), - ..Default::default() - }; - - retry_if(move || ddb.query(input.clone()), retryable_query_error) - .map_err(|_| ApcErrorKind::MessageFetch.into()) - .and_then(move |output| { - let messages = output.items.map_or_else(Vec::new, |items| { - debug!("Got response of: {:?}", items); - items - .into_iter() - .filter_map(|item| { - let item2 = item.clone(); - ok_or_inspect(serde_dynamodb::from_hashmap(item), |e| { - conversion_err(&metrics, e, item2, "serde_dynamodb_from_hashmap") - }) - }) - .filter_map(|ddb_notif: DynamoDbNotification| { - let ddb_notif2 = ddb_notif.clone(); - ok_or_inspect(ddb_notif.into_notif(), |e| { - conversion_err(&metrics, e, ddb_notif2, "into_notif") - }) - }) - .collect() - }); - let timestamp = messages.iter().filter_map(|m| m.sortkey_timestamp).max(); - Ok(FetchMessageResponse { - timestamp, - messages, - }) - }) -} - -/// Drop all user information from the Router table. -pub fn drop_user( - ddb: DynamoDbClient, - uaid: &Uuid, - router_table_name: &str, -) -> impl Future { - let input = DeleteItemInput { - table_name: router_table_name.to_string(), - key: ddb_item! 
{ uaid: s => uaid.as_simple().to_string() }, - ..Default::default() - }; - retry_if( - move || ddb.delete_item(input.clone()), - retryable_delete_error, - ) - .map_err(|_| ApcErrorKind::DatabaseError("Error dropping user".into()).into()) -} - -/// Get the user information from the Router table. -pub fn get_uaid( - ddb: DynamoDbClient, - uaid: &Uuid, - router_table_name: &str, -) -> impl Future { - let input = GetItemInput { - table_name: router_table_name.to_string(), - consistent_read: Some(true), - key: ddb_item! { uaid: s => uaid.as_simple().to_string() }, - ..Default::default() - }; - retry_if(move || ddb.get_item(input.clone()), retryable_getitem_error).map_err(|e| { - error!("get_uaid: {:?}", e.to_string()); - ApcErrorKind::DatabaseError("Error fetching user".into()).into() - }) -} - -/// Register a user into the Router table. -pub fn register_user( - ddb: DynamoDbClient, - user: &DynamoDbUser, - router_table: &str, -) -> impl Future { - let item = match serde_dynamodb::to_hashmap(user) { - Ok(item) => item, - Err(e) => { - return future::Either::A(future::err( - ApcErrorKind::DatabaseError(e.to_string()).into(), - )) - } - }; - let router_table = router_table.to_string(); - let attr_values = hashmap! 
{ - ":router_type".to_string() => val!(S => user.router_type), - ":connected_at".to_string() => val!(N => user.connected_at), - }; - - future::Either::B( - retry_if( - move || { - debug!("🧑🏼 Registering user into {}: {:?}", router_table, item); - ddb.put_item(PutItemInput { - item: item.clone(), - table_name: router_table.clone(), - expression_attribute_values: Some(attr_values.clone()), - condition_expression: Some( - r#"( - attribute_not_exists(router_type) or - (router_type = :router_type) - ) and ( - attribute_not_exists(node_id) or - (connected_at < :connected_at) - )"# - .to_string(), - ), - return_values: Some("ALL_OLD".to_string()), - ..Default::default() - }) - }, - retryable_putitem_error, - ) - .map_err(|_| ApcErrorKind::DatabaseError("Error registering user".to_string()).into()), - ) -} - -/// Update the user's message month (Note: This is legacy for DynamoDB, but may still -/// be used by Stand Alone systems.) -pub fn update_user_message_month( - ddb: DynamoDbClient, - uaid: &Uuid, - router_table_name: &str, - message_month: &str, -) -> impl Future { - let attr_values = hashmap! { - ":curmonth".to_string() => val!(S => message_month.to_string()), - ":lastconnect".to_string() => val!(N => generate_last_connect().to_string()), - }; - let update_item = UpdateItemInput { - key: ddb_item! { uaid: s => uaid.as_simple().to_string() }, - update_expression: Some( - "SET current_month=:curmonth, last_connect=:lastconnect".to_string(), - ), - expression_attribute_values: Some(attr_values), - table_name: router_table_name.to_string(), - ..Default::default() - }; - - retry_if( - move || { - ddb.update_item(update_item.clone()) - .and_then(|_| future::ok(())) - }, - retryable_updateitem_error, - ) - .map_err(|_e| ApcErrorKind::DatabaseError("Error updating user message month".into()).into()) -} - -/// Return all known Channels for a given User. 
-pub fn all_channels( - ddb: DynamoDbClient, - uaid: &Uuid, - message_table_name: &str, -) -> impl Future, Error = ApcError> { - let input = GetItemInput { - table_name: message_table_name.to_string(), - consistent_read: Some(true), - key: ddb_item! { - uaid: s => uaid.as_simple().to_string(), - chidmessageid: s => " ".to_string() - }, - ..Default::default() - }; - - retry_if(move || ddb.get_item(input.clone()), retryable_getitem_error) - .and_then(|output| { - let channels = output - .item - .and_then(|item| { - serde_dynamodb::from_hashmap(item) - .ok() - .and_then(|notif: DynamoDbNotification| notif.chids) - }) - .unwrap_or_default(); - future::ok(channels) - }) - .or_else(|_err| future::ok(HashSet::new())) -} - -/// Save the current list of Channels for a given user. -pub fn save_channels( - ddb: DynamoDbClient, - uaid: &Uuid, - channels: HashSet, - message_table_name: &str, -) -> impl Future { - let chids: Vec = channels.into_iter().collect(); - let expiry = sec_since_epoch() + 2 * MAX_EXPIRY; - let attr_values = hashmap! { - ":chids".to_string() => val!(SS => chids), - ":expiry".to_string() => val!(N => expiry), - }; - let update_item = UpdateItemInput { - key: ddb_item! 
{ - uaid: s => uaid.as_simple().to_string(), - chidmessageid: s => " ".to_string() - }, - update_expression: Some("ADD chids :chids SET expiry=:expiry".to_string()), - expression_attribute_values: Some(attr_values), - table_name: message_table_name.to_string(), - ..Default::default() - }; - - retry_if( - move || { - ddb.update_item(update_item.clone()) - .and_then(|_| future::ok(())) - }, - retryable_updateitem_error, - ) - .map_err(|_e| ApcErrorKind::DatabaseError("Error saving channels".into()).into()) -} - -/// Remove a specific channel from the list of known channels for a given User -pub fn unregister_channel_id( - ddb: DynamoDbClient, - uaid: &Uuid, - channel_id: &Uuid, - message_table_name: &str, -) -> impl Future { - let chid = channel_id.as_hyphenated().to_string(); - let attr_values = hashmap! { - ":channel_id".to_string() => val!(SS => [chid]), - }; - let update_item = UpdateItemInput { - key: ddb_item! { - uaid: s => uaid.as_simple().to_string(), - chidmessageid: s => " ".to_string() - }, - update_expression: Some("DELETE chids :channel_id".to_string()), - expression_attribute_values: Some(attr_values), - table_name: message_table_name.to_string(), - ..Default::default() - }; - - retry_if( - move || ddb.update_item(update_item.clone()), - retryable_updateitem_error, - ) - .map_err(|_e| ApcErrorKind::DatabaseError("Error unregistering channel".into()).into()) -} - -/// Respond with user information for a given user. 
-#[allow(clippy::too_many_arguments)] -pub fn lookup_user( - ddb: DynamoDbClient, - metrics: Arc, - uaid: &Uuid, - connected_at: u64, - router_url: &str, - router_table_name: &str, - message_table_names: &[String], - current_message_month: &str, -) -> MyFuture<(HelloResponse, Option)> { - let response = get_uaid(ddb.clone(), uaid, router_table_name); - // Prep all these for the move into the static closure capture - let cur_month = current_message_month.to_string(); - let uaid2 = *uaid; - let router_table = router_table_name.to_string(); - let messages_tables = message_table_names.to_vec(); - let router_url = router_url.to_string(); - let response = response.and_then(move |data| -> MyFuture<_> { - let mut hello_response = HelloResponse { - message_month: cur_month.clone(), - connected_at, - ..Default::default() - }; - let user = handle_user_result( - &cur_month, - &messages_tables, - connected_at, - router_url, - data, - &mut hello_response, - ); - match user { - Ok(user) => { - trace!("🧑 returning user: {:?}", user.uaid); - Box::new(future::ok((hello_response, Some(user)))) - } - Err((false, _)) => { - trace!("🧑 handle_user_result false, _: {:?}", uaid2); - Box::new(future::ok((hello_response, None))) - } - Err((true, code)) => { - trace!("🧑 handle_user_result true, {}: {:?}", uaid2, code); - metrics - .incr_with_tags("ua.expiration") - .with_tag("code", &code.to_string()) - .send(); - let response = drop_user(ddb, &uaid2, &router_table) - .and_then(|_| future::ok((hello_response, None))) - .map_err(|_e| ApcErrorKind::DatabaseError("Unable to drop user".into()).into()); - Box::new(response) - } - } - }); - Box::new(response) -} - -/// Helper function for determining if a returned user record is valid for use -/// or if it should be dropped and a new one created. 
-fn handle_user_result( - cur_month: &str, - messages_tables: &[String], - connected_at: u64, - router_url: String, - data: GetItemOutput, - hello_response: &mut HelloResponse, -) -> StdResult { - let item = data.item.ok_or((false, 104))?; - let mut user: DynamoDbUser = serde_dynamodb::from_hashmap(item).map_err(|_| (true, 104))?; - - let user_month = user.current_month.clone().ok_or((true, 104))?; - if !messages_tables.contains(&user_month) { - return Err((true, 105)); - } - hello_response.check_storage = true; - hello_response.rotate_message_table = user_month != *cur_month; - hello_response.message_month = user_month; - hello_response.old_record_version = user - .record_version - .map_or(true, |rec_ver| rec_ver < USER_RECORD_VERSION); - - user.last_connect = if has_connected_this_month(&user) { - None - } else { - Some(generate_last_connect()) - }; - user.node_id = Some(router_url); - user.connected_at = connected_at; - Ok(user) -} - -/// Like `Result::ok`, convert from `Result` to `Option` but applying a -/// function to the Err value -fn ok_or_inspect(result: StdResult, op: F) -> Option -where - F: FnOnce(E), -{ - match result { - Ok(t) => Some(t), - Err(e) => { - op(e); - None - } - } -} - -/// Log/metric errors during conversions to Notification -fn conversion_err(metrics: &StatsdClient, err: E, item: F, name: &'static str) -where - E: Display, - F: Debug, -{ - error!("Failed {}, item: {:?}, conversion: {}", name, item, err); - metrics - .incr_with_tags("ua.notification_read.error") - .with_tag("conversion", name) - .send(); -} diff --git a/autopush/src/db/macros.rs b/autopush/src/db/macros.rs deleted file mode 100644 index bb944e0f2..000000000 --- a/autopush/src/db/macros.rs +++ /dev/null @@ -1,113 +0,0 @@ -/// A bunch of macro helpers from rusoto_helpers code, which they pulled from crates.io because -/// they were waiting for rusuto to hit 1.0.0 or something. For sanity, they are instead accumulated -/// here for our use. 
-#[allow(unused_macros)] -macro_rules! attributes { - ($($val:expr => $attr_type:expr),*) => { - { - let mut temp_vec = Vec::new(); - $( - temp_vec.push(AttributeDefinition { - attribute_name: String::from($val), - attribute_type: String::from($attr_type) - }); - )* - temp_vec - } - } -} - -#[allow(unused_macros)] -macro_rules! key_schema { - ($($name:expr => $key_type:expr),*) => { - { - let mut temp_vec = Vec::new(); - $( - temp_vec.push(KeySchemaElement { - key_type: String::from($key_type), - attribute_name: String::from($name) - }); - )* - temp_vec - } - } -} - -#[macro_export] -macro_rules! val { - (B => $val:expr) => { - AttributeValue { - b: Some($val), - ..Default::default() - } - }; - (S => $val:expr) => { - AttributeValue { - s: Some($val.to_string()), - ..Default::default() - } - }; - (SS => $val:expr) => { - AttributeValue { - ss: Some($val.iter().map(|v| v.to_string()).collect()), - ..Default::default() - } - }; - (N => $val:expr) => { - AttributeValue { - n: Some($val.to_string()), - ..Default::default() - } - }; -} - -/// Create a **HashMap** from a list of key-value pairs -/// -/// ## Example -/// -/// ```ignore -/// use autopush_common::hashmap; -/// -/// let map = hashmap!{ -/// "a" => 1, -/// "b" => 2, -/// }; -/// assert_eq!(map["a"], 1); -/// assert_eq!(map["b"], 2); -/// assert_eq!(map.get("c"), None); -/// ``` -#[macro_export] -macro_rules! hashmap { - (@single $($x:tt)*) => (()); - (@count $($rest:expr),*) => (<[()]>::len(&[$($crate::hashmap!(@single $rest)),*])); - - ($($key:expr => $value:expr,)+) => { $crate::hashmap!($($key => $value),+) }; - ($($key:expr => $value:expr),*) => { - { - let _cap = $crate::hashmap!(@count $($key),*); - let mut _map = ::std::collections::HashMap::with_capacity(_cap); - $( - _map.insert($key, $value); - )* - _map - } - }; -} - -/// Shorthand for specifying a dynamodb item -#[macro_export] -macro_rules! 
ddb_item { - ($($p:tt: $t:tt => $x:expr),*) => { - { - use rusoto_dynamodb::AttributeValue; - $crate::hashmap!{ - $( - String::from(stringify!($p)) => AttributeValue { - $t: Some($x), - ..Default::default() - }, - )* - } - } - } -} diff --git a/autopush/src/db/mod.rs b/autopush/src/db/mod.rs deleted file mode 100644 index beb223b2c..000000000 --- a/autopush/src/db/mod.rs +++ /dev/null @@ -1,597 +0,0 @@ -use std::collections::HashSet; -use std::env; -use std::sync::Arc; -use uuid::Uuid; - -use cadence::{Counted, CountedExt, StatsdClient}; -use futures::{future, Future}; -use futures_backoff::retry_if; -use rusoto_core::{HttpClient, Region}; -use rusoto_credential::StaticProvider; -use rusoto_dynamodb::{ - AttributeValue, BatchWriteItemInput, DeleteItemInput, DynamoDb, DynamoDbClient, PutItemInput, - PutRequest, UpdateItemInput, UpdateItemOutput, WriteRequest, -}; - -#[macro_use] -mod macros; -mod commands; -mod models; -mod util; - -use autopush_common::errors::{ApcError, ApcErrorKind, Result}; -use autopush_common::notification::Notification; -use autopush_common::util::timing::sec_since_epoch; - -use self::commands::{ - retryable_batchwriteitem_error, retryable_delete_error, retryable_putitem_error, - retryable_updateitem_error, FetchMessageResponse, -}; -pub use self::models::{DynamoDbNotification, DynamoDbUser}; -use crate::MyFuture; - -const MAX_EXPIRY: u64 = 2_592_000; -const USER_RECORD_VERSION: u8 = 1; - -/// Basic requirements for notification content to deliver to websocket client -/// - channelID (the subscription website intended for) -/// - version (only really utilized for notification acknowledgement in -/// webpush, used to be the sole carrier of data, can now be anything) -/// - data (encrypted content) -/// - headers (hash of crypto headers: encoding, encrypption, crypto-key, encryption-key) -#[derive(Default, Clone)] -pub struct HelloResponse { - pub uaid: Option, - pub message_month: String, - pub check_storage: bool, - pub old_record_version: 
bool, - pub rotate_message_table: bool, - pub connected_at: u64, - // Exists when we didn't register this user during HELLO - pub deferred_user_registration: Option, -} - -pub struct CheckStorageResponse { - pub include_topic: bool, - pub messages: Vec, - pub timestamp: Option, -} - -pub enum RegisterResponse { - Success { endpoint: String }, - Error { error_msg: String, status: u32 }, -} - -#[derive(Clone)] -pub struct DynamoStorage { - ddb: DynamoDbClient, - metrics: Arc, - router_table_name: String, - pub message_table_names: Vec, - pub current_message_month: String, -} - -impl DynamoStorage { - pub fn from_settings( - message_table_name: &str, - router_table_name: &str, - metrics: Arc, - ) -> Result { - debug!( - "Checking tables: message = {:?} & router = {:?}", - &message_table_name, &router_table_name - ); - let ddb = if let Ok(endpoint) = env::var("AWS_LOCAL_DYNAMODB") { - DynamoDbClient::new_with( - HttpClient::new().map_err(|e| { - ApcErrorKind::GeneralError(format!("TLS initialization error {e:?}")) - })?, - StaticProvider::new_minimal("BogusKey".to_string(), "BogusKey".to_string()), - Region::Custom { - name: "us-east-1".to_string(), - endpoint, - }, - ) - } else { - DynamoDbClient::new(Region::default()) - }; - - let mut message_table_names = list_message_tables(&ddb, message_table_name) - .map_err(|_| ApcErrorKind::DatabaseError("Failed to locate message tables".into()))?; - // Valid message months are the current and last 2 months - message_table_names.sort_unstable_by(|a, b| b.cmp(a)); - message_table_names.truncate(3); - message_table_names.reverse(); - let current_message_month = message_table_names - .last() - .ok_or("No last message month found") - .map_err(|_e| ApcErrorKind::GeneralError("No last message month found".into()))? 
- .to_string(); - - Ok(Self { - ddb, - metrics, - router_table_name: router_table_name.to_owned(), - message_table_names, - current_message_month, - }) - } - - pub fn increment_storage( - &self, - table_name: &str, - uaid: &Uuid, - timestamp: &str, - ) -> impl Future { - let ddb = self.ddb.clone(); - let expiry = sec_since_epoch() + 2 * MAX_EXPIRY; - let attr_values = hashmap! { - ":timestamp".to_string() => val!(N => timestamp), - ":expiry".to_string() => val!(N => expiry), - }; - let update_input = UpdateItemInput { - key: ddb_item! { - uaid: s => uaid.as_simple().to_string(), - chidmessageid: s => " ".to_string() - }, - update_expression: Some("SET current_timestamp=:timestamp, expiry=:expiry".to_string()), - expression_attribute_values: Some(attr_values), - table_name: table_name.to_string(), - ..Default::default() - }; - - retry_if( - move || ddb.update_item(update_input.clone()), - retryable_updateitem_error, - ) - .map_err(|e| ApcErrorKind::DatabaseError(e.to_string()).into()) - } - - pub fn hello( - &self, - connected_at: u64, - uaid: Option<&Uuid>, - router_url: &str, - defer_registration: bool, - ) -> impl Future { - trace!( - "🧑🏼 uaid {:?}, defer_registration: {:?}", - &uaid, - &defer_registration - ); - let response: MyFuture<(HelloResponse, Option)> = if let Some(uaid) = uaid { - commands::lookup_user( - self.ddb.clone(), - self.metrics.clone(), - uaid, - connected_at, - router_url, - &self.router_table_name, - &self.message_table_names, - &self.current_message_month, - ) - } else { - Box::new(future::ok(( - HelloResponse { - message_month: self.current_message_month.clone(), - connected_at, - ..Default::default() - }, - None, - ))) - }; - let ddb = self.ddb.clone(); - let router_url = router_url.to_string(); - let router_table_name = self.router_table_name.clone(); - - response.and_then(move |(mut hello_response, user_opt)| { - trace!( - "💬 Hello Response: {:?}, {:?}", - hello_response.uaid, - user_opt - ); - let hello_message_month = 
hello_response.message_month.clone(); - let user = user_opt.unwrap_or_else(|| DynamoDbUser { - current_month: Some(hello_message_month), - node_id: Some(router_url), - connected_at, - ..Default::default() - }); - let uaid = user.uaid; - trace!("🧑 UAID = {:?}", &uaid); - let mut err_response = hello_response.clone(); - err_response.connected_at = connected_at; - if !defer_registration { - future::Either::A( - commands::register_user(ddb, &user, &router_table_name) - .and_then(move |result| { - debug!("Success adding user, item output: {:?}", result); - hello_response.uaid = Some(uaid); - future::ok(hello_response) - }) - .or_else(move |e| { - debug!("Error registering user: {:?}", e); - future::ok(err_response) - }), - ) - } else { - debug!("Deferring user registration {:?}", &uaid); - hello_response.uaid = Some(uaid); - hello_response.deferred_user_registration = Some(user); - future::Either::B(Box::new(future::ok(hello_response))) - } - }) - } - - pub fn register_channel( - &self, - uaid: &Uuid, - channel_id: &Uuid, - message_month: &str, - endpoint: &str, - register_user: Option<&DynamoDbUser>, - ) -> MyFuture { - let ddb = self.ddb.clone(); - let mut chids = HashSet::new(); - let endpoint = endpoint.to_owned(); - chids.insert(channel_id.as_hyphenated().to_string()); - - if let Some(user) = register_user { - trace!( - "💬 Endpoint Request: User not yet registered... {:?}", - &user.uaid - ); - let uaid2 = *uaid; - let message_month2 = message_month.to_owned(); - let response = commands::register_user(ddb.clone(), user, &self.router_table_name) - .and_then(move |_| { - trace!("✍️ Saving channels: {:#?}", chids); - commands::save_channels(ddb, &uaid2, chids, &message_month2) - .and_then(move |_| { - trace!("✉️ sending endpoint: {}", endpoint); - future::ok(RegisterResponse::Success { endpoint }) - }) - .or_else(move |r| { - trace!("--- failed to register channel. 
{:?}", r); - future::ok(RegisterResponse::Error { - status: 503, - error_msg: "Failed to register channel".to_string(), - }) - }) - }); - return Box::new(response); - }; - trace!("❓ Continuing register..."); - let response = commands::save_channels(ddb, uaid, chids, message_month) - .and_then(move |_| future::ok(RegisterResponse::Success { endpoint })) - .or_else(move |_| { - future::ok(RegisterResponse::Error { - status: 503, - error_msg: "Failed to register channel".to_string(), - }) - }); - Box::new(response) - } - - pub fn drop_uaid(&self, uaid: &Uuid) -> impl Future { - commands::drop_user(self.ddb.clone(), uaid, &self.router_table_name) - .and_then(|_| future::ok(())) - .map_err(|_| ApcErrorKind::DatabaseError("Unable to drop user record".into()).into()) - } - - pub fn unregister_channel( - &self, - uaid: &Uuid, - channel_id: &Uuid, - message_month: &str, - ) -> impl Future { - commands::unregister_channel_id(self.ddb.clone(), uaid, channel_id, message_month) - .and_then(|_| future::ok(true)) - .or_else(|_| future::ok(false)) - } - - /// Migrate a user to a new month table - pub fn migrate_user( - &self, - uaid: &Uuid, - message_month: &str, - ) -> impl Future { - let uaid = *uaid; - let ddb = self.ddb.clone(); - let ddb2 = self.ddb.clone(); - let cur_month = self.current_message_month.to_string(); - let cur_month2 = cur_month.clone(); - let router_table_name = self.router_table_name.clone(); - - commands::all_channels(self.ddb.clone(), &uaid, message_month) - .and_then(move |channels| -> MyFuture<_> { - if channels.is_empty() { - Box::new(future::ok(())) - } else { - Box::new(commands::save_channels(ddb, &uaid, channels, &cur_month)) - } - }) - .and_then(move |_| { - commands::update_user_message_month(ddb2, &uaid, &router_table_name, &cur_month2) - }) - .and_then(|_| future::ok(())) - .map_err(|_e| ApcErrorKind::DatabaseError("Unable to migrate user".into()).into()) - } - - /// Store a single message - #[allow(dead_code)] - pub fn store_message( - &self, - 
uaid: &Uuid, - message_month: String, - message: Notification, - ) -> impl Future { - let topic = message.topic.is_some().to_string(); - let ddb = self.ddb.clone(); - let put_item = PutItemInput { - item: serde_dynamodb::to_hashmap(&DynamoDbNotification::from_notif(uaid, message)) - .unwrap(), - table_name: message_month, - ..Default::default() - }; - let metrics = self.metrics.clone(); - retry_if( - move || ddb.put_item(put_item.clone()), - retryable_putitem_error, - ) - .and_then(move |_| { - let mut metric = metrics.incr_with_tags("notification.message.stored"); - // TODO: include `internal` if meta is set. - metric = metric.with_tag("topic", &topic); - metric.send(); - future::ok(()) - }) - .map_err(|_| ApcErrorKind::DatabaseError("Error saving notification".into()).into()) - } - - /// Store a batch of messages when shutting down - pub fn store_messages( - &self, - uaid: &Uuid, - message_month: &str, - messages: Vec, - ) -> impl Future { - let ddb = self.ddb.clone(); - let metrics = self.metrics.clone(); - let put_items: Vec = messages - .into_iter() - .filter_map(|n| { - // eventually include `internal` if `meta` defined. - metrics - .incr_with_tags("notification.message.stored") - .with_tag("topic", &n.topic.is_some().to_string()) - .send(); - serde_dynamodb::to_hashmap(&DynamoDbNotification::from_notif(uaid, n)) - .ok() - .map(|hm| WriteRequest { - put_request: Some(PutRequest { item: hm }), - delete_request: None, - }) - }) - .collect(); - let batch_input = BatchWriteItemInput { - request_items: hashmap! 
{ message_month.to_string() => put_items }, - ..Default::default() - }; - - retry_if( - move || ddb.batch_write_item(batch_input.clone()), - retryable_batchwriteitem_error, - ) - .and_then(|_| future::ok(())) - .map_err(|err| { - debug!("Error saving notification: {:?}", err); - err - }) - // TODO: Use Sentry to capture/report this error - .map_err(|_e| ApcErrorKind::DatabaseError("Error saving notifications".into()).into()) - } - - /// Delete a given notification from the database - /// - /// No checks are done to see that this message came from the database or has - /// sufficient properties for a delete as that is expected to have been done - /// before this is called. - pub fn delete_message( - &self, - table_name: &str, - uaid: &Uuid, - notif: &Notification, - ) -> impl Future { - let topic = notif.topic.is_some().to_string(); - let ddb = self.ddb.clone(); - let metrics = self.metrics.clone(); - let delete_input = DeleteItemInput { - table_name: table_name.to_string(), - key: ddb_item! { - uaid: s => uaid.as_simple().to_string(), - chidmessageid: s => notif.chidmessageid() - }, - ..Default::default() - }; - - retry_if( - move || ddb.delete_item(delete_input.clone()), - retryable_delete_error, - ) - .and_then(move |_| { - let mut metric = metrics.incr_with_tags("notification.message.deleted"); - // TODO: include `internal` if meta is set. - metric = metric.with_tag("topic", &topic); - metric.send(); - future::ok(()) - }) - .map_err(|_| ApcErrorKind::DatabaseError("Error deleting notification".into()).into()) - } - - /// Check to see if we have pending messages and return them if we do. 
- pub fn check_storage( - &self, - table_name: &str, - uaid: &Uuid, - include_topic: bool, - timestamp: Option, - ) -> impl Future { - let response: MyFuture = if include_topic { - Box::new(commands::fetch_topic_messages( - self.ddb.clone(), - self.metrics.clone(), - table_name, - uaid, - 11, - )) - } else { - Box::new(future::ok(Default::default())) - }; - let uaid = *uaid; - let table_name = table_name.to_string(); - let ddb = self.ddb.clone(); - let metrics = self.metrics.clone(); - let rmetrics = metrics.clone(); - - response.and_then(move |resp| -> MyFuture<_> { - // Return now from this future if we have messages - if !resp.messages.is_empty() { - debug!("Topic message returns: {:?}", resp.messages); - rmetrics - .count_with_tags("notification.message.retrieved", resp.messages.len() as i64) - .with_tag("topic", "true") - .send(); - return Box::new(future::ok(CheckStorageResponse { - include_topic: true, - messages: resp.messages, - timestamp: resp.timestamp, - })); - } - // Use the timestamp returned by the topic query if we were looking at the topics - let timestamp = if include_topic { - resp.timestamp - } else { - timestamp - }; - let next_query: MyFuture<_> = { - if resp.messages.is_empty() || resp.timestamp.is_some() { - Box::new(commands::fetch_timestamp_messages( - ddb, - metrics, - table_name.as_ref(), - &uaid, - timestamp, - 10, - )) - } else { - Box::new(future::ok(Default::default())) - } - }; - let next_query = next_query.and_then(move |resp: FetchMessageResponse| { - // If we didn't get a timestamp off the last query, use the original - // value if passed one - let timestamp = resp.timestamp.or(timestamp); - rmetrics - .count_with_tags("notification.message.retrieved", resp.messages.len() as i64) - .with_tag("topic", "false") - .send(); - Ok(CheckStorageResponse { - include_topic: false, - messages: resp.messages, - timestamp, - }) - }); - Box::new(next_query) - }) - } - - pub fn get_user( - &self, - uaid: &Uuid, - ) -> impl Future, Error = 
ApcError> { - let ddb = self.ddb.clone(); - let result = commands::get_uaid(ddb, uaid, &self.router_table_name).and_then(|result| { - future::result( - result - .item - .map(|item| { - let user = serde_dynamodb::from_hashmap(item); - user.map_err(|_| { - ApcErrorKind::DatabaseError("Error deserializing".into()).into() - }) - }) - .transpose(), - ) - }); - Box::new(result) - } - - /// Get the set of channel IDs for a user - #[allow(dead_code)] - pub fn get_user_channels( - &self, - uaid: &Uuid, - message_table: &str, - ) -> impl Future, Error = ApcError> { - commands::all_channels(self.ddb.clone(), uaid, message_table).and_then(|channels| { - channels - .into_iter() - .map(|channel| channel.parse().map_err(|e| ApcErrorKind::from(e).into())) - .collect::>() - }) - } - - /// Remove the node ID from a user in the router table. - /// The node ID will only be cleared if `connected_at` matches up - /// with the item's `connected_at`. - #[allow(dead_code)] - pub fn remove_node_id( - &self, - uaid: &Uuid, - node_id: String, - connected_at: u64, - ) -> impl Future { - let ddb = self.ddb.clone(); - let update_item = UpdateItemInput { - key: ddb_item! { uaid: s => uaid.as_simple().to_string() }, - update_expression: Some("REMOVE node_id".to_string()), - condition_expression: Some("(node_id = :node) and (connected_at = :conn)".to_string()), - expression_attribute_values: Some(hashmap! 
{ - ":node".to_string() => val!(S => node_id), - ":conn".to_string() => val!(N => connected_at.to_string()) - }), - table_name: self.router_table_name.clone(), - ..Default::default() - }; - - retry_if( - move || ddb.update_item(update_item.clone()), - retryable_updateitem_error, - ) - .and_then(|_| future::ok(())) - .map_err(|_| ApcErrorKind::DatabaseError("Error removing node ID".into()).into()) - } -} - -/// Get the list of current, valid message tables (Note: This is legacy for DynamoDB, but may still -/// be used for Stand Alone systems ) -pub fn list_message_tables(ddb: &DynamoDbClient, prefix: &str) -> Result> { - let mut names: Vec = Vec::new(); - let mut start_key = None; - loop { - let result = commands::list_tables_sync(ddb, start_key)?; - start_key = result.last_evaluated_table_name; - if let Some(table_names) = result.table_names { - names.extend(table_names); - } - if start_key.is_none() { - break; - } - } - let names = names - .into_iter() - .filter(|name| name.starts_with(prefix)) - .collect(); - Ok(names) -} diff --git a/autopush/src/db/models.rs b/autopush/src/db/models.rs deleted file mode 100644 index f0a47f622..000000000 --- a/autopush/src/db/models.rs +++ /dev/null @@ -1,300 +0,0 @@ -use std::collections::{HashMap, HashSet}; -use std::result::Result as StdResult; - -use lazy_static::lazy_static; -use regex::RegexSet; -use serde::Serializer; -use serde_derive::{Deserialize, Serialize}; -use uuid::Uuid; - -use crate::db::util::generate_last_connect; -use autopush_common::errors::*; -use autopush_common::notification::{ - Notification, STANDARD_NOTIFICATION_PREFIX, TOPIC_NOTIFICATION_PREFIX, -}; -use autopush_common::util::timing::ms_since_epoch; -use autopush_common::util::InsertOpt; - -use super::USER_RECORD_VERSION; - -/// Custom Uuid serializer -/// -/// Serializes a Uuid as a simple string instead of hyphenated -fn uuid_serializer(x: &Uuid, s: S) -> StdResult -where - S: Serializer, -{ - s.serialize_str(&x.as_simple().to_string()) -} - -/// 
Direct representation of a DynamoDB Notification as we store it in the database -/// Most attributes are optional -#[derive(Default, Deserialize, PartialEq, Debug, Clone, Serialize)] -struct NotificationHeaders { - #[serde(skip_serializing_if = "Option::is_none")] - crypto_key: Option, - #[serde(skip_serializing_if = "Option::is_none")] - encryption: Option, - #[serde(skip_serializing_if = "Option::is_none")] - encryption_key: Option, - #[serde(skip_serializing_if = "Option::is_none")] - encoding: Option, -} - -#[allow(clippy::implicit_hasher)] -impl From for HashMap { - fn from(val: NotificationHeaders) -> Self { - let mut map = Self::new(); - map.insert_opt("crypto_key", val.crypto_key); - map.insert_opt("encryption", val.encryption); - map.insert_opt("encryption_key", val.encryption_key); - map.insert_opt("encoding", val.encoding); - map - } -} - -impl From> for NotificationHeaders { - fn from(val: HashMap) -> Self { - Self { - crypto_key: val.get("crypto_key").map(|v| v.to_string()), - encryption: val.get("encryption").map(|v| v.to_string()), - encryption_key: val.get("encryption_key").map(|v| v.to_string()), - encoding: val.get("encoding").map(|v| v.to_string()), - } - } -} - -#[derive(Deserialize, Eq, PartialEq, Debug, Clone, Serialize)] -pub struct DynamoDbUser { - // DynamoDB - #[serde(serialize_with = "uuid_serializer")] - pub uaid: Uuid, - // Time in milliseconds that the user last connected at - pub connected_at: u64, - // Router type of the user - pub router_type: String, - // Router-specific data - pub router_data: Option>, - // Keyed time in a month the user last connected at with limited key range for indexing - #[serde(skip_serializing_if = "Option::is_none")] - pub last_connect: Option, - // Last node/port the client was or may be connected to - #[serde(skip_serializing_if = "Option::is_none")] - pub node_id: Option, - // Record version - #[serde(skip_serializing_if = "Option::is_none")] - pub record_version: Option, - // Current month table in the 
database the user is on - #[serde(skip_serializing_if = "Option::is_none")] - pub current_month: Option, -} - -impl Default for DynamoDbUser { - fn default() -> Self { - let uaid = Uuid::new_v4(); - //trace!(">>> Setting default uaid: {:?}", &uaid); - Self { - uaid, - connected_at: ms_since_epoch(), - router_type: "webpush".to_string(), - router_data: None, - last_connect: Some(generate_last_connect()), - node_id: None, - record_version: Some(USER_RECORD_VERSION), - current_month: None, - } - } -} - -#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)] -pub struct DynamoDbNotification { - // DynamoDB - #[serde(serialize_with = "uuid_serializer")] - uaid: Uuid, - // DynamoDB - // Format: - // Topic Messages: - // {TOPIC_NOTIFICATION_PREFIX}:{channel id}:{topic} - // New Messages: - // {STANDARD_NOTIFICATION_PREFIX}:{timestamp int in microseconds}:{channel id} - chidmessageid: String, - // Magic entry stored in the first Message record that indicates the highest - // non-topic timestamp we've read into - #[serde(skip_serializing_if = "Option::is_none")] - pub current_timestamp: Option, - // Magic entry stored in the first Message record that indicates the valid - // channel id's - #[serde(skip_serializing)] - pub chids: Option>, - // Time in seconds from epoch - #[serde(skip_serializing_if = "Option::is_none")] - timestamp: Option, - // DynamoDB expiration timestamp per - // https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/TTL.html - expiry: u64, - // TTL value provided by application server for the message - #[serde(skip_serializing_if = "Option::is_none")] - ttl: Option, - #[serde(skip_serializing_if = "Option::is_none")] - data: Option, - #[serde(skip_serializing_if = "Option::is_none")] - headers: Option, - // This is the acknowledgement-id used for clients to ack that they have received the - // message. Some Python code refers to this as a message_id. Endpoints generate this - // value before sending it to storage or a connection node. 
- #[serde(skip_serializing_if = "Option::is_none")] - updateid: Option, -} - -/// Ensure that the default for 'stored' is true. -impl Default for DynamoDbNotification { - fn default() -> Self { - Self { - uaid: Uuid::default(), - chidmessageid: String::default(), - current_timestamp: None, - chids: None, - timestamp: None, - expiry: 0, - ttl: None, - data: None, - headers: None, - updateid: None, - } - } -} - -impl DynamoDbNotification { - fn parse_sort_key(key: &str) -> Result { - lazy_static! { - static ref RE: RegexSet = - RegexSet::new([r"^01:\S+:\S+$", r"^02:\d+:\S+$", r"^\S{3,}:\S+$",]).unwrap(); - } - if !RE.is_match(key) { - return Err(ApcErrorKind::GeneralError("Invalid chidmessageid".into()).into()); - } - - let v: Vec<&str> = key.split(':').collect(); - match v[0] { - TOPIC_NOTIFICATION_PREFIX => { - if v.len() != 3 { - return Err(ApcErrorKind::GeneralError("Invalid topic key".into()).into()); - } - let (channel_id, topic) = (v[1], v[2]); - let channel_id = Uuid::parse_str(channel_id)?; - Ok(RangeKey { - channel_id, - topic: Some(topic.to_string()), - sortkey_timestamp: None, - legacy_version: None, - }) - } - STANDARD_NOTIFICATION_PREFIX => { - if v.len() != 3 { - return Err(ApcErrorKind::GeneralError("Invalid topic key".into()).into()); - } - let (sortkey, channel_id) = (v[1], v[2]); - let channel_id = Uuid::parse_str(channel_id)?; - Ok(RangeKey { - channel_id, - topic: None, - sortkey_timestamp: Some(sortkey.parse()?), - legacy_version: None, - }) - } - _ => { - if v.len() != 2 { - return Err(ApcErrorKind::GeneralError("Invalid topic key".into()).into()); - } - let (channel_id, legacy_version) = (v[0], v[1]); - let channel_id = Uuid::parse_str(channel_id)?; - Ok(RangeKey { - channel_id, - topic: None, - sortkey_timestamp: None, - legacy_version: Some(legacy_version.to_string()), - }) - } - } - } - - // TODO: Implement as TryFrom whenever that lands - pub fn into_notif(self) -> Result { - let key = Self::parse_sort_key(&self.chidmessageid)?; - let 
version = key - .legacy_version - .or(self.updateid) - .ok_or("No valid updateid/version found") - .map_err(|e| ApcErrorKind::GeneralError(e.to_string()))?; - - Ok(Notification { - channel_id: key.channel_id, - version, - ttl: self.ttl.unwrap_or(0), - timestamp: self - .timestamp - .ok_or("No timestamp found") - .map_err(|e| ApcErrorKind::GeneralError(e.to_string()))?, - topic: key.topic, - data: self.data, - headers: self.headers.map(|m| m.into()), - sortkey_timestamp: key.sortkey_timestamp, - }) - } - - pub fn from_notif(uaid: &Uuid, val: Notification) -> Self { - Self { - uaid: *uaid, - chidmessageid: val.chidmessageid(), - timestamp: Some(val.timestamp), - ttl: Some(val.ttl), - data: val.data, - headers: val.headers.map(|h| h.into()), - updateid: Some(val.version), - ..Default::default() - } - } -} - -struct RangeKey { - channel_id: Uuid, - topic: Option, - pub sortkey_timestamp: Option, - legacy_version: Option, -} - -#[cfg(test)] -mod tests { - use super::DynamoDbNotification; - use autopush_common::util::us_since_epoch; - use uuid::Uuid; - - #[test] - fn test_parse_sort_key_ver1() { - let chid = Uuid::new_v4(); - let chidmessageid = format!("01:{}:mytopic", chid.as_hyphenated()); - let key = DynamoDbNotification::parse_sort_key(&chidmessageid).unwrap(); - assert_eq!(key.topic, Some("mytopic".to_string())); - assert_eq!(key.channel_id, chid); - assert_eq!(key.sortkey_timestamp, None); - } - - #[test] - fn test_parse_sort_key_ver2() { - let chid = Uuid::new_v4(); - let sortkey_timestamp = us_since_epoch(); - let chidmessageid = format!("02:{}:{}", sortkey_timestamp, chid.as_hyphenated()); - let key = DynamoDbNotification::parse_sort_key(&chidmessageid).unwrap(); - assert_eq!(key.topic, None); - assert_eq!(key.channel_id, chid); - assert_eq!(key.sortkey_timestamp, Some(sortkey_timestamp)); - } - - #[test] - fn test_parse_sort_key_bad_values() { - for val in &["02j3i2o", "03:ffas:wef", "01::mytopic", "02:oops:ohnoes"] { - let key = 
DynamoDbNotification::parse_sort_key(val); - assert!(key.is_err()); - } - } -} diff --git a/autopush/src/db/util.rs b/autopush/src/db/util.rs deleted file mode 100644 index 05c28e0dc..000000000 --- a/autopush/src/db/util.rs +++ /dev/null @@ -1,15 +0,0 @@ -use chrono::Utc; -use rand::{thread_rng, Rng}; - -/// Generate a last_connect -/// -/// This intentionally generates a limited set of keys for each month in a -/// known sequence. For each month, there's 24 hours * 10 random numbers for -/// a total of 240 keys per month depending on when the user migrates forward. -pub fn generate_last_connect() -> u64 { - let today = Utc::now(); - let mut rng = thread_rng(); - let num = rng.gen_range(0..10); - let val = format!("{}{:04}", today.format("%Y%m%H"), num); - val.parse::().unwrap() -} diff --git a/autopush/src/http.rs b/autopush/src/http.rs deleted file mode 100644 index c2f04c13f..000000000 --- a/autopush/src/http.rs +++ /dev/null @@ -1,98 +0,0 @@ -//! Internal router HTTP API -//! -//! Accepts PUT requests to deliver notifications to a connected client or trigger -//! a client to check storage. -//! -//! Valid URL's: -//! PUT /push/UAID - Deliver notification to a client -//! 
PUT /notify/UAID - Tell a client to check storage - -use std::{str, sync::Arc}; - -use futures::future::Either; - -use futures::future::ok; -use futures::{Future, Stream}; -use hyper::{self, service::Service, Body, Method, StatusCode}; -use uuid::Uuid; - -use crate::server::registry::ClientRegistry; - -pub struct Push(pub Arc); - -impl Service for Push { - type ReqBody = Body; - type ResBody = Body; - type Error = hyper::Error; - type Future = Box, Error = hyper::Error> + Send>; - - fn call(&mut self, req: hyper::Request) -> Self::Future { - trace!("⏩ **** In notif handler..."); - let mut response = hyper::Response::builder(); - let req_path = req.uri().path().to_string(); - let path_vec: Vec<&str> = req_path.split('/').collect(); - if path_vec.len() != 3 { - response.status(StatusCode::NOT_FOUND); - return Box::new(ok(response.body(Body::empty()).unwrap())); - } - let (method_name, uaid) = (path_vec[1], path_vec[2]); - let uaid = match Uuid::parse_str(uaid) { - Ok(id) => id, - Err(_) => { - debug!("uri not uuid: {}", req.uri().to_string()); - response.status(StatusCode::BAD_REQUEST); - return Box::new(ok(response.body(Body::empty()).unwrap())); - } - }; - let clients = Arc::clone(&self.0); - debug!("⏩ trying {} => {}", req.method(), &uaid); - // Handle incoming routed push notifications from endpoints. 
- match (req.method(), method_name, uaid) { - (&Method::PUT, "push", uaid) => { - trace!("⏩ PUT /push/ {}", uaid); - // Due to consumption of body as a future we must return here - let body = req.into_body().concat2(); - return Box::new(body.and_then(move |body| { - let s = String::from_utf8(body.to_vec()).unwrap(); - if let Ok(msg) = serde_json::from_str(&s) { - Either::A(clients.notify(uaid, msg).then(move |result| { - let body = if result.is_ok() { - response.status(StatusCode::OK); - Body::empty() - } else { - response.status(StatusCode::NOT_FOUND); - Body::from("Client not available.") - }; - Ok(response.body(body).unwrap()) - })) - } else { - Either::B(ok(response - .status(hyper::StatusCode::BAD_REQUEST) - .body("Unable to decode body payload".into()) - .unwrap())) - } - })); - } - (&Method::PUT, "notif", uaid) => { - trace!("⏩ PUT /notif/ {}", uaid); - return Box::new(clients.check_storage(uaid).then(move |result| { - let body = if result.is_ok() { - response.status(StatusCode::OK); - Body::empty() - } else { - response.status(StatusCode::NOT_FOUND); - Body::from("Client not available.") - }; - Ok(response.body(body).unwrap()) - })); - } - (_, "push", _) | (_, "notif", _) => { - response.status(StatusCode::METHOD_NOT_ALLOWED); - } - _ => { - response.status(StatusCode::NOT_FOUND); - } - }; - Box::new(ok(response.body(Body::empty()).unwrap())) - } -} diff --git a/autopush/src/main.rs b/autopush/src/main.rs deleted file mode 100644 index 1b67fb90e..000000000 --- a/autopush/src/main.rs +++ /dev/null @@ -1,108 +0,0 @@ -extern crate slog; -#[macro_use] -extern crate slog_scope; -#[macro_use] -extern crate serde_derive; - -use std::time::Duration; -use std::{env, os::raw::c_int, thread}; - -use docopt::Docopt; - -use autopush_common::errors::{ApcError, ApcErrorKind, Result}; -use futures::{future::Either, Future, IntoFuture}; -use tokio_core::reactor::{Handle, Timeout}; - -mod client; -mod db; -mod http; -mod megaphone; -mod server; -mod settings; - -use 
crate::server::{AppState, AutopushServer}; -use crate::settings::Settings; - -pub type MyFuture = Box>; - -const USAGE: &str = " -Usage: autopush_rs [options] - -Options: - -h, --help Show this message. - --config-connection=CONFIGFILE Connection configuration file path. - --config-shared=CONFIGFILE Common configuration file path. -"; - -#[derive(Debug, Deserialize)] -struct Args { - flag_config_connection: Option, - flag_config_shared: Option, -} - -fn main() -> Result<()> { - env_logger::init(); - let signal = notify(&[signal_hook::consts::SIGINT, signal_hook::consts::SIGTERM])?; - let args: Args = Docopt::new(USAGE) - .and_then(|d| d.deserialize()) - .unwrap_or_else(|e| e.exit()); - let mut filenames = Vec::new(); - if let Some(shared_filename) = args.flag_config_shared { - filenames.push(shared_filename); - } - if let Some(config_filename) = args.flag_config_connection { - filenames.push(config_filename); - } - let settings = Settings::with_env_and_config_files(&filenames)?; - // Setup the AWS env var if it was set - if let Some(ref ddb_local) = settings.aws_ddb_endpoint { - env::set_var("AWS_LOCAL_DYNAMODB", ddb_local); - } - let app_state = AppState::from_settings(settings)?; - let server = AutopushServer::new(app_state); - server.start(); - signal.recv().unwrap(); - server - .stop() - .map_err(|_e| ApcErrorKind::GeneralError("Failed to shutdown properly".into()).into()) -} - -/// Create a new channel subscribed to the given signals -fn notify(signals: &[c_int]) -> Result> { - let (s, r) = crossbeam_channel::bounded(100); - let mut signals = signal_hook::iterator::Signals::new(signals)?; - thread::spawn(move || { - for signal in signals.forever() { - if s.send(signal).is_err() { - break; - } - } - }); - Ok(r) -} - -/// Convenience future to time out the resolution of `f` provided within the -/// duration provided. 
-/// -/// If the `dur` is `None` then the returned future is equivalent to `f` (no -/// timeout) and otherwise the returned future will cancel `f` and resolve to an -/// error if the `dur` timeout elapses before `f` resolves. -pub fn timeout(f: F, dur: Option, handle: &Handle) -> MyFuture -where - F: Future + 'static, - F::Error: Into, -{ - let dur = match dur { - Some(dur) => dur, - None => return Box::new(f.map_err(|e| e.into())), - }; - let timeout = Timeout::new(dur, handle).into_future().flatten(); - Box::new(f.select2(timeout).then(|res| match res { - Ok(Either::A((item, _timeout))) => Ok(item), - Err(Either::A((e, _timeout))) => Err(e.into()), - Ok(Either::B(((), _item))) => { - Err(ApcErrorKind::GeneralError("timed out".to_owned()).into()) - } - Err(Either::B((e, _item))) => Err(e.into()), - })) -} diff --git a/autopush/src/megaphone.rs b/autopush/src/megaphone.rs deleted file mode 100644 index fc6154e59..000000000 --- a/autopush/src/megaphone.rs +++ /dev/null @@ -1,406 +0,0 @@ -use std::collections::HashMap; -use std::time::Duration; - -use serde_derive::{Deserialize, Serialize}; - -use autopush_common::errors::{ApcErrorKind, Result}; - -use crate::server::protocol::BroadcastValue; - -// A Broadcast entry Key in a BroadcastRegistry -type BroadcastKey = u32; - -// Broadcasts a client is subscribed to and the last change seen -#[derive(Debug, Default)] -pub struct BroadcastSubs { - broadcast_list: Vec, - change_count: u32, -} - -#[derive(Debug)] -struct BroadcastRegistry { - lookup: HashMap, - table: Vec, -} - -// Return result of the first delta call for a client given a full list of broadcast id's and -// versions -#[derive(Debug)] -pub struct BroadcastSubsInit(pub BroadcastSubs, pub Vec); - -impl BroadcastRegistry { - fn new() -> BroadcastRegistry { - BroadcastRegistry { - lookup: HashMap::new(), - table: Vec::new(), - } - } - - // Add's a new broadcast to the lookup table, returns the existing key if the broadcast already - // exists - fn 
add_broadcast(&mut self, broadcast_id: String) -> BroadcastKey { - if let Some(v) = self.lookup.get(&broadcast_id) { - return *v; - } - let i = self.table.len() as BroadcastKey; - self.table.push(broadcast_id.clone()); - self.lookup.insert(broadcast_id, i); - i - } - - fn lookup_id(&self, key: BroadcastKey) -> Option { - self.table.get(key as usize).cloned() - } - - fn lookup_key(&self, broadcast_id: &str) -> Option { - self.lookup.get(broadcast_id).copied() - } -} - -// An individual broadcast and the current change count -#[derive(Debug)] -struct BroadcastRevision { - change_count: u32, - broadcast: BroadcastKey, -} - -// A provided Broadcast/Version used for `BroadcastSubsInit`, client comparisons, and outgoing -// deltas -#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] -pub struct Broadcast { - broadcast_id: String, - version: String, -} - -impl Broadcast { - /// Errors out a broadcast for broadcasts that weren't found - pub fn error(self) -> Broadcast { - Broadcast { - broadcast_id: self.broadcast_id, - version: "Broadcast not found".to_string(), - } - } -} - -// Handy From impls for common hashmap to/from conversions -impl From<(String, String)> for Broadcast { - fn from(val: (String, String)) -> Broadcast { - Broadcast { - broadcast_id: val.0, - version: val.1, - } - } -} - -impl From for (String, BroadcastValue) { - fn from(bcast: Broadcast) -> (String, BroadcastValue) { - (bcast.broadcast_id, BroadcastValue::Value(bcast.version)) - } -} - -impl Broadcast { - pub fn from_hashmap(val: HashMap) -> Vec { - val.into_iter().map(|v| v.into()).collect() - } - - pub fn vec_into_hashmap(broadcasts: Vec) -> HashMap { - broadcasts.into_iter().map(|v| v.into()).collect() - } -} - -// BroadcastChangeTracker tracks the broadcasts, their change_count, and the broadcast lookup -// registry -#[derive(Debug)] -pub struct BroadcastChangeTracker { - broadcast_list: Vec, - broadcast_registry: BroadcastRegistry, - broadcast_versions: HashMap, - change_count: u32, 
-} - -#[derive(Deserialize)] -pub struct MegaphoneAPIResponse { - pub broadcasts: HashMap, -} - -impl BroadcastChangeTracker { - /// Creates a new `BroadcastChangeTracker` initialized with the provided `broadcasts`. - pub fn new(broadcasts: Vec) -> BroadcastChangeTracker { - let mut tracker = BroadcastChangeTracker { - broadcast_list: Vec::new(), - broadcast_registry: BroadcastRegistry::new(), - broadcast_versions: HashMap::new(), - change_count: 0, - }; - for srv in broadcasts { - let key = tracker.broadcast_registry.add_broadcast(srv.broadcast_id); - tracker.broadcast_versions.insert(key, srv.version); - } - tracker - } - - /// Creates a new `BroadcastChangeTracker` initialized from a Megaphone API server version set - /// as provided as the fetch URL. - /// - /// This method uses a synchronous HTTP call. - pub fn with_api_broadcasts(url: &str, token: &str) -> reqwest::Result { - let client = reqwest::Client::builder() - .timeout(Duration::from_secs(1)) - .build()?; - let MegaphoneAPIResponse { broadcasts } = client - .get(url) - .header("Authorization", token.to_string()) - .send()? - .error_for_status()? - .json()?; - let broadcasts = Broadcast::from_hashmap(broadcasts); - Ok(BroadcastChangeTracker::new(broadcasts)) - } - - /// Add a new broadcast to the BroadcastChangeTracker, triggering a change_count increase. - /// Note: If the broadcast already exists, it will be updated instead. 
- pub fn add_broadcast(&mut self, broadcast: Broadcast) -> u32 { - if let Ok(change_count) = self.update_broadcast(broadcast.clone()) { - trace!("📢 returning change count {}", &change_count); - return change_count; - } - self.change_count += 1; - let key = self - .broadcast_registry - .add_broadcast(broadcast.broadcast_id); - self.broadcast_versions.insert(key, broadcast.version); - self.broadcast_list.push(BroadcastRevision { - change_count: self.change_count, - broadcast: key, - }); - self.change_count - } - - /// Update a `broadcast` to a new revision, triggering a change_count increase. - /// - /// Returns an error if the `broadcast` was never initialized/added. - pub fn update_broadcast(&mut self, broadcast: Broadcast) -> Result { - let b_id = broadcast.broadcast_id.clone(); - let old_count = self.change_count; - let key = self - .broadcast_registry - .lookup_key(&broadcast.broadcast_id) - .ok_or_else(|| ApcErrorKind::BroadcastError("Broadcast not found".into()))?; - - if let Some(ver) = self.broadcast_versions.get_mut(&key) { - if *ver == broadcast.version { - return Ok(self.change_count); - } - *ver = broadcast.version; - } else { - trace!("📢 Not found: {}", &b_id); - return Err(ApcErrorKind::BroadcastError("Broadcast not found".into()).into()); - } - - trace!("📢 New version of {}", &b_id); - // Check to see if this broadcast has been updated since initialization - let bcast_index = self - .broadcast_list - .iter() - .enumerate() - .filter_map(|(i, bcast)| { - if bcast.broadcast == key { - Some(i) - } else { - None - } - }) - .next(); - self.change_count += 1; - if let Some(bcast_index) = bcast_index { - trace!("📢 {} index: {}", &b_id, &bcast_index); - let mut bcast = self.broadcast_list.remove(bcast_index); - bcast.change_count = self.change_count; - self.broadcast_list.push(bcast); - } else { - trace!("📢 adding broadcast list for {}", &b_id); - self.broadcast_list.push(BroadcastRevision { - change_count: self.change_count, - broadcast: key, - }) - } - if 
old_count != self.change_count { - trace!("📢 New Change available"); - } - Ok(self.change_count) - } - - /// Returns the new broadcast versions since the provided `client_set`. - pub fn change_count_delta(&self, client_set: &mut BroadcastSubs) -> Option> { - if self.change_count <= client_set.change_count { - return None; - } - let mut bcast_delta = Vec::new(); - for bcast in self.broadcast_list.iter().rev() { - if bcast.change_count <= client_set.change_count { - break; - } - if !client_set.broadcast_list.contains(&bcast.broadcast) { - continue; - } - if let Some(ver) = self.broadcast_versions.get(&bcast.broadcast) { - if let Some(bcast_id) = self.broadcast_registry.lookup_id(bcast.broadcast) { - bcast_delta.push(Broadcast { - broadcast_id: bcast_id, - version: (*ver).clone(), - }); - } - } - } - client_set.change_count = self.change_count; - if bcast_delta.is_empty() { - None - } else { - Some(bcast_delta) - } - } - - /// Returns a delta for `broadcasts` that are out of date with the latest version and a - /// the collection of broadcast subscriptions. - pub fn broadcast_delta(&self, broadcasts: &[Broadcast]) -> BroadcastSubsInit { - let mut bcast_list = Vec::new(); - let mut bcast_delta = Vec::new(); - for bcast in broadcasts.iter() { - if let Some(bcast_key) = self.broadcast_registry.lookup_key(&bcast.broadcast_id) { - if let Some(ver) = self.broadcast_versions.get(&bcast_key) { - if *ver != bcast.version { - bcast_delta.push(Broadcast { - broadcast_id: bcast.broadcast_id.clone(), - version: (*ver).clone(), - }); - } - } - bcast_list.push(bcast_key); - } - } - BroadcastSubsInit( - BroadcastSubs { - broadcast_list: bcast_list, - change_count: self.change_count, - }, - bcast_delta, - ) - } - - /// Update a `BroadcastSubs` to account for new broadcasts. - /// - /// Returns broadcasts that have changed. 
- pub fn subscribe_to_broadcasts( - &self, - broadcast_subs: &mut BroadcastSubs, - broadcasts: &[Broadcast], - ) -> Option> { - let mut bcast_delta = self.change_count_delta(broadcast_subs).unwrap_or_default(); - for bcast in broadcasts.iter() { - if let Some(bcast_key) = self.broadcast_registry.lookup_key(&bcast.broadcast_id) { - if let Some(ver) = self.broadcast_versions.get(&bcast_key) { - if *ver != bcast.version { - bcast_delta.push(Broadcast { - broadcast_id: bcast.broadcast_id.clone(), - version: (*ver).clone(), - }); - } - } - broadcast_subs.broadcast_list.push(bcast_key) - } - } - if bcast_delta.is_empty() { - None - } else { - Some(bcast_delta) - } - } - - /// Check a broadcast list and return unknown broadcast id's with their appropriate error - pub fn missing_broadcasts(&self, broadcasts: &[Broadcast]) -> Vec { - broadcasts - .iter() - .filter_map(|b| { - if self - .broadcast_registry - .lookup_key(&b.broadcast_id) - .is_none() - { - Some(b.clone().error()) - } else { - None - } - }) - .collect() - } -} - -#[cfg(test)] -mod tests { - use super::*; - - fn make_broadcast_base() -> Vec { - vec![ - Broadcast { - broadcast_id: String::from("bcasta"), - version: String::from("rev1"), - }, - Broadcast { - broadcast_id: String::from("bcastb"), - version: String::from("revalha"), - }, - ] - } - - #[test] - fn test_broadcast_change_tracker() { - let broadcasts = make_broadcast_base(); - let desired_broadcasts = broadcasts.clone(); - let mut tracker = BroadcastChangeTracker::new(broadcasts); - let BroadcastSubsInit(mut broadcast_subs, delta) = - tracker.broadcast_delta(&desired_broadcasts); - assert_eq!(delta.len(), 0); - assert_eq!(broadcast_subs.change_count, 0); - assert_eq!(broadcast_subs.broadcast_list.len(), 2); - - tracker - .update_broadcast(Broadcast { - broadcast_id: String::from("bcasta"), - version: String::from("rev2"), - }) - .ok(); - let delta = tracker.change_count_delta(&mut broadcast_subs); - assert!(delta.is_some()); - let delta = 
delta.unwrap(); - assert_eq!(delta.len(), 1); - } - - #[test] - fn test_broadcast_change_handles_new_broadcasts() { - let broadcasts = make_broadcast_base(); - let desired_broadcasts = broadcasts.clone(); - let mut tracker = BroadcastChangeTracker::new(broadcasts); - let BroadcastSubsInit(mut broadcast_subs, _) = tracker.broadcast_delta(&desired_broadcasts); - - tracker.add_broadcast(Broadcast { - broadcast_id: String::from("bcastc"), - version: String::from("revmega"), - }); - let delta = tracker.change_count_delta(&mut broadcast_subs); - assert!(delta.is_none()); - - let delta = tracker - .subscribe_to_broadcasts( - &mut broadcast_subs, - &[Broadcast { - broadcast_id: String::from("bcastc"), - version: String::from("revision_alpha"), - }], - ) - .unwrap(); - assert_eq!(delta.len(), 1); - assert_eq!(delta[0].version, String::from("revmega")); - assert_eq!(broadcast_subs.change_count, 1); - assert_eq!(tracker.broadcast_list.len(), 1); - } -} diff --git a/autopush/src/server/dispatch.rs b/autopush/src/server/dispatch.rs deleted file mode 100644 index bc4cc31c3..000000000 --- a/autopush/src/server/dispatch.rs +++ /dev/null @@ -1,103 +0,0 @@ -//! A future to figure out where we're going to dispatch a TCP socket. -//! -//! When the websocket server receives a TCP connection it may be a websocket -//! request or a general HTTP request. Right now the websocket library we're -//! using, Tungstenite, doesn't have built-in support for handling this -//! situation, so we roll our own. -//! -//! The general idea here is that we're going to read just enough data off the -//! socket to parse an initial HTTP request. This request will be parsed by the -//! `httparse` crate. Once we've got a request we take a look at the headers and -//! if we find a websocket upgrade we classify it as a websocket request. If -//! it's otherwise a `/status` request, we return that we're supposed to get the -//! status, and finally after all that if it doesn't match we return an error. -//! -//! 
This is basically a "poor man's" HTTP router and while it should be good -//! enough for now it should probably be extended/refactored in the future! -//! -//! Note that also to implement this we buffer the request that we read in -//! memory and then attach that to a socket once we've classified what kind of -//! socket this is. That's done to replay the bytes we read again for the -//! tungstenite library, which'll duplicate header parsing but we don't have -//! many other options for now! - -use bytes::BytesMut; -use futures::{try_ready, Future, Poll}; -use tokio_core::net::TcpStream; -use tokio_io::AsyncRead; - -use autopush_common::errors::{ApcError, ApcErrorKind}; - -use crate::server::tls::MaybeTlsStream; -use crate::server::webpush_io::WebpushIo; - -pub struct Dispatch { - socket: Option>, - data: BytesMut, -} - -pub enum RequestType { - Websocket, - Status, - LogCheck, - LBHeartBeat, - Version, -} - -impl Dispatch { - pub fn new(socket: MaybeTlsStream) -> Self { - Self { - socket: Some(socket), - data: BytesMut::new(), - } - } -} - -impl Future for Dispatch { - type Item = (WebpushIo, RequestType); - type Error = ApcError; - - fn poll(&mut self) -> Poll<(WebpushIo, RequestType), ApcError> { - loop { - if self.data.len() == self.data.capacity() { - self.data.reserve(16); // get some extra space - } - if try_ready!(self.socket.as_mut().unwrap().read_buf(&mut self.data)) == 0 { - return Err(ApcErrorKind::GeneralError("early eof".into()).into()); - } - let ty = { - let mut headers = [httparse::EMPTY_HEADER; 32]; - let mut req = httparse::Request::new(&mut headers); - match req.parse(&self.data)? 
{ - httparse::Status::Complete(_) => {} - httparse::Status::Partial => continue, - } - - if req.headers.iter().any(|h| h.name == "Upgrade") { - RequestType::Websocket - } else { - match req.path { - Some(path) if path.starts_with("/status") || path == "/__heartbeat__" => { - RequestType::Status - } - Some("/__lbheartbeat__") => RequestType::LBHeartBeat, - Some("/__version__") => RequestType::Version, - // legacy: - Some(path) if path.starts_with("/v1/err/crit") => RequestType::LogCheck, - // standardized: - Some("/_error") => RequestType::LogCheck, - _ => { - debug!("unknown http request {:?}", req); - return Err( - ApcErrorKind::GeneralError("unknown http request".into()).into() - ); - } - } - } - }; - - let tcp = self.socket.take().unwrap(); - return Ok((WebpushIo::new(tcp, self.data.take()), ty).into()); - } - } -} diff --git a/autopush/src/server/metrics.rs b/autopush/src/server/metrics.rs deleted file mode 100644 index 9d412f215..000000000 --- a/autopush/src/server/metrics.rs +++ /dev/null @@ -1,27 +0,0 @@ -//! 
Metrics tie-ins - -use std::net::UdpSocket; - -use cadence::{BufferedUdpMetricSink, NopMetricSink, QueuingMetricSink, StatsdClient}; - -use autopush_common::errors::Result; - -use crate::server::AppState; - -/// Create a cadence StatsdClient from the given options -pub fn metrics_from_state(state: &AppState) -> Result { - let builder = if let Some(statsd_host) = state.statsd_host.as_ref() { - let socket = UdpSocket::bind("0.0.0.0:0")?; - socket.set_nonblocking(true)?; - - let host = (statsd_host.as_str(), state.statsd_port); - let udp_sink = BufferedUdpMetricSink::from(host, socket)?; - let sink = QueuingMetricSink::from(udp_sink); - StatsdClient::builder("autopush", sink) - } else { - StatsdClient::builder("autopush", NopMetricSink) - }; - Ok(builder - .with_error_handler(|err| error!("Metrics send error: {}", err)) - .build()) -} diff --git a/autopush/src/server/middleware/mod.rs b/autopush/src/server/middleware/mod.rs deleted file mode 100644 index fd1010dc1..000000000 --- a/autopush/src/server/middleware/mod.rs +++ /dev/null @@ -1,3 +0,0 @@ -//! Actix middleware - -pub mod sentry2; diff --git a/autopush/src/server/middleware/sentry2.rs b/autopush/src/server/middleware/sentry2.rs deleted file mode 100644 index 8fd44cda7..000000000 --- a/autopush/src/server/middleware/sentry2.rs +++ /dev/null @@ -1,301 +0,0 @@ -/// TODO: Eventually move this to autopush-common, as well as handle a number of the -/// issues with modern sentry: -/// e.g. -/// * resolve bytes limit -/// * handle pulling `extra` data -use std::borrow::Cow; -use std::pin::Pin; -use std::sync::Arc; - -use actix_web::dev::{Service, ServiceRequest, ServiceResponse, Transform}; -use actix_web::http::StatusCode; -use actix_web::Error; -use futures_util::future::{ok, Future, Ready}; -use futures_util::FutureExt; - -use sentry_core::protocol::{self, ClientSdkPackage, Event, Request}; -use sentry_core::{Hub, SentryFutureExt}; - -// Taken from sentry-actix - -/// Reports certain failures to Sentry. 
-#[derive(Clone)] -pub struct Sentry { - hub: Option>, - emit_header: bool, - capture_server_errors: bool, - start_transaction: bool, -} - -impl Default for Sentry { - fn default() -> Self { - Sentry { - hub: None, - emit_header: false, - capture_server_errors: true, - start_transaction: false, - } - } -} - -impl Sentry { - #[allow(dead_code)] - /// Creates a new sentry middleware which starts a new performance monitoring transaction for each request. - pub fn with_transaction() -> Sentry { - Sentry { - start_transaction: true, - ..Sentry::default() - } - } -} - -impl Transform for Sentry -where - S: Service, Error = Error>, - S::Future: 'static, -{ - type Response = ServiceResponse; - type Error = Error; - type Transform = SentryMiddleware; - type InitError = (); - type Future = Ready>; - - fn new_transform(&self, service: S) -> Self::Future { - ok(SentryMiddleware { - service, - inner: self.clone(), - }) - } -} - -/// The middleware for individual services. -pub struct SentryMiddleware { - service: S, - inner: Sentry, -} - -impl Service for SentryMiddleware -where - S: Service, Error = Error>, - S::Future: 'static, -{ - type Response = ServiceResponse; - type Error = Error; - type Future = Pin>>>; - - fn poll_ready( - &self, - cx: &mut std::task::Context<'_>, - ) -> std::task::Poll> { - self.service.poll_ready(cx) - } - - fn call(&self, req: ServiceRequest) -> Self::Future { - let inner = self.inner.clone(); - // XXX Sentry Modification - // crate::server is going to be specific to the given app, so we can't put this in common. 
- let state = req - .app_data::>() - .cloned(); - // XXX - let hub = Arc::new(Hub::new_from_top( - inner.hub.clone().unwrap_or_else(Hub::main), - )); - let client = hub.client(); - let track_sessions = client.as_ref().map_or(false, |client| { - let options = client.options(); - options.auto_session_tracking - && options.session_mode == sentry_core::SessionMode::Request - }); - if track_sessions { - hub.start_session(); - } - let with_pii = client - .as_ref() - .map_or(false, |client| client.options().send_default_pii); - - let (mut tx, sentry_req) = sentry_request_from_http(&req, with_pii); - - let transaction = if inner.start_transaction { - let name = std::mem::take(&mut tx) - .unwrap_or_else(|| format!("{} {}", req.method(), req.uri())); - - let headers = req.headers().iter().flat_map(|(header, value)| { - value.to_str().ok().map(|value| (header.as_str(), value)) - }); - - // TODO: Break apart user agent? - // XXX Sentry modification - /* - if let Some(ua_val) = req.headers().get("UserAgent") { - if let Ok(ua_str) = ua_val.to_str() { - let ua_info = crate::util::user_agent::UserAgentInfo::from(ua_val.to_str().unwrap_or_default()); - sentry::configure_scope(|scope| { - scope::add_tag("browser_version", ua_info.browser_version); - }) - } - } - */ - let ctx = sentry_core::TransactionContext::continue_from_headers( - &name, - "http.server", - headers, - ); - Some(hub.start_transaction(ctx)) - } else { - None - }; - - let parent_span = hub.configure_scope(|scope| { - let parent_span = scope.get_span(); - if let Some(transaction) = transaction.as_ref() { - scope.set_span(Some(transaction.clone().into())); - } else { - scope.set_transaction(tx.as_deref()); - } - scope.add_event_processor(move |event| Some(process_event(event, &sentry_req))); - parent_span - }); - - let fut = self.service.call(req).bind_hub(hub.clone()); - - async move { - // Service errors - let mut res: Self::Response = match fut.await { - Ok(res) => res, - Err(e) => { - // XXX Sentry Modification - 
if let Some(api_err) = e.as_error::() { - // if it's not reportable, , and we have access to the metrics, record it as a metric. - if !api_err.kind.is_sentry_event() { - // XXX - Modified sentry - // The error (e.g. VapidErrorKind::InvalidKey(String)) might be too cardinal, - // but we may need that information to debug a production issue. We can - // add an info here, temporarily turn on info level debugging on a given server, - // capture it, and then turn it off before we run out of money. - info!("Sending error to metrics: {:?}", api_err.kind); - if let Some(state) = state { - if let Some(label) = api_err.kind.metric_label() { - state.metrics.incr(&format!("api_error.{}", label)).is_ok(); - }; - } - debug!("Not reporting error (service error): {:?}", e); - return Err(e); - } - } - - if inner.capture_server_errors { - hub.capture_error(&e); - } - - if let Some(transaction) = transaction { - if transaction.get_status().is_none() { - let status = protocol::SpanStatus::UnknownError; - transaction.set_status(status); - } - transaction.finish(); - hub.configure_scope(|scope| scope.set_span(parent_span)); - } - return Err(e); - } - }; - - // Response errors - if inner.capture_server_errors && res.response().status().is_server_error() { - if let Some(e) = res.response().error() { - let event_id = hub.capture_error(e); - - if inner.emit_header { - res.response_mut().headers_mut().insert( - "x-sentry-event".parse().unwrap(), - event_id.simple().to_string().parse().unwrap(), - ); - } - } - } - - if let Some(transaction) = transaction { - if transaction.get_status().is_none() { - let status = map_status(res.status()); - transaction.set_status(status); - } - transaction.finish(); - hub.configure_scope(|scope| scope.set_span(parent_span)); - } - - Ok(res) - } - .boxed_local() - } -} - -fn map_status(status: StatusCode) -> protocol::SpanStatus { - match status { - StatusCode::UNAUTHORIZED => protocol::SpanStatus::Unauthenticated, - StatusCode::FORBIDDEN => 
protocol::SpanStatus::PermissionDenied, - StatusCode::NOT_FOUND => protocol::SpanStatus::NotFound, - StatusCode::TOO_MANY_REQUESTS => protocol::SpanStatus::ResourceExhausted, - status if status.is_client_error() => protocol::SpanStatus::InvalidArgument, - StatusCode::NOT_IMPLEMENTED => protocol::SpanStatus::Unimplemented, - StatusCode::SERVICE_UNAVAILABLE => protocol::SpanStatus::Unavailable, - status if status.is_server_error() => protocol::SpanStatus::InternalError, - StatusCode::CONFLICT => protocol::SpanStatus::AlreadyExists, - status if status.is_success() => protocol::SpanStatus::Ok, - _ => protocol::SpanStatus::UnknownError, - } -} - -/// Build a Sentry request struct from the HTTP request -fn sentry_request_from_http(request: &ServiceRequest, with_pii: bool) -> (Option, Request) { - let transaction = if let Some(name) = request.match_name() { - Some(String::from(name)) - } else { - request.match_pattern() - }; - - let mut sentry_req = Request { - url: format!( - "{}://{}{}", - request.connection_info().scheme(), - request.connection_info().host(), - request.uri() - ) - .parse() - .ok(), - method: Some(request.method().to_string()), - headers: request - .headers() - .iter() - .map(|(k, v)| (k.to_string(), v.to_str().unwrap_or_default().to_string())) - .collect(), - ..Default::default() - }; - - // If PII is enabled, include the remote address - if with_pii { - if let Some(remote) = request.connection_info().peer_addr() { - sentry_req.env.insert("REMOTE_ADDR".into(), remote.into()); - } - }; - - (transaction, sentry_req) -} - -/// Add request data to a Sentry event -fn process_event(mut event: Event<'static>, request: &Request) -> Event<'static> { - // Request - if event.request.is_none() { - event.request = Some(request.clone()); - } - - // SDK - if let Some(sdk) = event.sdk.take() { - let mut sdk = sdk.into_owned(); - sdk.packages.push(ClientSdkPackage { - name: "sentry-actix".into(), - version: env!("CARGO_PKG_VERSION").into(), - }); - event.sdk = 
Some(Cow::Owned(sdk)); - } - event -} diff --git a/autopush/src/server/mod.rs b/autopush/src/server/mod.rs deleted file mode 100644 index 975793ec0..000000000 --- a/autopush/src/server/mod.rs +++ /dev/null @@ -1,1009 +0,0 @@ -use std::cell::{Cell, RefCell}; -use std::collections::HashMap; -use std::env; -use std::io; -use std::net::SocketAddr; -use std::panic; -use std::path::PathBuf; -use std::rc::Rc; -use std::sync::Arc; -use std::thread; -use std::time::{Duration, Instant}; - -use cadence::StatsdClient; -use chrono::Utc; -use fernet::{Fernet, MultiFernet}; -use futures::sync::oneshot; -use futures::{task, try_ready}; -use futures::{Async, AsyncSink, Future, Poll, Sink, StartSend, Stream}; -use hyper::{server::conn::Http, StatusCode}; -use openssl::ssl::SslAcceptor; -use sentry::{self, capture_message}; -use serde_json::{self, json}; -use tokio_core::net::TcpListener; -use tokio_core::reactor::{Core, Handle, Timeout}; -use tokio_tungstenite::{accept_hdr_async, WebSocketStream}; -use tungstenite::handshake::server::Request; -use tungstenite::{self, Message}; - -use autopush_common::errors::{ApcError, ApcErrorKind, Result}; -use autopush_common::logging; -use autopush_common::notification::Notification; - -use crate::client::Client; -use crate::db::DynamoStorage; -use crate::megaphone::{ - Broadcast, BroadcastChangeTracker, BroadcastSubs, BroadcastSubsInit, MegaphoneAPIResponse, -}; -use crate::server::dispatch::{Dispatch, RequestType}; -use crate::server::metrics::metrics_from_state; -use crate::server::protocol::{BroadcastValue, ClientMessage, ServerMessage}; -use crate::server::rc::RcObject; -use crate::server::registry::ClientRegistry; -use crate::server::webpush_io::WebpushIo; -use crate::settings::Settings; -use crate::{http, timeout, MyFuture}; - -mod dispatch; -mod metrics; -pub mod protocol; -mod rc; -pub mod registry; -mod tls; -mod webpush_io; - -const UAHEADER: &str = "User-Agent"; - -fn ito_dur(seconds: u32) -> Option { - if seconds == 0 { - None - } 
else { - Some(Duration::new(seconds.into(), 0)) - } -} - -fn fto_dur(seconds: f64) -> Option { - if seconds == 0.0 { - None - } else { - Some(Duration::new( - seconds as u64, - (seconds.fract() * 1_000_000_000.0) as u32, - )) - } -} - -// a signaler to shut down a tokio Core and its associated thread -struct ShutdownHandle(oneshot::Sender<()>, thread::JoinHandle<()>); - -pub struct AutopushServer { - app_state: Arc, - shutdown_handles: Cell>>, - _guard: Option, -} - -impl AutopushServer { - pub fn new(app_state: AppState) -> Self { - let guard = if let Ok(dsn) = env::var("SENTRY_DSN") { - let guard = sentry::init(( - dsn, - sentry::ClientOptions { - release: sentry::release_name!(), - ..autopush_common::sentry::client_options() - }, - )); - /* - Sentry 0.29+ automatically enables `PanicIntegration`. - see https://docs.rs/sentry-panic/latest/sentry_panic/ - */ - Some(guard) - } else { - None - }; - Self { - app_state: Arc::new(app_state), - shutdown_handles: Cell::new(None), - _guard: guard, - } - } - - pub fn start(&self) { - logging::init_logging( - !self.app_state.human_logs, - env!("CARGO_PKG_NAME"), - env!("CARGO_PKG_VERSION"), - ) - .expect("init_logging failed"); - let handles = Server::start(&self.app_state).expect("failed to start server"); - self.shutdown_handles.set(Some(handles)); - } - - /// Blocks execution of the calling thread until the helper thread with the - /// tokio reactor has exited. 
- pub fn stop(&self) -> Result<()> { - let mut result = Ok(()); - if let Some(shutdown_handles) = self.shutdown_handles.take() { - for ShutdownHandle(tx, thread) in shutdown_handles { - let _ = tx.send(()); - if let Err(err) = thread.join() { - result = Err(ApcErrorKind::Thread(err).into()); - } - } - } - logging::reset_logging(); - result - } -} - -pub struct AppState { - pub router_port: u16, - pub port: u16, - pub fernet: MultiFernet, - pub ssl_key: Option, - pub ssl_cert: Option, - pub ssl_dh_param: Option, - pub open_handshake_timeout: Option, - pub auto_ping_interval: Duration, - pub auto_ping_timeout: Duration, - pub max_connections: Option, - pub close_handshake_timeout: Option, - pub message_table_name: String, - pub router_table_name: String, - pub router_url: String, - pub endpoint_url: String, - pub statsd_host: Option, - pub statsd_port: u16, - pub megaphone_api_url: Option, - pub megaphone_api_token: Option, - pub megaphone_poll_interval: Duration, - pub human_logs: bool, - pub msg_limit: u32, -} - -impl AppState { - pub fn from_settings(settings: Settings) -> Result { - let crypto_key = &settings.crypto_key; - if !(crypto_key.starts_with('[') && crypto_key.ends_with(']')) { - return Err( - ApcErrorKind::GeneralError("Missing AUTOPUSH__CRYPTO_KEY set".into()).into(), - ); - } - let crypto_key = &crypto_key[1..crypto_key.len() - 1]; - debug!("Fernet keys: {:?}", &crypto_key); - let fernets: Vec = crypto_key - .split(',') - .map(|s| s.trim().to_string()) - .map(|key| { - Fernet::new(&key) - .unwrap_or_else(|| panic!("Invalid AUTOPUSH__CRYPTO_KEY: {:?}", key)) - }) - .collect(); - let fernet = MultiFernet::new(fernets); - - let router_url = settings.router_url(); - let endpoint_url = settings.endpoint_url(); - Ok(Self { - port: settings.port, - fernet, - router_port: settings.router_port, - statsd_host: if settings.statsd_host.is_empty() { - None - } else { - Some(settings.statsd_host) - }, - statsd_port: settings.statsd_port, - message_table_name: 
settings.message_tablename, - router_table_name: settings.router_tablename, - router_url, - endpoint_url, - ssl_key: settings.router_ssl_key.map(PathBuf::from), - ssl_cert: settings.router_ssl_cert.map(PathBuf::from), - ssl_dh_param: settings.router_ssl_dh_param.map(PathBuf::from), - auto_ping_interval: fto_dur(settings.auto_ping_interval) - .expect("auto ping interval cannot be 0"), - auto_ping_timeout: fto_dur(settings.auto_ping_timeout) - .expect("auto ping timeout cannot be 0"), - close_handshake_timeout: ito_dur(settings.close_handshake_timeout), - max_connections: if settings.max_connections == 0 { - None - } else { - Some(settings.max_connections) - }, - open_handshake_timeout: ito_dur(5), - megaphone_api_url: settings.megaphone_api_url, - megaphone_api_token: settings.megaphone_api_token, - megaphone_poll_interval: ito_dur(settings.megaphone_poll_interval) - .expect("megaphone poll interval cannot be 0"), - human_logs: settings.human_logs, - msg_limit: settings.msg_limit, - }) - } -} - -/// The main AutoConnect server -pub struct Server { - /// List of known Clients, mapped by UAID, for this node. - pub clients: Arc, - /// Handle to the Broadcast change monitor - broadcaster: RefCell, - /// Handle to the current Database Client - pub ddb: DynamoStorage, - /// Count of open cells - open_connections: Cell, - /// OBSOLETE - tls_acceptor: Option, - /// Application Communal Data - pub app_state: Arc, - /// tokio reactor core handle - pub handle: Handle, - /// analytics reporting - pub metrics: Arc, -} - -impl Server { - /// Creates a new server handle used by Megaphone and other services. - /// - /// This will spawn a new server with the [`state`](autoconnect-settings::AppState) specified, spinning up a - /// separate thread for the tokio reactor. The returned ShutdownHandles can - /// be used to interact with it (e.g. shut it down). 
- fn start(app_state: &Arc) -> Result> { - let mut shutdown_handles = vec![]; - - let (inittx, initrx) = oneshot::channel(); - let (donetx, donerx) = oneshot::channel(); - - let state = app_state.clone(); - let thread = thread::spawn(move || { - let (srv, mut core) = match Server::new(&state) { - Ok(core) => { - inittx.send(None).unwrap(); - core - } - Err(e) => return inittx.send(Some(e)).unwrap(), - }; - - // Internal HTTP server setup - { - let handle = core.handle(); - let addr = SocketAddr::from(([0, 0, 0, 0], srv.app_state.router_port)); - debug!("Starting router: {:?}", addr); - let push_listener = TcpListener::bind(&addr, &handle).unwrap(); - let http = Http::new(); - let push_srv = push_listener.incoming().for_each(move |(socket, _)| { - handle.spawn( - http.serve_connection(socket, http::Push(Arc::clone(&srv.clients))) - .map(|_| ()) - .map_err(|e| debug!("Http server connection error: {}", e)), - ); - Ok(()) - }); - core.handle().spawn(push_srv.then(|res| { - debug!("Http server {:?}", res); - Ok(()) - })); - } - core.run(donerx).expect("Main Core run error"); - }); - - match initrx.wait() { - Ok(Some(e)) => Err(e), - Ok(None) => { - shutdown_handles.push(ShutdownHandle(donetx, thread)); - Ok(shutdown_handles) - } - Err(_) => panic::resume_unwind(thread.join().unwrap_err()), - } - } - - #[allow(clippy::single_char_add_str)] - fn new(app_state: &Arc) -> Result<(Rc, Core)> { - let core = Core::new()?; - let broadcaster = if let Some(ref megaphone_url) = app_state.megaphone_api_url { - let megaphone_token = app_state - .megaphone_api_token - .as_ref() - .expect("Megaphone API requires a Megaphone API Token to be set"); - BroadcastChangeTracker::with_api_broadcasts(megaphone_url, megaphone_token) - .expect("Unable to initialize megaphone with provided URL") - } else { - BroadcastChangeTracker::new(Vec::new()) - }; - let metrics = Arc::new(metrics_from_state(app_state)?); - - let srv = Rc::new(Server { - app_state: app_state.clone(), - broadcaster: 
RefCell::new(broadcaster), - ddb: DynamoStorage::from_settings( - &app_state.message_table_name, - &app_state.router_table_name, - metrics.clone(), - )?, - clients: Arc::new(ClientRegistry::default()), - open_connections: Cell::new(0), - handle: core.handle(), - tls_acceptor: tls::configure(app_state), - metrics, - }); - let addr = SocketAddr::from(([0, 0, 0, 0], srv.app_state.port)); - debug!("Starting server: {:?}", &addr); - let ws_listener = TcpListener::bind(&addr, &srv.handle)?; - - let handle = core.handle(); - let srv2 = srv.clone(); - let ws_srv = - ws_listener - .incoming() - .map_err(ApcErrorKind::from) - .for_each(move |(socket, addr)| { - // Make sure we're not handling too many clients before we start the - // websocket handshake. - let max = srv.app_state.max_connections.unwrap_or(u32::max_value()); - if srv.open_connections.get() >= max { - info!( - "dropping {} as we already have too many open connections", - addr - ); - return Ok(()); - } - srv.open_connections.set(srv.open_connections.get() + 1); - - // Process TLS (if configured) - let socket = tls::accept(&srv, socket); - - // Figure out if this is a websocket or a `/status` request, - let request = socket.and_then(Dispatch::new); - - // Time out both the TLS accept (if any) along with the dispatch - // to figure out where we're going. 
- let request = timeout(request, srv.app_state.open_handshake_timeout, &handle); - let srv2 = srv.clone(); - let handle2 = handle.clone(); - - // Setup oneshot to extract the user-agent from the header callback - let (uatx, uarx) = oneshot::channel(); - let callback = |req: &Request| { - if let Some(value) = req.headers.find_first(UAHEADER) { - let mut valstr = String::new(); - for c in value.iter() { - let c = *c as char; - valstr.push(c); - } - debug!("Found user-agent string"; "user-agent" => valstr.as_str()); - uatx.send(valstr).unwrap(); - } - debug!("No agent string found"); - Ok(None) - }; - - let client = request.and_then(move |(socket, request)| -> MyFuture<_> { - match request { - RequestType::Status => write_status(socket), - RequestType::LBHeartBeat => { - write_json(socket, StatusCode::OK, serde_json::Value::from("")) - } - RequestType::Version => write_version_file(socket), - RequestType::LogCheck => write_log_check(socket), - RequestType::Websocket => { - // Perform the websocket handshake on each - // connection, but don't let it take too long. - let ws = accept_hdr_async(socket, callback).map_err(|_e| { - ApcErrorKind::GeneralError("failed to accept client".into()) - }); - let ws = - timeout(ws, srv2.app_state.open_handshake_timeout, &handle2); - - // Once the handshake is done we'll start the main - // communication with the client, managing pings - // here and deferring to `Client` to start driving - // the internal state machine. 
- Box::new( - ws.and_then(move |ws| { - trace!("🏓 starting ping manager"); - PingManager::new(&srv2, ws, uarx).map_err(|_e| { - ApcErrorKind::GeneralError( - "failed to make ping handler".into(), - ) - .into() - }) - }) - .flatten(), - ) - } - } - }); - - let srv = srv.clone(); - handle.spawn(client.then(move |res| { - srv.open_connections.set(srv.open_connections.get() - 1); - if let Err(e) = res { - // No need to log ConnectionClosed (denotes the - // connection closed normally) - if !matches!( - e.kind, - ApcErrorKind::Ws(tungstenite::error::Error::ConnectionClosed) - ) { - debug!("🤫 {}: {}", addr, e.to_string()); - } - } - Ok(()) - })); - - Ok(()) - }); - - if let Some(ref megaphone_url) = app_state.megaphone_api_url { - let megaphone_token = app_state - .megaphone_api_token - .as_ref() - .expect("Megaphone API requires a Megaphone API Token to be set"); - let fut = MegaphoneUpdater::new( - megaphone_url, - megaphone_token, - app_state.megaphone_poll_interval, - &srv2, - ) - .expect("Unable to start megaphone updater"); - core.handle().spawn(fut.then(|res| { - trace!("📢 megaphone result: {:?}", res.map(drop)); - Ok(()) - })); - } - core.handle().spawn(ws_srv.then(|res| { - debug!("srv res: {:?}", res.map(drop)); - Ok(()) - })); - - Ok((srv2, core)) - } - - /// Initialize broadcasts for a newly connected client - pub fn broadcast_init( - &self, - desired_broadcasts: &[Broadcast], - ) -> (BroadcastSubs, HashMap) { - trace!("📢Initialized broadcasts"); - let bc = self.broadcaster.borrow(); - let BroadcastSubsInit(broadcast_subs, broadcasts) = bc.broadcast_delta(desired_broadcasts); - let mut response = Broadcast::vec_into_hashmap(broadcasts); - let missing = bc.missing_broadcasts(desired_broadcasts); - if !missing.is_empty() { - response.insert( - "errors".to_string(), - BroadcastValue::Nested(Broadcast::vec_into_hashmap(missing)), - ); - } - (broadcast_subs, response) - } - - /// Calculate whether there's new broadcast versions to go out - pub fn 
broadcast_delta(&self, broadcast_subs: &mut BroadcastSubs) -> Option> { - trace!("📢 Checking broadcast_delta"); - self.broadcaster.borrow().change_count_delta(broadcast_subs) - } - - /// Process a broadcast list, adding new broadcasts to be tracked and locating missing ones - /// Returns an appropriate response for use by the prototocol - pub fn process_broadcasts( - &self, - broadcast_subs: &mut BroadcastSubs, - broadcasts: &[Broadcast], - ) -> Option> { - let bc = self.broadcaster.borrow(); - let mut response: HashMap = HashMap::new(); - let missing = bc.missing_broadcasts(broadcasts); - if !missing.is_empty() { - response.insert( - "errors".to_string(), - BroadcastValue::Nested(Broadcast::vec_into_hashmap(missing)), - ); - } - if let Some(delta) = bc.subscribe_to_broadcasts(broadcast_subs, broadcasts) { - response.extend(Broadcast::vec_into_hashmap(delta)); - }; - if response.is_empty() { - None - } else { - Some(response) - } - } -} - -/* -STATE MACHINE -*/ -enum MegaphoneState { - Waiting, - Requesting(MyFuture), -} - -struct MegaphoneUpdater { - srv: Rc, - api_url: String, - api_token: String, - state: MegaphoneState, - timeout: Timeout, - poll_interval: Duration, - client: reqwest::r#async::Client, -} - -impl MegaphoneUpdater { - fn new( - uri: &str, - token: &str, - poll_interval: Duration, - srv: &Rc, - ) -> io::Result { - let client = reqwest::r#async::Client::builder() - .timeout(Duration::from_secs(1)) - .build() - .expect("Unable to build reqwest client"); - Ok(MegaphoneUpdater { - srv: srv.clone(), - api_url: uri.to_string(), - api_token: token.to_string(), - state: MegaphoneState::Waiting, - timeout: Timeout::new(poll_interval, &srv.handle)?, - poll_interval, - client, - }) - } -} - -impl Future for MegaphoneUpdater { - type Item = (); - type Error = ApcError; - - fn poll(&mut self) -> Poll<(), ApcError> { - loop { - let new_state = match self.state { - MegaphoneState::Waiting => { - try_ready!(self.timeout.poll()); - trace!("📢Sending megaphone API 
request"); - let fut = self - .client - .get(&self.api_url) - .header("Authorization", self.api_token.clone()) - .send() - .and_then(|response| response.error_for_status()) - .and_then(|mut response| response.json()) - .map_err(|_| { - ApcErrorKind::GeneralError( - "Unable to query/decode the API query".into(), - ) - .into() - }); - MegaphoneState::Requesting(Box::new(fut)) - } - MegaphoneState::Requesting(ref mut response) => { - let at = Instant::now() + self.poll_interval; - match response.poll() { - Ok(Async::Ready(MegaphoneAPIResponse { broadcasts })) => { - trace!("📢Fetched broadcasts: {:?}", broadcasts); - let mut broadcaster = self.srv.broadcaster.borrow_mut(); - for srv in Broadcast::from_hashmap(broadcasts) { - let vv = broadcaster.add_broadcast(srv); - trace!("📢 add_broadcast = {}", vv); - // TODO: Notify that Ping required? - } - } - Ok(Async::NotReady) => return Ok(Async::NotReady), - Err(error) => { - error!("📢Failed to get response, queue again {error:?}"); - capture_message( - &format!("Failed to get response, queue again {error:?}"), - sentry::Level::Warning, - ); - } - }; - self.timeout.reset(at); - MegaphoneState::Waiting - } - }; - self.state = new_state; - } - } -} - -enum WaitingFor { - SendPing, - Pong, - Close, -} - -/* -STATE MACHINE -*/ -enum CloseState { - Exchange(T), - Closing, -} - -struct PingManager { - socket: RcObject>>, - timeout: Timeout, - waiting: WaitingFor, - srv: Rc, - client: CloseState>>>>, -} - -impl PingManager { - fn new( - srv: &Rc, - socket: WebSocketStream, - uarx: oneshot::Receiver, - ) -> io::Result { - // The `socket` is itself a sink and a stream, and we've also got a sink - // (`tx`) and a stream (`rx`) to send messages. Half of our job will be - // doing all this proxying: reading messages from `socket` and sending - // them to `tx` while also reading messages from `rx` and sending them - // on `socket`. - // - // Our other job will be to manage the websocket protocol pings going - // out and coming back. 
The `opts` provided indicate how often we send - // pings and how long we'll wait for the ping to come back before we - // time it out. - // - // To make these tasks easier we start out by throwing the `socket` into - // an `Rc` object. This'll allow us to share it between the ping/pong - // management and message shuffling. - let socket = RcObject::new(WebpushSocket::new(socket)); - trace!("🏓Ping interval {:?}", &srv.app_state.auto_ping_interval); - Ok(PingManager { - timeout: Timeout::new(srv.app_state.auto_ping_interval, &srv.handle)?, - waiting: WaitingFor::SendPing, - socket: socket.clone(), - client: CloseState::Exchange(Client::new(socket, srv, uarx)), - srv: srv.clone(), - }) - } -} - -impl Future for PingManager { - type Item = (); - type Error = ApcError; - - fn poll(&mut self) -> Poll<(), ApcError> { - let mut socket = self.socket.borrow_mut(); - loop { - trace!("🏓 PingManager Poll loop"); - if socket.ws_ping { - // Don't check if we already have a delta to broadcast - if socket.broadcast_delta.is_none() { - // Determine if we can do a broadcast check, we need a connected webpush client - if let CloseState::Exchange(ref mut client) = self.client { - if let Some(delta) = client.broadcast_delta() { - socket.broadcast_delta = Some(delta); - } - } - } - - if socket.send_ws_ping()?.is_ready() { - trace!("🏓 Time to ping"); - // If we just sent a broadcast, reset the ping interval and clear the delta - if socket.broadcast_delta.is_some() { - trace!("📢 Pending"); - let at = Instant::now() + self.srv.app_state.auto_ping_interval; - self.timeout.reset(at); - socket.broadcast_delta = None; - self.waiting = WaitingFor::SendPing - } else { - let at = Instant::now() + self.srv.app_state.auto_ping_timeout; - self.timeout.reset(at); - self.waiting = WaitingFor::Pong - } - } else { - break; - } - } - debug_assert!(!socket.ws_ping); - match self.waiting { - WaitingFor::SendPing => { - trace!( - "🏓Checking pong timeout:{} pong recv'd:{}", - socket.ws_pong_timeout, - 
socket.ws_pong_received - ); - debug_assert!(!socket.ws_pong_timeout); - debug_assert!(!socket.ws_pong_received); - match self.timeout.poll()? { - Async::Ready(()) => { - trace!("🏓scheduling a ws ping to get sent"); - socket.ws_ping = true; - } - Async::NotReady => { - trace!("🏓not ready yet"); - break; - } - } - } - WaitingFor::Pong => { - if socket.ws_pong_received { - // If we received a pong, then switch us back to waiting - // to send out a ping - trace!("🏓ws pong received, going back to sending a ping"); - debug_assert!(!socket.ws_pong_timeout); - let at = Instant::now() + self.srv.app_state.auto_ping_interval; - self.timeout.reset(at); - self.waiting = WaitingFor::SendPing; - socket.ws_pong_received = false; - } else if socket.ws_pong_timeout { - // If our socket is waiting to deliver a pong timeout, - // then no need to keep checking the timer and we can - // keep going - trace!("🏓waiting for socket to see ws pong timed out"); - break; - } else if self.timeout.poll()?.is_ready() { - // We may not actually be reading messages from the - // websocket right now, could have been waiting on - // something else. Instead of immediately returning an - // error here wait for the stream to return `NotReady` - // when looking for messages, as then we're extra sure - // that no pong was received after this timeout elapsed. - trace!("🏓waited too long for a ws pong"); - socket.ws_pong_timeout = true; - } else { - break; - } - } - WaitingFor::Close => { - debug_assert!(!socket.ws_pong_timeout); - if self.timeout.poll()?.is_ready() { - if let CloseState::Exchange(ref mut client) = self.client { - client.shutdown(); - } - // So did the shutdown not work? We must call shutdown but no client here? 
- return Err(ApcErrorKind::GeneralError( - "close handshake took too long".into(), - ) - .into()); - } - } - } - } - - // Be sure to always flush out any buffered messages/pings - socket.poll_complete().map_err(|_e| { - ApcErrorKind::GeneralError("failed routine `poll_complete` call".into()) - })?; - drop(socket); - - // At this point looks our state of ping management A-OK, so try to - // make progress on our client, and when done with that execute the - // closing handshake. - loop { - match self.client { - CloseState::Exchange(ref mut client) => try_ready!(client.poll()), - CloseState::Closing => return self.socket.borrow_mut().close(), - } - - self.client = CloseState::Closing; - if let Some(dur) = self.srv.app_state.close_handshake_timeout { - let at = Instant::now() + dur; - self.timeout.reset(at); - self.waiting = WaitingFor::Close; - } - } - } -} - -// Wrapper struct to take a Sink/Stream of `Message` to a Sink/Stream of -// `ClientMessage` and `ServerMessage`. -struct WebpushSocket { - inner: T, - ws_pong_received: bool, - ws_ping: bool, - ws_pong_timeout: bool, - broadcast_delta: Option>, -} - -impl WebpushSocket { - fn new(t: T) -> WebpushSocket { - WebpushSocket { - inner: t, - ws_pong_received: false, - ws_ping: false, - ws_pong_timeout: false, - broadcast_delta: None, - } - } - - fn send_ws_ping(&mut self) -> Poll<(), ApcError> - where - T: Sink, - ApcError: From, - { - trace!("🏓 checking ping"); - if self.ws_ping { - trace!("🏓 Ping present"); - let msg = if let Some(broadcasts) = self.broadcast_delta.clone() { - trace!("🏓sending a broadcast delta"); - let server_msg = ServerMessage::Broadcast { - broadcasts: Broadcast::vec_into_hashmap(broadcasts), - }; - let s = server_msg.to_json()?; - Message::Text(s) - } else { - trace!("🏓sending a ws ping"); - Message::Ping(Vec::new()) - }; - match self.inner.start_send(msg)? 
{ - AsyncSink::Ready => { - trace!("🏓ws ping sent"); - self.ws_ping = false; - } - AsyncSink::NotReady(_) => { - trace!("🏓ws ping not ready to be sent"); - return Ok(Async::NotReady); - } - } - } else { - trace!("🏓No Ping"); - } - Ok(Async::Ready(())) - } -} - -impl Stream for WebpushSocket -where - T: Stream, - ApcError: From, -{ - type Item = ClientMessage; - type Error = ApcError; - - fn poll(&mut self) -> Poll, ApcError> { - loop { - let msg = match self.inner.poll()? { - Async::Ready(Some(msg)) => msg, - Async::Ready(None) => return Ok(None.into()), - Async::NotReady => { - // If we don't have any more messages and our pong timeout - // elapsed (set above) then this is where we start - // triggering errors. - if self.ws_pong_timeout { - return Err(ApcErrorKind::PongTimeout.into()); - } - return Ok(Async::NotReady); - } - }; - match msg { - Message::Text(ref s) => { - trace!("🢤 text message {}", s); - let msg = s - .parse() - .map_err(|_e| ApcErrorKind::InvalidClientMessage(s.to_owned()))?; - return Ok(Some(msg).into()); - } - - Message::Binary(_) => { - return Err( - ApcErrorKind::InvalidClientMessage("binary content".to_string()).into(), - ); - } - - // sending a pong is already managed by lower layers, just go to - // the next message - Message::Ping(_) => {} - - // Wake up ourselves to ensure the above ping logic eventually - // sees this pong. 
- Message::Pong(_) => { - self.ws_pong_received = true; - self.ws_pong_timeout = false; - task::current().notify(); - } - - Message::Close(_) => return Err(tungstenite::Error::ConnectionClosed.into()), - } - } - } -} - -impl Sink for WebpushSocket -where - T: Sink, - ApcError: From, -{ - type SinkItem = ServerMessage; - type SinkError = ApcError; - - fn start_send(&mut self, msg: ServerMessage) -> StartSend { - if self.send_ws_ping()?.is_not_ready() { - return Ok(AsyncSink::NotReady(msg)); - } - let s = msg - .to_json() - .map_err(|_e| ApcErrorKind::GeneralError("failed to serialize".into()))?; - match self.inner.start_send(Message::Text(s))? { - AsyncSink::Ready => Ok(AsyncSink::Ready), - AsyncSink::NotReady(_) => Ok(AsyncSink::NotReady(msg)), - } - } - - /// Handle the poll completion. - fn poll_complete(&mut self) -> Poll<(), ApcError> { - try_ready!(self.send_ws_ping()); - Ok(self.inner.poll_complete()?) - } - - fn close(&mut self) -> Poll<(), ApcError> { - try_ready!(self.poll_complete()); - match self.inner.close() { - Ok(v) => Ok(v), - Err(e) => { - warn!("Close generated an error, possibly ok?"); - Err(From::from(e)) - } - } - } -} - -fn write_status(socket: WebpushIo) -> MyFuture<()> { - write_json( - socket, - StatusCode::OK, - json!({ - "status": "OK", - "version": env!("CARGO_PKG_VERSION"), - }), - ) -} - -/// Return a static copy of `version.json` from compile time. 
-pub fn write_version_file(socket: WebpushIo) -> MyFuture<()> { - write_json( - socket, - StatusCode::OK, - serde_json::Value::from(include_str!("../../../version.json")), - ) -} - -fn write_log_check(socket: WebpushIo) -> MyFuture<()> { - let status = StatusCode::IM_A_TEAPOT; - let code: u16 = status.into(); - - error!("Test Critical Message"; - "status_code" => code, - "errno" => 0, - ); - thread::spawn(|| { - panic!("LogCheck"); - }); - - write_json( - socket, - StatusCode::IM_A_TEAPOT, - json!({ - "code": code, - "errno": 999, - "error": "Test Failure", - "mesage": "FAILURE:Success", - }), - ) -} - -fn write_json(socket: WebpushIo, status: StatusCode, body: serde_json::Value) -> MyFuture<()> { - let body = body.to_string(); - let data = format!( - "\ - HTTP/1.1 {status}\r\n\ - Server: webpush\r\n\ - Date: {date}\r\n\ - Content-Length: {len}\r\n\ - Content-Type: application/json\r\n\ - \r\n\ - {body}\ - ", - status = status, - date = Utc::now().to_rfc2822(), - len = body.len(), - body = body, - ); - Box::new( - tokio_io::io::write_all(socket, data.into_bytes()) - .map(|_| ()) - .map_err(|_e| { - ApcErrorKind::GeneralError("failed to write status response".into()).into() - }), - ) -} diff --git a/autopush/src/server/protocol.rs b/autopush/src/server/protocol.rs deleted file mode 100644 index c2a4a0d84..000000000 --- a/autopush/src/server/protocol.rs +++ /dev/null @@ -1,135 +0,0 @@ -//! Definition of Internal Router and Websocket protocol messages -//! -//! This module is a structured definition of several protocol. Both -//! messages received from the client and messages sent from the server are -//! defined here. The `derive(Deserialize)` and `derive(Serialize)` annotations -//! are used to generate the ability to serialize these structures to JSON, -//! using the `serde` crate. More docs for serde can be found at -//! 
-use std::collections::HashMap; -use std::str::FromStr; - -use serde_derive::{Deserialize, Serialize}; -use uuid::Uuid; - -use autopush_common::notification::Notification; - -#[derive(Debug, Serialize)] -#[serde(untagged)] -pub enum BroadcastValue { - Value(String), - Nested(HashMap), -} - -#[derive(Default)] -// Used for the server to flag a webpush client to deliver a Notification or Check storage -pub enum ServerNotification { - CheckStorage, - Notification(Notification), - #[default] - Disconnect, -} - -#[derive(Debug, Deserialize)] -#[serde(tag = "messageType", rename_all = "snake_case")] -pub enum ClientMessage { - Hello { - uaid: Option, - #[serde(rename = "channelIDs", skip_serializing_if = "Option::is_none")] - channel_ids: Option>, - #[serde(skip_serializing_if = "Option::is_none")] - use_webpush: Option, - #[serde(skip_serializing_if = "Option::is_none")] - broadcasts: Option>, - }, - - Register { - #[serde(rename = "channelID")] - channel_id: String, - key: Option, - }, - - Unregister { - #[serde(rename = "channelID")] - channel_id: Uuid, - code: Option, - }, - - BroadcastSubscribe { - broadcasts: HashMap, - }, - - Ack { - updates: Vec, - }, - - Nack { - code: Option, - version: String, - }, - - Ping, -} - -impl FromStr for ClientMessage { - type Err = serde_json::error::Error; - - fn from_str(s: &str) -> Result { - // parse empty object "{}" as a Ping - serde_json::from_str::>(s) - .map(|_| ClientMessage::Ping) - .or_else(|_| serde_json::from_str(s)) - } -} - -#[derive(Debug, Deserialize)] -pub struct ClientAck { - #[serde(rename = "channelID")] - pub channel_id: Uuid, - pub version: String, -} - -#[derive(Debug, Serialize)] -#[serde(tag = "messageType", rename_all = "snake_case")] -pub enum ServerMessage { - Hello { - uaid: String, - status: u32, - #[serde(skip_serializing_if = "Option::is_none")] - use_webpush: Option, - broadcasts: HashMap, - }, - - Register { - #[serde(rename = "channelID")] - channel_id: Uuid, - status: u32, - #[serde(rename = 
"pushEndpoint")] - push_endpoint: String, - }, - - Unregister { - #[serde(rename = "channelID")] - channel_id: Uuid, - status: u32, - }, - - Broadcast { - broadcasts: HashMap, - }, - - Notification(Notification), - - Ping, -} - -impl ServerMessage { - pub fn to_json(&self) -> Result { - match self { - // clients recognize {"messageType": "ping"} but traditionally both - // client/server send the empty object version - ServerMessage::Ping => Ok("{}".to_owned()), - _ => serde_json::to_string(self), - } - } -} diff --git a/autopush/src/server/rc.rs b/autopush/src/server/rc.rs deleted file mode 100644 index 8eb47a458..000000000 --- a/autopush/src/server/rc.rs +++ /dev/null @@ -1,53 +0,0 @@ -use std::cell::{RefCell, RefMut}; -use std::rc::Rc; - -use futures::{Poll, Sink, StartSend, Stream}; - -/// Helper object to turn `Rc>` into a `Stream` and `Sink` -/// -/// This is basically just a helper to allow multiple "owning" references to a -/// `T` which is both a `Stream` and a `Sink`. Similar to `Stream::split` in the -/// futures crate, but doesn't actually split it (and allows internal access). 
-pub struct RcObject(Rc>); - -impl RcObject { - pub fn new(t: T) -> RcObject { - RcObject(Rc::new(RefCell::new(t))) - } - - pub fn borrow_mut(&self) -> RefMut<'_, T> { - self.0.borrow_mut() - } -} - -impl Stream for RcObject { - type Item = T::Item; - type Error = T::Error; - - fn poll(&mut self) -> Poll, T::Error> { - self.0.borrow_mut().poll() - } -} - -impl Sink for RcObject { - type SinkItem = T::SinkItem; - type SinkError = T::SinkError; - - fn start_send(&mut self, msg: T::SinkItem) -> StartSend { - self.0.borrow_mut().start_send(msg) - } - - fn poll_complete(&mut self) -> Poll<(), T::SinkError> { - self.0.borrow_mut().poll_complete() - } - - fn close(&mut self) -> Poll<(), T::SinkError> { - self.0.borrow_mut().close() - } -} - -impl Clone for RcObject { - fn clone(&self) -> RcObject { - RcObject(self.0.clone()) - } -} diff --git a/autopush/src/server/registry.rs b/autopush/src/server/registry.rs deleted file mode 100644 index 11268b595..000000000 --- a/autopush/src/server/registry.rs +++ /dev/null @@ -1,117 +0,0 @@ -use std::collections::HashMap; - -use futures::{ - future::{err, ok}, - Future, -}; -use futures_locks::RwLock; -use uuid::Uuid; - -use super::protocol::ServerNotification; -use super::Notification; -use crate::client::RegisteredClient; -use autopush_common::errors::{ApcError, ApcErrorKind}; - -/// `notify` + `check_storage` are used under hyper (http.rs) which `Send` -/// futures. -/// -/// `MySendFuture` is `Send` for this, similar to modern futures `BoxFuture`. -/// `MyFuture` isn't, similar to modern futures `BoxLocalFuture` -pub type MySendFuture = Box + Send>; - -#[derive(Default)] -pub struct ClientRegistry { - clients: RwLock>, -} - -impl ClientRegistry { - /// Informs this server that a new `client` has connected - /// - /// For now just registers internal state by keeping track of the `client`, - /// namely its channel to send notifications back. 
- pub fn connect(&self, client: RegisteredClient) -> MySendFuture<()> { - debug!("Connecting a client!"); - Box::new( - self.clients - .write() - .and_then(|mut clients| { - if let Some(client) = clients.insert(client.uaid, client) { - // Drop existing connection - let result = client.tx.unbounded_send(ServerNotification::Disconnect); - if result.is_ok() { - debug!("Told client to disconnect as a new one wants to connect"); - } - } - ok(()) - }) - .map_err(|_| ApcErrorKind::GeneralError("clients lock poisoned".into()).into()), - ) - - //.map_err(|_| "clients lock poisioned") - } - - /// A notification has come for the uaid - pub fn notify(&self, uaid: Uuid, notif: Notification) -> MySendFuture<()> { - let fut = self - .clients - .read() - .and_then(move |clients| { - debug!("Sending notification"); - if let Some(client) = clients.get(&uaid) { - debug!("Found a client to deliver a notification to"); - let result = client - .tx - .unbounded_send(ServerNotification::Notification(notif)); - if result.is_ok() { - debug!("Dropped notification in queue"); - return ok(()); - } - } - err(()) - }) - .map_err(|_| ApcErrorKind::GeneralError("User not connected".into()).into()); - Box::new(fut) - } - - /// A check for notification command has come for the uaid - pub fn check_storage(&self, uaid: Uuid) -> MySendFuture<()> { - let fut = self - .clients - .read() - .and_then(move |clients| { - if let Some(client) = clients.get(&uaid) { - let result = client.tx.unbounded_send(ServerNotification::CheckStorage); - if result.is_ok() { - debug!("Told client to check storage"); - return ok(()); - } - } - err(()) - }) - .map_err(|_| ApcErrorKind::GeneralError("User not connected".into()).into()); - Box::new(fut) - } - - /// The client specified by `uaid` has disconnected. 
- #[allow(clippy::clone_on_copy)] - pub fn disconnect(&self, uaid: &Uuid, uid: &Uuid) -> MySendFuture<()> { - debug!("Disconnecting client!"); - let uaidc = uaid.clone(); - let uidc = uid.clone(); - let fut = self - .clients - .write() - .and_then(move |mut clients| { - let client_exists = clients - .get(&uaidc) - .map_or(false, |client| client.uid == uidc); - if client_exists { - clients.remove(&uaidc).expect("Couldn't remove client?"); - return ok(()); - } - err(()) - }) - .map_err(|_| ApcErrorKind::GeneralError("User not connected".into()).into()); - Box::new(fut) - } -} diff --git a/autopush/src/server/tls.rs b/autopush/src/server/tls.rs deleted file mode 100644 index 7f29f920c..000000000 --- a/autopush/src/server/tls.rs +++ /dev/null @@ -1,147 +0,0 @@ -//! TLS support for the autopush server -//! -//! Currently tungstenite in the way we use it just operates generically over an -//! `AsyncRead`/`AsyncWrite` stream, so this provides a `MaybeTlsStream` type -//! which dispatches at runtime whether it's a plaintext or TLS stream after a -//! connection is established. - -use std::fs::File; -use std::io::{self, Read, Write}; -use std::path::Path; -use std::rc::Rc; - -use crate::MyFuture; - -use autopush_common::errors::*; -use futures::future; -use futures::{Future, Poll}; -use openssl::dh::Dh; -use openssl::pkey::PKey; -use openssl::ssl::{SslAcceptor, SslMethod, SslMode}; -use openssl::x509::X509; -use tokio_core::net::TcpStream; -use tokio_io::{AsyncRead, AsyncWrite}; -use tokio_openssl::{SslAcceptorExt, SslStream}; - -use crate::server::{AppState, Server}; - -/// Creates an `SslAcceptor`, if needed, ready to accept TLS connections. -/// -/// This method is called early on when the server is created and the -/// `SslAcceptor` type is stored globally in the `Server` structure, later used -/// to process all accepted TCP sockets. 
-pub fn configure(app_state: &AppState) -> Option { - let key = match app_state.ssl_key { - Some(ref key) => read(key), - None => return None, - }; - let key = PKey::private_key_from_pem(&key).expect("failed to create private key"); - let cert = read( - app_state - .ssl_cert - .as_ref() - .expect("ssl_cert not configured"), - ); - let cert = X509::from_pem(&cert).expect("failed to create certificate"); - - let mut builder = SslAcceptor::mozilla_intermediate(SslMethod::tls()) - .expect("failed to create ssl acceptor builder"); - builder - .set_private_key(&key) - .expect("failed to set private key"); - builder - .set_certificate(&cert) - .expect("failed to set certificate"); - builder - .check_private_key() - .expect("private key check failed"); - - if let Some(dh_param) = app_state.ssl_dh_param.as_ref() { - let dh_param = Dh::params_from_pem(&read(dh_param)).expect("failed to create dh"); - builder - .set_tmp_dh(&dh_param) - .expect("failed to set dh_param"); - } - - // Should help reduce peak memory consumption for idle connections - builder.set_mode(SslMode::RELEASE_BUFFERS); - - return Some(builder.build()); - - fn read(path: &Path) -> Vec { - let mut out = Vec::new(); - File::open(path) - .unwrap_or_else(|_| panic!("failed to open {path:?}")) - .read_to_end(&mut out) - .unwrap_or_else(|_| panic!("failed to read {path:?}")); - out - } -} - -/// Performs the TLS handshake, if necessary, for a socket. -/// -/// This is typically called just after a socket has been accepted from the TCP -/// listener. If the server is configured without TLS then this will immediately -/// return with a plaintext socket, or otherwise it will perform an asynchronous -/// TLS handshake and only resolve once that's completed. 
-pub fn accept(srv: &Rc, socket: TcpStream) -> MyFuture> { - match srv.tls_acceptor { - Some(ref acceptor) => Box::new( - acceptor - .accept_async(socket) - .map(MaybeTlsStream::Tls) - .map_err(|_e| { - ApcErrorKind::GeneralError("failed to accept TLS socket".into()).into() - }), - ), - None => Box::new(future::ok(MaybeTlsStream::Plain(socket))), - } -} - -pub enum MaybeTlsStream { - Plain(T), - Tls(SslStream), -} - -impl Read for MaybeTlsStream { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - match *self { - MaybeTlsStream::Plain(ref mut s) => s.read(buf), - MaybeTlsStream::Tls(ref mut s) => s.read(buf), - } - } -} - -impl Write for MaybeTlsStream { - fn write(&mut self, buf: &[u8]) -> io::Result { - match *self { - MaybeTlsStream::Plain(ref mut s) => s.write(buf), - MaybeTlsStream::Tls(ref mut s) => s.write(buf), - } - } - - fn flush(&mut self) -> io::Result<()> { - match *self { - MaybeTlsStream::Plain(ref mut s) => s.flush(), - MaybeTlsStream::Tls(ref mut s) => s.flush(), - } - } -} - -impl AsyncRead for MaybeTlsStream { - unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool { - match *self { - MaybeTlsStream::Plain(ref s) => s.prepare_uninitialized_buffer(buf), - MaybeTlsStream::Tls(ref s) => s.prepare_uninitialized_buffer(buf), - } - } -} - -impl AsyncWrite for MaybeTlsStream { - fn shutdown(&mut self) -> Poll<(), io::Error> { - match *self { - MaybeTlsStream::Plain(ref mut s) => s.shutdown(), - MaybeTlsStream::Tls(ref mut s) => s.shutdown(), - } - } -} diff --git a/autopush/src/server/webpush_io.rs b/autopush/src/server/webpush_io.rs deleted file mode 100644 index cdd945361..000000000 --- a/autopush/src/server/webpush_io.rs +++ /dev/null @@ -1,84 +0,0 @@ -//! I/O wrapper created through `Dispatch` -//! -//! Most I/O happens through just raw TCP sockets, but at the beginning of a -//! request we'll take a look at the headers and figure out where to route it. -//! 
After that, for tungstenite the websocket library, we'll want to replay the -//! data we already read as there's no ability to pass this in currently. That -//! means we'll parse headers twice, but alas! - -use std::backtrace; -use std::io::{self, Read, Write}; - -use bytes::BytesMut; -use futures::Poll; -use tokio_core::net::TcpStream; -use tokio_io::{AsyncRead, AsyncWrite}; - -use crate::server::tls::MaybeTlsStream; - -pub struct WebpushIo { - tcp: MaybeTlsStream, - header_to_read: Option, -} - -impl WebpushIo { - pub fn new(tcp: MaybeTlsStream, header: BytesMut) -> Self { - Self { - tcp, - header_to_read: Some(header), - } - } -} - -impl Read for WebpushIo { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - // Start off by replaying the bytes already read, and after that just - // delegate everything to the internal `TcpStream` - if let Some(ref mut header) = self.header_to_read { - let n = (&header[..]).read(buf)?; - header.split_to(n); - if buf.is_empty() || n > 0 { - return Ok(n); - } - } - self.header_to_read = None; - let res = self.tcp.read(buf); - if res.is_err() { - if let Some(e) = res.as_ref().err() { - if e.kind() == std::io::ErrorKind::WouldBlock { - // quietly report the error. The socket closed early. - trace!("🢤 Detected WouldBlock, connection terminated abruptly"); - } else { - // report the backtrace because it can be difficult to determine - // what the caller is. - warn!("🢤 ERR: {:?}\n{:?}", &e, backtrace::Backtrace::capture()); - } - } - } else { - trace!("🢤 {:?}", &res); - } - res - } -} - -// All `write` calls are routed through the `TcpStream` instance directly as we -// don't buffer this at all. 
-impl Write for WebpushIo { - fn write(&mut self, buf: &[u8]) -> io::Result { - trace!("🢧 {:?}", String::from_utf8_lossy(buf)); - self.tcp.write(buf) - } - - fn flush(&mut self) -> io::Result<()> { - trace!("🚽"); - self.tcp.flush() - } -} - -impl AsyncRead for WebpushIo {} - -impl AsyncWrite for WebpushIo { - fn shutdown(&mut self) -> Poll<(), io::Error> { - AsyncWrite::shutdown(&mut self.tcp) - } -} diff --git a/autopush/src/settings.rs b/autopush/src/settings.rs deleted file mode 100644 index 143b5b9e5..000000000 --- a/autopush/src/settings.rs +++ /dev/null @@ -1,250 +0,0 @@ -use std::io; -use std::net::ToSocketAddrs; - -use config::{Config, ConfigError, Environment, File}; -use fernet::Fernet; -use lazy_static::lazy_static; -use serde_derive::Deserialize; - -const ENV_PREFIX: &str = "autopush"; - -lazy_static! { - static ref HOSTNAME: String = mozsvc_common::get_hostname() - .expect("Couldn't get_hostname") - .into_string() - .expect("Couldn't convert get_hostname"); - static ref RESOLVED_HOSTNAME: String = resolve_ip(&HOSTNAME) - .unwrap_or_else(|_| panic!("Failed to resolve hostname: {}", *HOSTNAME)); -} - -/// Resolve a hostname to its IP if possible -fn resolve_ip(hostname: &str) -> io::Result { - Ok((hostname, 0) - .to_socket_addrs()? 
- .next() - .map_or_else(|| hostname.to_owned(), |addr| addr.ip().to_string())) -} - -/// Indicate whether the port should be included for the given scheme -fn include_port(scheme: &str, port: u16) -> bool { - !((scheme == "http" && port == 80) || (scheme == "https" && port == 443)) -} - -#[derive(Debug, Deserialize)] -#[serde(default)] -pub struct Settings { - pub port: u16, - pub hostname: Option, - pub resolve_hostname: bool, - pub router_port: u16, - pub router_hostname: Option, - pub router_tablename: String, - pub message_tablename: String, - pub router_ssl_key: Option, - pub router_ssl_cert: Option, - pub router_ssl_dh_param: Option, - pub auto_ping_interval: f64, - pub auto_ping_timeout: f64, - pub max_connections: u32, - pub close_handshake_timeout: u32, - pub endpoint_scheme: String, - pub endpoint_hostname: String, - pub endpoint_port: u16, - pub crypto_key: String, - pub statsd_host: String, - pub statsd_port: u16, - pub aws_ddb_endpoint: Option, - pub megaphone_api_url: Option, - pub megaphone_api_token: Option, - pub megaphone_poll_interval: u32, - pub human_logs: bool, - pub msg_limit: u32, -} - -impl Default for Settings { - fn default() -> Self { - Self { - port: 8080, - hostname: None, - resolve_hostname: false, - router_port: 8081, - router_hostname: None, - router_tablename: "router".to_owned(), - message_tablename: "message".to_owned(), - router_ssl_key: None, - router_ssl_cert: None, - router_ssl_dh_param: None, - auto_ping_interval: 300.0, - auto_ping_timeout: 4.0, - max_connections: 0, - close_handshake_timeout: 0, - endpoint_scheme: "http".to_owned(), - endpoint_hostname: "localhost".to_owned(), - endpoint_port: 8082, - crypto_key: format!("[{}]", Fernet::generate_key()), - statsd_host: "localhost".to_owned(), - statsd_port: 8125, - aws_ddb_endpoint: None, - megaphone_api_url: None, - megaphone_api_token: None, - megaphone_poll_interval: 30, - human_logs: false, - msg_limit: 100, - } - } -} - -impl Settings { - /// Load the settings from 
the config files in order first then the environment. - pub fn with_env_and_config_files(filenames: &[String]) -> Result { - let mut s = Config::builder(); - - // Merge the configs from the files - for filename in filenames { - s = s.add_source(File::with_name(filename)); - } - - // Merge the environment overrides - s = s.add_source(Environment::with_prefix(ENV_PREFIX).separator("__")); - // s = s.add_source(Environment::with_prefix(ENV_PREFIX)); - - let built = s.build()?; - built.try_deserialize::() - } - - pub fn router_url(&self) -> String { - let router_scheme = if self.router_ssl_key.is_none() { - "http" - } else { - "https" - }; - let url = format!( - "{}://{}", - router_scheme, - self.router_hostname - .as_ref() - .map_or_else(|| self.get_hostname(), String::clone), - ); - if include_port(router_scheme, self.router_port) { - format!("{}:{}", url, self.router_port) - } else { - url - } - } - - pub fn endpoint_url(&self) -> String { - let url = format!("{}://{}", self.endpoint_scheme, self.endpoint_hostname,); - if include_port(&self.endpoint_scheme, self.endpoint_port) { - format!("{}:{}", url, self.endpoint_port) - } else { - url - } - } - - fn get_hostname(&self) -> String { - if let Some(ref hostname) = self.hostname { - if self.resolve_hostname { - resolve_ip(hostname) - .unwrap_or_else(|_| panic!("Failed to resolve provided hostname: {hostname}")) - } else { - hostname.clone() - } - } else if self.resolve_hostname { - RESOLVED_HOSTNAME.clone() - } else { - HOSTNAME.clone() - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_router_url() { - let mut settings = Settings { - router_hostname: Some("testname".to_string()), - router_port: 80, - ..Default::default() - }; - let url = settings.router_url(); - assert_eq!("http://testname", url); - - settings.router_port = 8080; - let url = settings.router_url(); - assert_eq!("http://testname:8080", url); - - settings.router_port = 443; - settings.router_ssl_key = Some("key".to_string()); 
- let url = settings.router_url(); - assert_eq!("https://testname", url); - - settings.router_port = 8080; - let url = settings.router_url(); - assert_eq!("https://testname:8080", url); - } - - #[test] - fn test_endpoint_url() { - let mut settings = Settings { - endpoint_hostname: "testname".to_string(), - endpoint_port: 80, - endpoint_scheme: "http".to_string(), - ..Default::default() - }; - let url = settings.endpoint_url(); - assert_eq!("http://testname", url); - - settings.endpoint_port = 8080; - let url = settings.endpoint_url(); - assert_eq!("http://testname:8080", url); - - settings.endpoint_port = 443; - settings.endpoint_scheme = "https".to_string(); - let url = settings.endpoint_url(); - assert_eq!("https://testname", url); - - settings.endpoint_port = 8080; - let url = settings.endpoint_url(); - assert_eq!("https://testname:8080", url); - } - - #[test] - fn test_default_settings() { - // Test that the Config works the way we expect it to. - use std::env; - let port = format!("{ENV_PREFIX}__PORT").to_uppercase(); - let msg_limit = format!("{ENV_PREFIX}__MSG_LIMIT").to_uppercase(); - let fernet = format!("{ENV_PREFIX}__CRYPTO_KEY").to_uppercase(); - - let v1 = env::var(&port); - let v2 = env::var(&msg_limit); - env::set_var(&port, "9123"); - env::set_var(&msg_limit, "123"); - env::set_var(&fernet, "[mqCGb8D-N7mqx6iWJov9wm70Us6kA9veeXdb8QUuzLQ=]"); - let settings = Settings::with_env_and_config_files(&Vec::new()).unwrap(); - assert_eq!(settings.endpoint_hostname, "localhost".to_owned()); - assert_eq!(&settings.port, &9123); - assert_eq!(&settings.msg_limit, &123); - assert_eq!( - &settings.crypto_key, - "[mqCGb8D-N7mqx6iWJov9wm70Us6kA9veeXdb8QUuzLQ=]" - ); - - // reset (just in case) - if let Ok(p) = v1 { - trace!("Resetting {}", &port); - env::set_var(&port, p); - } else { - env::remove_var(&port); - } - if let Ok(p) = v2 { - trace!("Resetting {}", msg_limit); - env::set_var(&msg_limit, p); - } else { - env::remove_var(&msg_limit); - } - 
env::remove_var(&fernet); - } -} diff --git a/configs/autoendpoint.toml.sample b/configs/autoendpoint.toml.sample index 3ac36938a..a45bf7e5d 100644 --- a/configs/autoendpoint.toml.sample +++ b/configs/autoendpoint.toml.sample @@ -81,29 +81,3 @@ # "sandbox": true # } #}""" - -# Settings for the Amazon Device Messaging router -[adm] -# The minimum TTL to use. If a notification's TTL is shorter than this, it will -# be set to this value. -#min_ttl = 60 - -# The max size of notification data in bytes. This is usually dictated by ADM to -# be about 6KB. -#max_data = 6000 - -# The number of seconds to wait for FCM requests to complete -#timeout = 3 - -# The base URL to use when sending messages -#base_url = "https://api.amazon.com" - -# The credentials to use for each profile. This setting is a JSON dictionary -# where the key is the app ID. The client ID and secret are supplied for each -# application. -#profiles = """{ -# "test": { -# "client_id": "...", -# "client_secret": "..." -# } -#}""" diff --git a/configs/autopush.toml.sample b/configs/autopush.toml.sample index 67a8fc32c..276b74973 100644 --- a/configs/autopush.toml.sample +++ b/configs/autopush.toml.sample @@ -51,10 +51,6 @@ # The port of the metrics server #statsd_port = 8125 -# Override the DynamoDB endpoint via the AWS_LOCAL_DYNAMODB environment -# variable. No default value. -#aws_ddb_endpoint = "..." - # The name of the router table #router_tablename = "router" @@ -81,4 +77,4 @@ # The max number of stored messages to return to a connecting client. If this # limit is reached, the client is dropped and must re-register. -#msg_limit = 100 +#msg_limit = 150 diff --git a/docs/src/adm.md b/docs/src/adm.md index 7ede63a1e..8998c8f27 100644 --- a/docs/src/adm.md +++ b/docs/src/adm.md @@ -1,4 +1,4 @@ -# Configuring the Amazon Device Messaging Bridge +# Configuring the Amazon Device Messaging Bridge (deprecated)
ADM is no longer supported by Autopush. This section is obsolete.
diff --git a/docs/src/architecture.md b/docs/src/architecture.md index cac33bcb9..7bef278e0 100644 --- a/docs/src/architecture.md +++ b/docs/src/architecture.md @@ -44,15 +44,33 @@ If you are only running Autopush locally, you can skip to `running` as later topics in this document apply only to developing or production scale deployments of Autopush. +## WebPush Sort Keys + +Messages for WebPush are stored using a partition key + sort key, originally the sort key was: + + CHID : Encrypted(UAID: CHID) + +The encrypted portion was returned as the Location to the Application Server. Decrypting it resulted in enough information to create the sort key so that the message could be deleted and located again. + +For WebPush Topic messages, a new scheme was needed since the only way to locate the prior message is the UAID + CHID + Topic. Using Encryption in the sort key is therefore not useful since it would change every update. + +The sort key scheme for WebPush messages is: + + VERSION : CHID : TOPIC + +To ensure updated messages are not deleted, each message will still have an update-id key/value in its item. + +Non-versioned messages are assumed to be original messages from before this scheme was adopted. + +``VERSION`` is a 2-digit 0-padded number, starting at 01 for Topic messages. + ## Storage Tables -Autopush uses a key / value data storage system. It can either use -AWS DynamoDB (legacy), Google Cloud Bigtable, or a specific combination of the -two. +Autopush uses Google Cloud Bigtable as a key / value data storage system. ### DynamoDB (legacy) -For DynamoDB, Autopush used a single router and messages table. +Previously, for DynamoDB, Autopush used a single router and messages table. On startup, Autopush created these tables. For more information on DynamoDB tables, see @@ -89,6 +107,8 @@ Please note, this document will refer to the `message` table and the `router` ta legacy reasons. Please consider these to be the same as the `message` and `router` cell families. 
+ + ### Router Table Schema The router table contains info about how to send out the incoming message. @@ -196,17 +216,17 @@ Autopush used a [table rotation system](table_rotation.md), which is now legacy. Endpoint will deliver the message to the client completely bypassing Storage. This Notification will be referred to as a Direct Notification vs. a Stored Notification. -* (_DynamoDb_) Provisioned Write Throughput for the Router table determines how +* (_DynamoDb (legacy)_) Provisioned Write Throughput for the Router table determines how many connections per second can be accepted across the entire cluster. -* (_DynamoDb_) Provisioned Read Throughput for the Router table **and** Provisioned +* (_DynamoDb (legacy)_) Provisioned Read Throughput for the Router table **and** Provisioned Write throughput for the Storage table determine maximum possible notifications per second that can be handled. In theory notification throughput can be higher than Provisioned Write Throughput on the Storage as connected clients will frequently not require using Storage at all. Read's to the Router table are still needed for every notification, whether Storage is hit or not. -* (_DynamoDb_) Provisioned Read Throughput on for the Storage table is an important +* (_DynamoDb (legacy)_) Provisioned Read Throughput on for the Storage table is an important factor in maximum notification throughput, as many slow clients may require frequent Storage checks. * If a client is reconnecting, their Router record will be old. Router diff --git a/docs/bigtable.md b/docs/src/bigtable-emulation.md similarity index 70% rename from docs/bigtable.md rename to docs/src/bigtable-emulation.md index 9709ab9c3..3e9b2007e 100644 --- a/docs/bigtable.md +++ b/docs/src/bigtable-emulation.md @@ -1,11 +1,19 @@ -# Using bigtable emulation for testing +# Bigtable Emulation -The bigtable emulator is memory only. 
No structure persists after +## Using Bigtable Emulation for Testing + +The Bigtable emulator is memory only. No structure persists after restart. -## Getting the SDK +You can also read the testing documentation for these steps. + +## Getting the Google CloudSDK & gcloud beta component + +See [the Google Cloud CLI installation page](https://cloud.google.com/sdk/docs/install#deb). -see [the Google Cloud CLI installation page](https://cloud.google.com/sdk/docs/install#deb) +Currently, for operations involving the Big Table emulator, this requires installation of the +beta component of the gcloud CLI. See the [documentation here](https://cloud.google.com/sdk/docs/components) +to install `gcloud beta` once the alpha CLI is installed. for docker: google/cloud-sdk:latest diff --git a/docs/src/http.md b/docs/src/http.md index c72c317ea..167741ae4 100644 --- a/docs/src/http.md +++ b/docs/src/http.md @@ -253,10 +253,18 @@ An example of the Authorization header would be: _The [VAPID Key](https://datatracker.ietf.org/doc/html/rfc8292#section-3.2) provided by the subscribing third party_ The VAPID key is optional and provides a way for an application server to voluntarily identify itself. -When the VAPID key is provided, autopush will return an endpoint that can only be used by the application server that provided the key. -**The VAPID key is formatted as a URL-safe Base64 encoded string with no padding.** +_*Please Note*_: While the VAPID key is optional, if it is included, the VAPID asserion block _must_ contain a `sub` field containing the publishing contact information as a vaild URI designator. (e.g. `mailto:admin+webpush@example.org` or `https://example.org/contact`). As an example, a minimal VAPID assertion block would contain: + +```json +{"aud": "https://updates.push.services.mozilla.com", "exp": 1725468595, "sub": "mailto:admin+webpush@example.com"} +``` + +Where `exp` and `sub` reflect the expiration time and publishing contact information. 
The contact information is used in case of an issue with use of the Push service and is never used for marketing purposes. + +When the VAPID key is provided, autopush will return an endpoint that can only be used by the application server that provided the key. +**The VAPID key is formatted as a URL-safe Base64 encoded string with no padding.** ## Calls diff --git a/docs/src/index.md b/docs/src/index.md index 0ca1c6324..cc34c4d16 100644 --- a/docs/src/index.md +++ b/docs/src/index.md @@ -70,12 +70,13 @@ it's recommended that you first familiarize yourself with * [Using a Local Storage Server](install.md#local-storage) * [Testing](testing.md) * [Testing Configuration](testing.md#testing-configuration) + * [Bigtable Google Cloud Emulation](bigtable-emulation.md) * [Running Tests](testing.md#running-tests) * [Firefox Testing](testing.md#firefox-testing) -* [Release Process](release.md) - * [Versions](release.md#versions) - * [Dev Releases](release.md#dev-releases) - * [Stage/Production Releases](release.md#stage-production-releases) +* [Release Process](releasing.md) + * [Versions](releasing.md#versions) + * [Dev Releases](releasing.md#dev-releases) + * [Stage/Production Releases](releasing.md#stage-production-releases) * [Coding Style Guide](style.md) * [Exceptions](style.md#exceptions) diff --git a/docs/src/install.md b/docs/src/install.md index 198b8e114..cd54f533e 100644 --- a/docs/src/install.md +++ b/docs/src/install.md @@ -86,9 +86,6 @@ available in the virtualenv `bin/` directory: | `endpoint_diagnostic` | Runs Endpoint diagnostics | | `autokey` | Endpoint encryption key generator | -If you are planning on using DynamoDB as your storage, you will need to have a [boto config -file](http://boto3.readthedocs.io/en/docs/guide/quickstart.html#configuration) -file or `AWS` environment keys setup. If you are planning on using Google Cloud Bigtable, you will need to configure your `GOOGLE_APPLICATION_CREDENTIALS`. 
See [How Application Default Credentials works](https://cloud.google.com/docs/authentication/application-default-credentials) @@ -123,37 +120,6 @@ This specifies the URL to the storage system to use. See following sections for **db_settings** This is a serialized JSON dictionary containing the storage specific settings. -## Using a Local DynamoDB Server - -Amazon supplies a [Local DynamoDB Java -server](http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tools.DynamoDBLocal.html) -to use for local testing that implements the complete DynamoDB API. This -is used for automated unit testing on Travis and can be used to run -autopush locally for testing. - -You will need the Java JDK 6.x or newer. - -To setup the server locally: - -``` bash -mkdir ddb -curl -sSL http://dynamodb-local.s3-website-us-west-2.amazonaws.com/dynamodb_local_latest.tar.gz | tar xzvC ddb/ -java -Djava.library.path=./ddb/DynamoDBLocal_lib -jar ./ddb/DynamoDBLocal.jar -sharedDb -inMemory -``` - -An example [boto config -file](http://boto3.readthedocs.io/en/docs/guide/quickstart.html#configuration) -is provided in `automock/boto.cfg` that directs autopush to your local -DynamoDB instance. - -The `db_dsn` to use for this would be the same as what is specified in the `AWS_LOCAL_DYNAMODB` environment variable. e.g. `http://127.0.0.1:8000` - -The `db_settings` contains a JSON dictionary indicating the names of the message and router table (remember to escape these values for whatever system you are using): - -```json -{"message_table":"message","router_table":"router"} -``` - ## Using Google Bigtable Emulator locally Google supplies [a Bigtable emulator](https://cloud.google.com/sdk/gcloud/reference/beta/emulators) as part of their free [SDK](https://cloud.google.com/sdk). 
Install the [Cloud CLI](https://cloud.google.com/sdk/docs/install), per their instructions, and then start the Bigtable emulator by running @@ -190,7 +156,7 @@ For example, if we were to use the values from the initializion script above (re {"message_family":"message","message_topic_family":"message_topic","router_family":"router","table_name":"projects/test/instances/test/tables/autopush"} ``` -## Using the "Dual" storage configuration +## Using the "Dual" storage configuration (legacy) Dual is a temporary system to be used to transition user data from one system to another. The "primary" system is read/write, while the "secondary" is read only, and is only read when a value is not found in the "primary" storage. diff --git a/docs/src/running.md b/docs/src/running.md index 2f06cce29..f7a081591 100644 --- a/docs/src/running.md +++ b/docs/src/running.md @@ -24,9 +24,7 @@ can be run on separate hosts as well. *#TODO* rebuild the docker-compose.yaml files based off of syncstorage ones. - [ ] rebuild docker-componse.yaml - - [ ] selectable for dynamodb, bigtable, dual - [ ] initialize tables - - [ ] - [] define steps here