diff --git a/.bazelrc b/.bazelrc
index fdae62c2..e3d6c35d 100644
--- a/.bazelrc
+++ b/.bazelrc
@@ -5,7 +5,7 @@ build --output_filter='^//((?!(third_party):).)*$'`
build --color=yes
build --@io_bazel_rules_docker//transitions:enable=false
build --workspace_status_command="bash tools/get_workspace_status"
-build --copt=-Werror=thread-safety-analysis
+build --copt=-Werror=thread-safety
build --config=clang
build --config=noexcept
# Disable some ROMA error checking
@@ -118,7 +118,7 @@ build:gcp_platform --@google_privacysandbox_servers_common//:platform=gcp
build:prod_mode --//:mode=prod
build:prod_mode --@google_privacysandbox_servers_common//:build_flavor=prod
-# --config prod_mode: builds the service in prod mode
+# --config nonprod_mode: builds the service in nonprod mode
build:nonprod_mode --//:mode=nonprod
build:nonprod_mode --@google_privacysandbox_servers_common//:build_flavor=non_prod
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index fe88324b..4bc1daa7 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -27,7 +27,7 @@ exclude: (?x)^(
fail_fast: true
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
- rev: v4.5.0
+ rev: v4.6.0
hooks:
- id: end-of-file-fixer
- id: fix-byte-order-marker
@@ -60,12 +60,12 @@ repos:
exclude: ^(google_internal|builders/images)/.*$
- repo: https://github.com/bufbuild/buf
- rev: v1.29.0
+ rev: v1.31.0
hooks:
- id: buf-format
- repo: https://github.com/pre-commit/mirrors-clang-format
- rev: v17.0.6
+ rev: v18.1.4
hooks:
- id: clang-format
types_or:
@@ -119,7 +119,7 @@ repos:
)$
- repo: https://github.com/DavidAnson/markdownlint-cli2
- rev: v0.12.1
+ rev: v0.13.0
hooks:
- id: markdownlint-cli2
name: lint markdown
@@ -154,7 +154,7 @@ repos:
- --quiet
- repo: https://github.com/psf/black
- rev: 24.2.0
+ rev: 24.4.0
hooks:
- id: black
name: black python formatter
diff --git a/BUILD.bazel b/BUILD.bazel
index 6c8c9cb4..483d1d9a 100644
--- a/BUILD.bazel
+++ b/BUILD.bazel
@@ -19,8 +19,9 @@ load("@io_bazel_rules_go//go:def.bzl", "nogo")
package(default_visibility = ["//:__subpackages__"])
# Config settings to determine which platform the system will be built to run on
+# Use --config=VALUE_platform specified in .bazelrc instead of using this flag directly.
# Example:
-# bazel build components/... --//:platform=aws
+# bazel build components/... --config=aws_platform
string_flag(
name = "platform",
build_setting_default = "aws",
@@ -64,6 +65,10 @@ config_setting(
],
)
+# Config settings to determine which instance the system will be built to run on
+# Use --config=VALUE_instance specified in .bazelrc instead of using this flag directly.
+# Example:
+# bazel build components/... --config=aws_instance
string_flag(
name = "instance",
build_setting_default = "aws",
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6699b468..07125613 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,84 @@
All notable changes to this project will be documented in this file. See [commit-and-tag-version](https://github.com/absolute-version/commit-and-tag-version) for commit guidelines.
+## 0.17.0 (2024-07-08)
+
+
+### Features
+
+* Add a set wrapper around bitset for storing uint32 values
+* Add a thread safe wrapper around hash map
+* Add b&a e2e test env
+* Add data loading support for uint32 sets
+* Add health check to AWS mesh.
+* Add hook for running set query using uint32 sets as input
+* Add interestGroupNames to V1 API
+* Add latency metrics for cache uint32 sets functions
+* Add latency without custom code execution metric
+* Add option to use existing network on AWS.
+* Add padding to responses
+* Add request log context to request context
+* Add runsetqueryint udf hook
+* Add set operation functions for bitsets
+* Add support for int32_t sets to key value cache
+* Add support for reading and writing int sets to csv files
+* Add udf hook for running int sets set query (local lookup)
+* Allow pas request to pass consented debug config and log context
+* Implement sharded RunSetQueryInt rpc for lookup client
+* Implement uint32 sets sharded lookup support
+* Load consented debug token from server parameter
+* Pass LogContext and ConsentedDebugConfig to internal lookup server in sharded case
+* Plumb the safe path log context in the cache update execution path
+* Set verbosity level for PS_VLOG
+* Simplify thread safe hash map and use a single map for node storage
+* Support uint32 sets for query parsing and evaluation
+* Support uint32 sets in InternalLookup rpc
+* Switch absl log for PS_LOG and PS_VLOG for unsafe code path
+* Switch absl log to PS_LOG for safe code path
+* Switch absl vlog to PS_VLOG for safe code path
+* Update AWS coordinators public prod endpoint from GG to G3P
+
+
+### Bug Fixes
+
+* Add missing include/library deps
+* Augment UDF loading info message
+* Correct copts build config.
+* Correct verbosity flag for gcp validator.
+* Effectively lock the key in the set map cleanup
+* Fix detached head of continuous e2e branch.
+* Properly initialize runSetQueryInt hook
+* Remove ignore interestGroupNames from envoy
+* Remove test filter to allow all unit tests run in the build
+* Simplify request context and pass it as shared pointer to the hooks
+* Upgrade common repo version
+* Use kms_binaries tar target from common repo
+* Use structured initializer for clarity
+
+
+### Dependencies
+
+* **deps:** Upgrade build-system to 0.62.0
+* **deps:** Upgrade data-plane-shared-libraries to 52239f15 2024-05-21
+* **deps:** Upgrade pre-commit hooks
+
+
+### GCP: Features
+
+* **GCP:** Switch to internal lb for the otlp collector
+* **GCP:** Switch to internal lb for the otlp collector with bug fixes
+
+
+### Documentation
+
+* Add debugging playbook
+* Correct commands for sample_word2vec getting_started example
+* KV onboarding guide
+* Update to the ads retrieval explainer
+* Update word2vec example
+* Use aws_platform bazel config
+* Use local_{platform,instance} bazel configs
+
## 0.16.0 (2024-04-05)
diff --git a/README.md b/README.md
index d1632784..fcf6a733 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,16 @@
+> [!IMPORTANT]
+>
+> The `main` branch hosts live code with latest changes. It is unstable and is used for development.
+> It is suitable for contribution and inspection of the latest code. The `release-*` branches are
+> stable releases that can be used to build and deploy the system.
+
+---
+
> FLEDGE has been renamed to Protected Audience API. To learn more about the name change, see the
> [blog post](https://privacysandbox.com/intl/en_us/news/protected-audience-api-our-new-name-for-fledge).
+---
+
# ![Privacy Sandbox Logo](docs/assets/privacy_sandbox_logo.png) FLEDGE Key/Value service
# Background
@@ -40,31 +50,234 @@ moment, to load data, instead of calling the mutation API, you would place the d
location that can be directly read by the server. See more details in the
[data loading guide](/docs/data_loading/loading_data.md).
-Currently, this service can be deployed to 1 region of your choice with more regions to be added
-soon. Monitoring and alerts are currently unavailable.
+Currently, this service can be deployed to 1 region of your choice. Multi-region configuration is up
+to the service owner to configure.
+
+## Current features
+
+### Build and deployment
+
+- Source code is available on Github
+- Releases are done on a regular basis
+- Binaries can be built from source code
+ - C++ binary
+ - [AWS & GCP] Docker container image
+ - [AWS]: Nitro EIF
+ - [AWS]: Reference AMI
+ - Other tools
+- Server can run as a standalone local process for testing without any cloud dependency or
+ TEE-related functionality
+- Server can be deployed to AWS Nitro enclave
+- Server can be deployed to GCP Confidential Space
+- Reference terraform available for a clean and comprehensive deployment to AWS or GCP
+ - Clean: assumes the cloud environment has no preset infrastructure
+ - Comprehensive: deploys all dependencies and some recommended (but not necessarily required)
+ configuration
+ - Many server behaviors can be configured by parameter flags without having to rebuild
+
+### Data loading
+
+- Server loads key/value data from cloud file storage
+- Server loads key/value data from cloud pub/sub services
+- Server loads data into an in-RAM representation for query serving
+- Server continuously monitors for new data and incrementally updates ("delta files") the in-RAM
+ representation
+- Support independent data ingestion pipelining by monitoring directories in cloud file storage
+ independently
+- Supports Flatbuffers as the data event format
+- Supports Avro and Riegeli as the data file format
+- Supports snapshot files for faster server start up
+- Users can perform compactions of delta files into snapshot files in an offline path
+
+### Read request processing
+
+- Support Protected Audience Key Value Server query spec: can be used as a BYOS server to serve
+ requests from Chrome
+- Support simple key value lookups for queries
+- Users can write "user defined functions" to execute custom logic to process queries
+- User defined functions can be written in JavaScript or WASM
+- User defined functions can call "GetValues" to look up key value from the dataset
+
+### Advanced features
+
+- Set-as-a-value is supported
+ - A key "value" pair in the dataset can be a key and a set of values
+- UDF can call "RunQuery" API to run set operations on sets (intersection, union, difference)
+- For GCP, Terraform supports deploying into an existing VPC, such as one used by the B&A services
+- Non-prod server logs are persisted after server shutdown
+- Data can be sharded and different servers may load and serve different shards (subset) of the
+ dataset.
+- Sharding supports data locality, where the operator specifies "sharding key" for key value pairs
+ so different key value pairs can have the same sharding key.
+
+## **Timeline and roadmap**
+
+The following sections include the timelines for the Trusted Key Value Server for Protected
+Auctions. Protected Auctions refer to Protected Audiences and Protected App Signals ad targeting
+products.
+
+### **Timelines**
+
+
+
+
+
+
+ |
+ Beta testing
+ |
+ General availability
+ |
+ Enforcement
+ |
+
+
+ For Protected Audience
+
+(web browser on desktop)
+ |
+ July 2024
+
+The Privacy-Sandbox-provided Key Value Server implementation can
+
+- run as a BYOS KV server
+
- support production scale traffic and common functionalities
+
+ |
+ Q4 2024
+
+Opt-in TEE mode will be available to the Adtechs. Opt-in guidance will be published in early Q4 2024.
+ |
+ No sooner than Q3 2025
+ |
+
+
+
+
+
+
+ |
+ Beta testing
+ |
+ General availability
+ |
+
+
+ For Protected Audience
+
+(With Bidding & Auction services for Chrome or Android)
+ |
+ July 2024
+
+The Privacy-Sandbox-provided Key Value Server implementation can be used with the Bidding and Auction services and
+
+- run as a BYOS KV server
+
- support production scale traffic and common functionalities
+
+ |
+ Dec 2024
+
+The Privacy-Sandbox-provided Key Value Server implementation can be used with the Bidding and Auction services and adtechs can opt-in TEE mode
+ |
+
+
+ For Protected App Signals
+ |
+ June 2024
+
+The Privacy-Sandbox-provided Key Value Server implementation supports Ad retrieval server functionality and protected communication for live traffic testing
+ |
+ Dec 2024
+
+The implementation supports live traffic at scale
+ |
+
+
+
+
+
+### **Roadmap**
+
+#### June 2024 Beta release
+
+##### Deployment and Setup
+
+- For AWS, Terraform supports deploying into an existing VPC, such as the one that is used by the
+ Bidding and Auction services
+- Internal load balancer is used for servers to send metrics to OpenTelemetry collector
+ - In v0.16, the communication goes through a public load balancer
+
+##### Integration with the Bidding & Auction services
+
+- The Bidding and Auction services can send encrypted requests to the Key Value Server for
+ Protected App Signals
+
+##### Debugging support
+
+- [Consented Debugging](https://github.com/privacysandbox/protected-auction-services-docs/blob/main/debugging_protected_audience_api_services.md#adtech-consented-debugging)
+ is supported
+- Diagnose tool to check the cloud environment to warn for potential setup errors before the
+ system is deployed
+- Operational playbook
+- Introduction of unsafe metrics
+ - Unsafe metrics have privacy protections such as differential privacy noises
+ - More metrics for comprehensive SLO monitoring
+
+##### Runtime features
+
+- Data loading error handling
+ - The system can be configured to use different error handling strategy for different dataset
+
+##### Performance/Cost
+
+- Benchmarking tools
+- Cost explainer
+- Sets-as-values will switch to using bitsets to represent sets for faster RunQuery performance.
+
+##### Support process
+
+- Commitment to support window for active releases
+
+#### Q4 2024 Chrome-PA GA
+
+##### Chrome integration
+
+- Update to V2 protocol to support the hybrid mode of BYOS & Opt-in TEE
+- Chrome and Key Value server can communicate in the updated V2 protocol
+- Chrome can send full publisher URL to TEE KV server under V2 protocol
+
+#### H2 2024 Android-PA GA, PAS GA
+
+##### User Defined Functions
+
+- UDF can perform Key/Value lookup asynchronously
+- Flags can be passed from the server parameters into UDF
+- One Key Value Server system can be used for multiple use cases. Multiple UDFs can be loaded.
+ Different UDF can be selected based on the request type.
+- Canaring support for UDF: canary version UDF can be staged in machines with specific tags.
+
+##### Customization support
+
+- First class support for customization of the system (without violating the trust model)
+
+##### Debugging support
-> **Attention**: The Key/Value Server is publicly queryable. It does not authenticate callers. That
-> is also true for the product end state. It is recommended to only store data you do not mind seen
-> by other entities.
+- Diagnose tool to collect standard and necessary debug information for troubleshooting requests
-## How to use this repo
+##### Documentation
-The `main` branch hosts live code with latest changes. It is unstable and is used for development.
-It is suitable for contribution and inspection of the latest code. The `release-*` branches are
-stable releases that can be used to build and deploy the system.
+- Complete end to end example as a template to set up the service
## Breaking changes
-This codebase right now is in a very early stage. We expect frequent updates that may not be fully
-backward compatible.
+While we make efforts to not introduce breaking changes, we expect that to happen occasionally.
The release version follows the `[major change]-[minor change]-[patch]` scheme. All 0.x.x versions
may contain breaking changes without notice. Refer to the [release changelog](/CHANGELOG.md) for the
details of the breaking changes.
-Once the codebase is in a more stable state that is version 1.0.0, we will establish additional
-channels for announcing breaking changes and major version will always be incremented for breaking
-changes.
+At GA the version will become 1.0.0. We will establish additional channels for announcing breaking
+changes, and the major version will always be incremented for breaking changes.
# Key documents
diff --git a/WORKSPACE b/WORKSPACE
index da073316..2813868a 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -13,11 +13,11 @@ python_deps("//builders/bazel")
http_archive(
name = "google_privacysandbox_servers_common",
- # commit b34fe82 2024-04-03
- sha256 = "2afc7017723efb9d34b6ed713be03dbdf9b45de8ba585d2ea314eb3a52903d0a",
- strip_prefix = "data-plane-shared-libraries-b34fe821b982e06446df617edb7a6e3041c8b0db",
+    # commit 37522d6 2024-07-01
+ sha256 = "ce300bc178b1eedd88d7545b89d1d672b3b9bfb62c138ab3f4a845f159436285",
+ strip_prefix = "data-plane-shared-libraries-37522d6ac55c8592060f636d68f50feddcb9598a",
urls = [
- "https://github.com/privacysandbox/data-plane-shared-libraries/archive/b34fe821b982e06446df617edb7a6e3041c8b0db.zip",
+ "https://github.com/privacysandbox/data-plane-shared-libraries/archive/37522d6ac55c8592060f636d68f50feddcb9598a.zip",
],
)
diff --git a/builders/.github/workflows/scorecard.yaml b/builders/.github/workflows/scorecard.yaml
index dbcb6200..22bd7f8e 100644
--- a/builders/.github/workflows/scorecard.yaml
+++ b/builders/.github/workflows/scorecard.yaml
@@ -26,14 +26,14 @@ on:
- cron: '35 10 * * 4'
push:
branches:
- - main
+ - main
# Declare default permissions as read only.
permissions: read-all
jobs:
analysis:
- name: Scorecard analysis
+ name: OpenSSF Scorecard analysis
runs-on: ubuntu-latest
permissions:
# Needed to upload the results to code-scanning dashboard.
@@ -46,12 +46,12 @@ jobs:
steps:
- name: Checkout code
- uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # v3.1.0
+ uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
with:
persist-credentials: false
- name: Run analysis
- uses: ossf/scorecard-action@e38b1902ae4f44df626f11ba0734b14fb91f8f86 # v2.1.2
+ uses: ossf/scorecard-action@0864cf19026789058feabb7e87baa5f140aac736 # v2.3.1
with:
results_file: results.sarif
results_format: sarif
@@ -73,7 +73,7 @@ jobs:
# Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
# format to the repository Actions tab.
- name: Upload artifact
- uses: actions/upload-artifact@3cea5372237819ed00197afe530f5a7ea3e805c8 # v3.1.0
+ uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
with:
name: SARIF file
path: results.sarif
@@ -81,6 +81,6 @@ jobs:
# Upload the results to GitHub's code scanning dashboard.
- name: Upload to code-scanning
- uses: github/codeql-action/upload-sarif@17573ee1cc1b9d061760f3a006fc4aac4f944fd5 # v2.2.4
+ uses: github/codeql-action/upload-sarif@1b1aada464948af03b950897e5eb522f92603cc2 # v3.24.9
with:
sarif_file: results.sarif
diff --git a/builders/.pre-commit-config.yaml b/builders/.pre-commit-config.yaml
index c7997b3e..e1ad672d 100644
--- a/builders/.pre-commit-config.yaml
+++ b/builders/.pre-commit-config.yaml
@@ -21,7 +21,7 @@ exclude: (?x)^(
fail_fast: true
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
- rev: v4.4.0
+ rev: v4.6.0
hooks:
- id: end-of-file-fixer
- id: fix-byte-order-marker
@@ -47,7 +47,7 @@ repos:
- id: shellcheck
- repo: https://github.com/pre-commit/mirrors-clang-format
- rev: v16.0.6
+ rev: v18.1.4
hooks:
- id: clang-format
types_or:
@@ -55,7 +55,7 @@ repos:
- c
- repo: https://github.com/bufbuild/buf
- rev: v1.23.1
+ rev: v1.31.0
hooks:
- id: buf-format
@@ -109,7 +109,7 @@ repos:
- markdown
- repo: https://github.com/DavidAnson/markdownlint-cli2
- rev: v0.8.1
+ rev: v0.13.0
hooks:
- id: markdownlint-cli2
name: lint markdown
@@ -144,7 +144,7 @@ repos:
- --quiet
- repo: https://github.com/psf/black
- rev: 23.7.0
+ rev: 24.4.2
hooks:
- id: black
name: black python formatter
diff --git a/builders/CHANGELOG.md b/builders/CHANGELOG.md
index e323d8a3..3ced1112 100644
--- a/builders/CHANGELOG.md
+++ b/builders/CHANGELOG.md
@@ -2,6 +2,65 @@
All notable changes to this project will be documented in this file. See [commit-and-tag-version](https://github.com/absolute-version/commit-and-tag-version) for commit guidelines.
+## 0.62.0 (2024-05-10)
+
+
+### Features
+
+* Add --dir flag to normalize-dist
+
+## 0.61.1 (2024-05-10)
+
+
+### Bug Fixes
+
+* Add docker flags to container name
+* Set 8h ttl for long-running build container
+
+## 0.61.0 (2024-05-08)
+
+
+### Features
+
+* Add cbuild support for container reuse
+
+## 0.60.0 (2024-05-07)
+
+
+### Dependencies
+
+* **deps:** Upgrade coverage-tools to ubuntu 24.04
+* **deps:** Upgrade golang to 1.22.2
+
+## 0.59.0 (2024-05-02)
+
+
+### Bug Fixes
+
+* **deps:** Update pre-commit hooks
+
+
+### Dependencies
+
+* **deps:** Upgrade alpine base image
+* **deps:** Upgrade base images for Amazon Linux
+* **deps:** Upgrade grpcurl to 1.9.1
+* **deps:** Upgrade presubmit to ubuntu 24.04
+
+## 0.58.0 (2024-04-26)
+
+
+### Features
+
+* add missing AWS env variable for CodeBuild
+
+## 0.57.1 (2024-03-28)
+
+
+### Bug Fixes
+
+* Upgrade OpenSSF scorecard GitHub Action
+
## 0.57.0 (2024-03-10)
diff --git a/builders/images/build-amazonlinux2/Dockerfile b/builders/images/build-amazonlinux2/Dockerfile
index bb6c1edc..6ccce805 100644
--- a/builders/images/build-amazonlinux2/Dockerfile
+++ b/builders/images/build-amazonlinux2/Dockerfile
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-FROM amazonlinux:2.0.20230822.0
+FROM amazonlinux:2.0.20240412.0
COPY /install_apps install_golang_apps install_go.sh generate_system_bazelrc .bazelversion /scripts/
COPY get_workspace_mount /usr/local/bin
diff --git a/builders/images/build-amazonlinux2/install_apps b/builders/images/build-amazonlinux2/install_apps
index a5e6ff35..43fa2166 100755
--- a/builders/images/build-amazonlinux2/install_apps
+++ b/builders/images/build-amazonlinux2/install_apps
@@ -22,14 +22,8 @@ while [[ $# -gt 0 ]]; do
VERBOSE=1
shift
;;
- -h | --help)
- usage 0
- break
- ;;
- *)
- usage
- break
- ;;
+ -h | --help) usage 0 ;;
+ *) usage ;;
esac
done
diff --git a/builders/images/build-amazonlinux2023/Dockerfile b/builders/images/build-amazonlinux2023/Dockerfile
index d24ba9af..268bbcba 100644
--- a/builders/images/build-amazonlinux2023/Dockerfile
+++ b/builders/images/build-amazonlinux2023/Dockerfile
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-FROM amazonlinux:2023.1.20230825.0
+FROM amazonlinux:2023.4.20240416.0
COPY /install_apps install_golang_apps install_go.sh generate_system_bazelrc .bazelversion /scripts/
COPY get_workspace_mount /usr/local/bin
diff --git a/builders/images/build-amazonlinux2023/install_apps b/builders/images/build-amazonlinux2023/install_apps
index 51c64377..442021c4 100755
--- a/builders/images/build-amazonlinux2023/install_apps
+++ b/builders/images/build-amazonlinux2023/install_apps
@@ -22,14 +22,8 @@ while [[ $# -gt 0 ]]; do
VERBOSE=1
shift
;;
- -h | --help)
- usage 0
- break
- ;;
- *)
- usage
- break
- ;;
+ -h | --help) usage 0 ;;
+ *) usage ;;
esac
done
diff --git a/builders/images/build-debian/Dockerfile b/builders/images/build-debian/Dockerfile
index eb370730..735d9b66 100644
--- a/builders/images/build-debian/Dockerfile
+++ b/builders/images/build-debian/Dockerfile
@@ -18,7 +18,7 @@ ARG BASE_IMAGE=ubuntu:20.04
# hadolint ignore=DL3006
FROM ${BASE_IMAGE} as libprofiler-builder
ENV CC=clang \
- CXX=clang
+ CXX=clang++
ADD https://github.com/gperftools/gperftools/releases/download/gperftools-2.13/gperftools-2.13.tar.gz /build/gperftools.tar.gz
ADD https://apt.llvm.org/llvm.sh /build/llvm.sh
COPY compile_libprofiler /scripts/
diff --git a/builders/images/build-debian/compile_libprofiler b/builders/images/build-debian/compile_libprofiler
index f05397fb..bcc15a68 100755
--- a/builders/images/build-debian/compile_libprofiler
+++ b/builders/images/build-debian/compile_libprofiler
@@ -24,6 +24,7 @@ function install_clang() {
/build/llvm.sh ${CLANG_VER}
apt-get --quiet install -y --no-install-recommends libc++-${CLANG_VER}-dev
update-alternatives --install /usr/bin/clang clang /usr/bin/clang-${CLANG_VER} 100
+ update-alternatives --install /usr/bin/clang++ clang++ /usr/bin/clang++-${CLANG_VER} 100
rm -f llvm.sh
clang --version
diff --git a/builders/images/coverage-tools/Dockerfile b/builders/images/coverage-tools/Dockerfile
index c2f25551..6915b8d2 100644
--- a/builders/images/coverage-tools/Dockerfile
+++ b/builders/images/coverage-tools/Dockerfile
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-FROM ubuntu:20.04
+FROM ubuntu:24.04
COPY install_apps /scripts/
diff --git a/builders/images/coverage-tools/install_apps b/builders/images/coverage-tools/install_apps
index 72fd822a..5f8713b8 100755
--- a/builders/images/coverage-tools/install_apps
+++ b/builders/images/coverage-tools/install_apps
@@ -33,8 +33,8 @@ function apt_update() {
function install_misc() {
DEBIAN_FRONTEND=noninteractive apt-get --quiet install -y --no-install-recommends \
- lcov="1.*" \
- google-perftools="2.*"
+ google-perftools="2.*" \
+ lcov="2.*"
}
function clean_debian() {
diff --git a/builders/images/generate_system_bazelrc b/builders/images/generate_system_bazelrc
index 9c8e419f..b68d9405 100755
--- a/builders/images/generate_system_bazelrc
+++ b/builders/images/generate_system_bazelrc
@@ -22,14 +22,12 @@ while [[ $# -gt 0 ]]; do
case "$1" in
--user-root-name)
USER_ROOT_NAME="$2"
- shift
- shift
+ shift 2 || usage
;;
-h | --help) usage 0 ;;
*)
printf "unrecognized arg: %s\n" "$1"
usage
- break
;;
esac
done
diff --git a/builders/images/install_go.sh b/builders/images/install_go.sh
index a436d20a..dc30cd73 100644
--- a/builders/images/install_go.sh
+++ b/builders/images/install_go.sh
@@ -22,12 +22,12 @@ function _golang_install_dir() {
function install_golang() {
declare -r _ARCH="$1"
declare -r FNAME=gobin.tar.gz
- declare -r VERSION=1.20.4
+ declare -r VERSION=1.22.2
# shellcheck disable=SC2155
declare -r GO_INSTALL_DIR="$(_golang_install_dir)"
declare -r -A GO_HASHES=(
- [amd64]="698ef3243972a51ddb4028e4a1ac63dc6d60821bf18e59a807e051fee0a385bd"
- [arm64]="105889992ee4b1d40c7c108555222ca70ae43fccb42e20fbf1eebb822f5e72c6"
+ [amd64]="5901c52b7a78002aeff14a21f93e0f064f74ce1360fce51c6ee68cd471216a17"
+ [arm64]="36e720b2d564980c162a48c7e97da2e407dfcc4239e1e58d98082dfa2486a0c1"
)
declare -r GO_HASH=${GO_HASHES[${_ARCH}]}
if [[ -z ${GO_HASH} ]]; then
diff --git a/builders/images/install_golang_apps b/builders/images/install_golang_apps
index 9d75eb94..a6bb3cfd 100755
--- a/builders/images/install_golang_apps
+++ b/builders/images/install_golang_apps
@@ -23,10 +23,7 @@ while [[ $# -gt 0 ]]; do
shift
;;
-h | --help) usage 0 ;;
- *)
- usage
- break
- ;;
+ *) usage ;;
esac
done
diff --git a/builders/images/presubmit/Dockerfile b/builders/images/presubmit/Dockerfile
index 024c05fe..53ee3778 100644
--- a/builders/images/presubmit/Dockerfile
+++ b/builders/images/presubmit/Dockerfile
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-FROM ubuntu:20.04
+FROM ubuntu:24.04
COPY install_apps install_go.sh .pre-commit-config.yaml /scripts/
COPY gitconfig /etc
diff --git a/builders/images/presubmit/install_apps b/builders/images/presubmit/install_apps
index c0d8d0d8..3986ec65 100755
--- a/builders/images/presubmit/install_apps
+++ b/builders/images/presubmit/install_apps
@@ -51,18 +51,17 @@ function apt_update() {
function install_packages() {
DEBIAN_FRONTEND=noninteractive apt-get --quiet install -y --no-install-recommends \
- apt-transport-https="2.0.*" \
+ apt-transport-https="2.7.*" \
ca-certificates \
- libcurl4="7.68.*" \
- curl="7.68.*" \
- gnupg="2.2.*" \
- lsb-release="11.1.*" \
+ libcurl4t64="8.5.*" \
+ curl="8.5.*" \
+ lsb-release="12.0*" \
openjdk-11-jre="11.0.*" \
- python3.9-venv="3.9.*" \
- shellcheck="0.7.*" \
+ python3.12-venv="3.12.*" \
+ shellcheck="0.9.*" \
software-properties-common="0.99.*" \
- wget="1.20.*"
- update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.9 100
+ wget="1.21.*"
+ update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.12 100
}
# Install Docker (https://docs.docker.com/engine/install/debian/)
@@ -81,9 +80,9 @@ function install_docker() {
}
function install_precommit() {
- /usr/bin/python3.9 -m venv "${PRE_COMMIT_VENV_DIR}"
+ /usr/bin/python3.12 -m venv "${PRE_COMMIT_VENV_DIR}"
"${PRE_COMMIT_VENV_DIR}"/bin/pip install \
- pre-commit~=3.1 \
+ pre-commit~=3.7 \
pylint~=3.1.0
"${PRE_COMMIT_TOOL}" --version
diff --git a/builders/images/test-tools/Dockerfile b/builders/images/test-tools/Dockerfile
index 45fab3cf..69399eb4 100644
--- a/builders/images/test-tools/Dockerfile
+++ b/builders/images/test-tools/Dockerfile
@@ -12,14 +12,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-FROM alpine:3.18 as slowhttptest_builder
+FROM alpine:3.19 as slowhttptest_builder
# hadolint ignore=DL3018
RUN apk add --no-cache autoconf automake build-base git openssl-dev
WORKDIR /build
ADD https://github.com/shekyan/slowhttptest/archive/refs/tags/v1.9.0.tar.gz /build/src.tar.gz
RUN tar xz --strip-components 1 -f src.tar.gz && ./configure && make
-FROM alpine:3.18 as wrk_builder
+FROM alpine:3.19 as wrk_builder
ARG TARGETARCH
ENV BUILD_ARCH="${TARGETARCH}"
COPY build_wrk /build/
@@ -27,14 +27,14 @@ WORKDIR /build
ADD https://github.com/giltene/wrk2/archive/44a94c17d8e6a0bac8559b53da76848e430cb7a7.tar.gz /build/src.tar.gz
RUN /build/build_wrk
-FROM golang:1.21-alpine3.18 AS golang
+FROM golang:1.22-alpine3.19 AS golang
ENV GOBIN=/usr/local/go/bin
COPY build_golang_apps /scripts/
RUN /scripts/build_golang_apps
-FROM fullstorydev/grpcurl:v1.8.9-alpine AS grpcurl
+FROM fullstorydev/grpcurl:v1.9.1-alpine AS grpcurl
-FROM alpine:3.18
+FROM alpine:3.19
COPY --from=golang /usr/local/go/bin/* /usr/local/bin/
COPY --from=grpcurl /bin/grpcurl /usr/local/bin/
ARG TARGETARCH
diff --git a/builders/images/utils/Dockerfile b/builders/images/utils/Dockerfile
index 0d1defd6..ea4bee81 100644
--- a/builders/images/utils/Dockerfile
+++ b/builders/images/utils/Dockerfile
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-FROM alpine:3.16
+FROM alpine:3.19
RUN apk --no-cache add \
unzip~=6.0 \
diff --git a/builders/tests/data/hashes/build-amazonlinux2 b/builders/tests/data/hashes/build-amazonlinux2
index 7aed7b0f..71f56dfb 100644
--- a/builders/tests/data/hashes/build-amazonlinux2
+++ b/builders/tests/data/hashes/build-amazonlinux2
@@ -1 +1 @@
-3efa00f3a5dbe0a4708be523aa32aca91dcd56d403d3ff32e0202756b8321b3b
+57ca4f2381a0fc193b0476663171c4d339b6ef66c0d1f1c24bb3f48d368b38ab
diff --git a/builders/tests/data/hashes/build-amazonlinux2023 b/builders/tests/data/hashes/build-amazonlinux2023
index 1bcc412e..5fe991bf 100644
--- a/builders/tests/data/hashes/build-amazonlinux2023
+++ b/builders/tests/data/hashes/build-amazonlinux2023
@@ -1 +1 @@
-57396ff1c765f7b63905963cfe4498912f7f75b5cb9f7bc36bd6879af69872e7
+8d01333fe93d2ac2102dd8360a58717724b7b594d51fe4e412ec20aae181efce
diff --git a/builders/tests/data/hashes/build-debian b/builders/tests/data/hashes/build-debian
index c89114f2..57095aed 100644
--- a/builders/tests/data/hashes/build-debian
+++ b/builders/tests/data/hashes/build-debian
@@ -1 +1 @@
-38cc8a23a6a56eb6567bef3685100cd3be1c0491dcc8b953993c42182da3fa40
+c194dafd287978093f8fe6e16e981fb22028e37345e20a4d7ca84caa43f0d4c0
diff --git a/builders/tests/data/hashes/coverage-tools b/builders/tests/data/hashes/coverage-tools
index f0336331..e0127b80 100644
--- a/builders/tests/data/hashes/coverage-tools
+++ b/builders/tests/data/hashes/coverage-tools
@@ -1 +1 @@
-cd3fb189dd23793af3bdfa02d6774ccb35bddbec7059761e25c4f7be4c1e8ca1
+b768060d602e2ed1b60573edfa6afad5379e96a9d6153cd721b2a0665075fe98
diff --git a/builders/tests/data/hashes/presubmit b/builders/tests/data/hashes/presubmit
index a35c6c86..b02b21b0 100644
--- a/builders/tests/data/hashes/presubmit
+++ b/builders/tests/data/hashes/presubmit
@@ -1 +1 @@
-d9dab1c798d51f79e68fd8eb3bb83312086808d789bbc09d0f2dbf708ef5f114
+afaf1932764d07d480c4e833e6b08877f069abae87401bdac4782277c535a298
diff --git a/builders/tests/data/hashes/test-tools b/builders/tests/data/hashes/test-tools
index fc5e0b5c..63f4e4bd 100644
--- a/builders/tests/data/hashes/test-tools
+++ b/builders/tests/data/hashes/test-tools
@@ -1 +1 @@
-dd1ec6137d4dd22fec555044cd85f484adfa6c7b686880ea5449cff936bad34e
+c1111c91dcb1e9f4df65f9fd5eab60b2545b0e716cfaf59fb88c1006a6496a5e
diff --git a/builders/tests/data/hashes/utils b/builders/tests/data/hashes/utils
index da29b2fa..188febac 100644
--- a/builders/tests/data/hashes/utils
+++ b/builders/tests/data/hashes/utils
@@ -1 +1 @@
-9fca27d931acc2bc96fa0560466cc0914a0d1cc73fb8749af057caacf2911f85
+f4b8d15b26c7bef3bc94038be9b71aaf8ba8ba8d33663b7d6fb55ebdff9a902e
diff --git a/builders/tests/run-tests b/builders/tests/run-tests
index 037067d2..b7c1a7f9 100755
--- a/builders/tests/run-tests
+++ b/builders/tests/run-tests
@@ -19,18 +19,20 @@
set -o pipefail
set -o errexit
-trap _cleanup EXIT
+# shellcheck disable=SC2317
function _cleanup() {
- local -r -i STATUS=$?
- if [[ -d ${TMP_HASHES_DIR1} ]]; then
+ local -r -i _status=$?
+ if [[ -d "${TMP_HASHES_DIR1}" ]]; then
rm -rf "${TMP_HASHES_DIR1}" "${TMP_HASHES_DIR2}"
fi
- if [[ ${STATUS} -ne 0 ]]; then
- printf "Error: run-tests status code: %d\n" "${STATUS}"
+ if [[ ${_status} -ne 0 ]]; then
+ printf "Error: run-tests status code: %d\n" "${_status}"
sleep 5s
fi
- exit ${STATUS}
+ # shellcheck disable=SC2086
+ exit ${_status}
}
+trap _cleanup EXIT
function get_image_list() {
local -r _images_dir="$1"
diff --git a/builders/tools/builder.sh b/builders/tools/builder.sh
index 77317aae..d2ca2f98 100644
--- a/builders/tools/builder.sh
+++ b/builders/tools/builder.sh
@@ -112,6 +112,7 @@ function builder::add_aws_env_vars() {
"AWS_REGION"
"AWS_DEFAULT_REGION"
"AWS_PROFILE"
+ "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"
)
}
diff --git a/builders/tools/cbuild b/builders/tools/cbuild
index c40a3aed..17da1336 100755
--- a/builders/tools/cbuild
+++ b/builders/tools/cbuild
@@ -77,6 +77,8 @@ declare -i WITH_DOCKER_SOCK=1
declare -i WITH_CMD_PROFILER=0
DOCKER_NETWORK="${DOCKER_NETWORK:-bridge}"
declare -i DOCKER_SECCOMP_UNCONFINED=0
+declare -i KEEP_CONTAINER_RUNNING=0
+declare LONG_RUNNING_CONTAINER_TIMEOUT=8h
while [[ $# -gt 0 ]]; do
case "$1" in
@@ -94,6 +96,9 @@ while [[ $# -gt 0 ]]; do
;;
--image)
IMAGE="$2"
+ if [[ ${IMAGE} =~ ^build-* ]]; then
+ KEEP_CONTAINER_RUNNING=1
+ fi
shift 2 || usage
;;
--without-shared-cache)
@@ -171,12 +176,14 @@ if [[ ${PWD_WORKSPACE_REL_PATH:0:1} != / ]]; then
fi
readonly WORKDIR
-declare -a DOCKER_RUN_ARGS
-DOCKER_RUN_ARGS+=(
+# DOCKER_EXEC_RUN_ARGS applies to both `docker run` and `docker exec`
+declare -a DOCKER_EXEC_RUN_ARGS=(
+ "--workdir=${WORKDIR}"
+)
+declare -a DOCKER_RUN_ARGS=(
"--rm"
"--entrypoint=/bin/bash"
"--volume=${WORKSPACE_MOUNT}:/src/workspace"
- "--workdir=${WORKDIR}"
"--network=${DOCKER_NETWORK}"
"$(echo "${EXTRA_DOCKER_RUN_ARGS}" | envsubst)"
)
@@ -200,53 +207,118 @@ fi
readonly BAZEL_ROOT=/bazel_root
if [[ ${WITH_SHARED_CACHE} -eq 0 ]]; then
# use tmpfs for as temporary, container-bound bazel cache
- DOCKER_RUN_ARGS+=(
- "--tmpfs ${BAZEL_ROOT}:exec"
- )
+ DOCKER_RUN_ARGS+=("--tmpfs=${BAZEL_ROOT}:exec")
else
# mount host filesystem for "shared" use by multiple docker container invocations
- DOCKER_RUN_ARGS+=(
- "--volume ${HOME}/.cache/bazel:${BAZEL_ROOT}"
- )
+ DOCKER_RUN_ARGS+=("--volume=${HOME}/.cache/bazel:${BAZEL_ROOT}")
fi
if [[ ${WITH_DOCKER_SOCK} -eq 1 ]]; then
- DOCKER_RUN_ARGS+=(
- "--volume /var/run/docker.sock:/var/run/docker.sock"
- )
+ DOCKER_RUN_ARGS+=("--volume=/var/run/docker.sock:/var/run/docker.sock")
fi
for evar in "${ENV_VARS[@]}"
do
- DOCKER_RUN_ARGS+=(
- "--env=${evar}"
- )
+ DOCKER_EXEC_RUN_ARGS+=("--env=${evar}")
done
if [[ -t 0 ]] && [[ -t 1 ]]; then
# stdin and stdout are open, assume it's an interactive tty session
- DOCKER_RUN_ARGS+=(
- --interactive
- --tty
+ DOCKER_EXEC_RUN_ARGS+=(
+ "--interactive"
+ "--tty"
)
fi
+function get_container_name() {
+ local -r mount="$(echo "${WORKSPACE_MOUNT}" | sha256sum)"
+ local -r image_sha="${IMAGE_TAGGED##*-}"
+ local -r docker_args_sha="$({
+cat </dev/stderr
+ docker container rm --force "${name}" >/dev/null
+ printf "finished removing docker container: %s\n" "${name}" &>/dev/stderr
+ fi
+ docker "${docker_args[@]}" --filter "status=running"
+}
+
+function long_running_container() {
+ local -r container_name="$1"
+ local -r docker_running_container="$(running_container_for "${container_name}")"
+ if [[ -z ${docker_running_container} ]]; then
+ printf "starting a new container [%s]\n" "${container_name}" &>/dev/stderr
+ if [[ -z ${CMD} ]]; then
+ # shellcheck disable=SC2068
+ docker run \
+ ${DOCKER_RUN_ARGS[@]} \
+ "${DOCKER_EXEC_RUN_ARGS[@]}" \
+ "${IMAGE_TAGGED}"
+ else
+ # shellcheck disable=SC2068
+ docker run \
+ ${DOCKER_RUN_ARGS[@]} \
+ "${DOCKER_EXEC_RUN_ARGS[@]}" \
+ --detach \
+ "${IMAGE_TAGGED}" \
+ --login -c "
+declare -i -r pid=\$(bazel info server_pid 2>/dev/null)
+# wait for pid, even if it's not a child process of this shell
+timeout ${LONG_RUNNING_CONTAINER_TIMEOUT} tail --pid=\${pid} -f /dev/null
+" &>/dev/null
+ fi
+ fi
+ running_container_for "${DOCKER_CONTAINER_NAME}"
+}
+
+if [[ ${KEEP_CONTAINER_RUNNING} -eq 1 ]]; then
+ DOCKER_RUNNING_CONTAINER="$(long_running_container "${DOCKER_CONTAINER_NAME}")"
+ docker exec \
+ "${DOCKER_EXEC_RUN_ARGS[@]}" \
+ "${DOCKER_RUNNING_CONTAINER}" \
+ /bin/bash -c "${CMD}"
else
- # shellcheck disable=SC2068
- docker run \
- ${DOCKER_RUN_ARGS[@]} \
- "${IMAGE_TAGGED}" \
- --login -c "${CMD}"
+ if [[ -z ${CMD} ]]; then
+ # shellcheck disable=SC2068
+ docker run \
+ ${DOCKER_RUN_ARGS[@]} \
+ "${DOCKER_EXEC_RUN_ARGS[@]}" \
+ "${IMAGE_TAGGED}" \
+ --login
+ elif [[ ${WITH_CMD_PROFILER} -eq 1 ]]; then
+ # shellcheck disable=SC2068
+ docker run \
+ ${DOCKER_RUN_ARGS[@]} \
+ "${DOCKER_EXEC_RUN_ARGS[@]}" \
+ "${IMAGE_TAGGED}" \
+ --login -c "'${TOOLS_RELDIR}'/normalize-bazel-symlinks; env \${CMD_PROFILER} ${CMD}"
+ else
+ # shellcheck disable=SC2068
+ docker run \
+ ${DOCKER_RUN_ARGS[@]} \
+ "${DOCKER_EXEC_RUN_ARGS[@]}" \
+ "${IMAGE_TAGGED}" \
+ --login -c "$CMD"
+ fi
fi
diff --git a/builders/tools/normalize-dist b/builders/tools/normalize-dist
index 93627d25..cfa86e11 100755
--- a/builders/tools/normalize-dist
+++ b/builders/tools/normalize-dist
@@ -19,13 +19,40 @@
set -o pipefail
set -o errexit
+declare TOP_LEVEL_DIR=dist
+
+function usage() {
+ local exitval=${1-1}
+ cat &>/dev/stderr <
+ --dir directory to normalize recursively. Default: ${TOP_LEVEL_DIR}
+USAGE
+ # shellcheck disable=SC2086
+ exit ${exitval}
+}
+
+while [[ $# -gt 0 ]]; do
+ case "$1" in
+ --dir)
+ TOP_LEVEL_DIR="$2"
+ shift 2 || usage
+ ;;
+ -h | --help) usage 0 ;;
+ *)
+ printf "unrecognized arg: %s\n" "$1"
+ usage
+ ;;
+ esac
+done
+
trap _cleanup EXIT
function _cleanup() {
local -r -i STATUS=$?
if [[ ${STATUS} -eq 0 ]]; then
- printf "normalize-dist completed successfully\n" &>/dev/stderr
+ printf "normalize-dist [%s] completed successfully\n" "${TOP_LEVEL_DIR}" &>/dev/stderr
else
- printf "Error: normalize-dist completed with status code: %s\n" "${STATUS}" &>/dev/stderr
+ printf "Error: normalize-dist [%s] completed with status code: %s\n" "${TOP_LEVEL_DIR}" "${STATUS}" &>/dev/stderr
fi
exit 0
}
@@ -40,9 +67,7 @@ readonly GROUP
USER="$(builder::id u)"
readonly USER
-readonly TOP_LEVEL_DIRS="dist"
-
-printf "Setting file ownership [%s], group [%s] in dirs [%s]\n" "${USER}" "${GROUP}" "${TOP_LEVEL_DIRS}"
+printf "Setting file ownership [%s], group [%s] in dirs [%s]\n" "${USER}" "${GROUP}" "${TOP_LEVEL_DIR}"
declare -a runner=()
if [[ -f /.dockerenv ]]; then
runner+=(bash -c)
@@ -51,11 +76,9 @@ else
fi
"${runner[@]}" "
-for TOP_LEVEL_DIR in ${TOP_LEVEL_DIRS}; do
- find \${TOP_LEVEL_DIR} -type f ! -executable -exec chmod 644 {} \;
- find \${TOP_LEVEL_DIR} -type f -executable -exec chmod 755 {} \;
- find \${TOP_LEVEL_DIR} -type d -exec chmod 755 {} \;
- chgrp --recursive ${GROUP} \${TOP_LEVEL_DIR}
- chown --recursive ${USER} \${TOP_LEVEL_DIR}
-done
+find ${TOP_LEVEL_DIR} -type f ! -executable -exec chmod 644 {} \;
+find ${TOP_LEVEL_DIR} -type f -executable -exec chmod 755 {} \;
+find ${TOP_LEVEL_DIR} -type d -exec chmod 755 {} \;
+chgrp --recursive ${GROUP} ${TOP_LEVEL_DIR}
+chown --recursive ${USER} ${TOP_LEVEL_DIR}
"
diff --git a/builders/tools/terraform b/builders/tools/terraform
index 71a3e708..6cfc214c 100755
--- a/builders/tools/terraform
+++ b/builders/tools/terraform
@@ -82,4 +82,5 @@ DOCKER_RUN_ARGS+=(
# shellcheck disable=SC2068
docker run \
"${DOCKER_RUN_ARGS[@]}" \
- ${IMAGE_TAGGED} "$@"
+ "${IMAGE_TAGGED}" \
+ "$@"
diff --git a/builders/version.txt b/builders/version.txt
index 78756de3..7e9253a3 100644
--- a/builders/version.txt
+++ b/builders/version.txt
@@ -1 +1 @@
-0.57.0
\ No newline at end of file
+0.62.0
\ No newline at end of file
diff --git a/components/cloud_config/BUILD.bazel b/components/cloud_config/BUILD.bazel
index 8cd15ad6..d8ef7895 100644
--- a/components/cloud_config/BUILD.bazel
+++ b/components/cloud_config/BUILD.bazel
@@ -37,6 +37,7 @@ cc_library(
"@com_google_absl//absl/status",
"@com_google_absl//absl/status:statusor",
"@com_google_absl//absl/strings",
+ "@google_privacysandbox_servers_common//src/logger:request_context_logger",
],
)
@@ -55,6 +56,7 @@ cc_library(
"@com_google_absl//absl/status",
"@com_google_absl//absl/status:statusor",
"@com_google_absl//absl/strings",
+ "@google_privacysandbox_servers_common//src/logger:request_context_logger",
"@google_privacysandbox_servers_common//src/public/core/interface:errors",
"@google_privacysandbox_servers_common//src/public/cpio/interface/parameter_client",
],
@@ -123,6 +125,7 @@ cc_library(
"@com_google_absl//absl/log",
"@com_google_absl//absl/status",
"@com_google_absl//absl/status:statusor",
+ "@google_privacysandbox_servers_common//src/logger:request_context_logger",
],
)
@@ -163,6 +166,7 @@ cc_library(
"@com_google_absl//absl/status",
"@com_google_absl//absl/status:statusor",
"@com_google_absl//absl/strings",
+ "@google_privacysandbox_servers_common//src/logger:request_context_logger",
"@google_privacysandbox_servers_common//src/util/status_macro:status_macros",
],
)
diff --git a/components/cloud_config/instance_client.h b/components/cloud_config/instance_client.h
index 8d55733e..69e35de4 100644
--- a/components/cloud_config/instance_client.h
+++ b/components/cloud_config/instance_client.h
@@ -21,6 +21,7 @@
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
+#include "src/logger/request_context_logger.h"
// TODO: Replace config cpio client once ready
namespace kv_server {
@@ -55,7 +56,10 @@ using DescribeInstanceGroupInput =
// Client to perform instance-specific operations.
class InstanceClient {
public:
- static std::unique_ptr Create();
+ static std::unique_ptr Create(
+ privacy_sandbox::server_common::log::PSLogContext& log_context =
+ const_cast(
+ privacy_sandbox::server_common::log::kNoOpContext));
virtual ~InstanceClient() = default;
// Retrieves all tags for the current instance and returns the tag with the
@@ -89,6 +93,12 @@ class InstanceClient {
// Retrieves descriptive information about the given instances.
virtual absl::StatusOr> DescribeInstances(
const absl::flat_hash_set& instance_ids) = 0;
+
+ // Updates the log context reference to enable otel logging for instance
+ // client. This function should be called after telemetry is initialized with
+ // retrieved parameters.
+ virtual void UpdateLogContext(
+ privacy_sandbox::server_common::log::PSLogContext& log_context) = 0;
};
} // namespace kv_server
diff --git a/components/cloud_config/instance_client_aws.cc b/components/cloud_config/instance_client_aws.cc
index 0191e891..64709b20 100644
--- a/components/cloud_config/instance_client_aws.cc
+++ b/components/cloud_config/instance_client_aws.cc
@@ -186,18 +186,19 @@ class AwsInstanceClient : public InstanceClient {
std::string_view lifecycle_hook_name) override {
const absl::StatusOr instance_id = GetInstanceId();
if (!instance_id.ok()) {
- LOG(ERROR) << "Failed to get instance_id: " << instance_id.status();
+ PS_LOG(ERROR, log_context_)
+ << "Failed to get instance_id: " << instance_id.status();
return instance_id.status();
}
- LOG(INFO) << "Retrieved instance id: " << *instance_id;
+ PS_LOG(INFO, log_context_) << "Retrieved instance id: " << *instance_id;
const absl::StatusOr auto_scaling_group_name =
GetAutoScalingGroupName(*auto_scaling_client_, *instance_id);
if (!auto_scaling_group_name.ok()) {
return auto_scaling_group_name.status();
}
- LOG(INFO) << "Retrieved auto scaling group name "
- << *auto_scaling_group_name;
+ PS_LOG(INFO, log_context_)
+ << "Retrieved auto scaling group name " << *auto_scaling_group_name;
Aws::AutoScaling::Model::RecordLifecycleActionHeartbeatRequest request;
request.SetAutoScalingGroupName(*auto_scaling_group_name);
@@ -216,18 +217,19 @@ class AwsInstanceClient : public InstanceClient {
std::string_view lifecycle_hook_name) override {
const absl::StatusOr instance_id = GetInstanceId();
if (!instance_id.ok()) {
- LOG(ERROR) << "Failed to get instance_id: " << instance_id.status();
+ PS_LOG(ERROR, log_context_)
+ << "Failed to get instance_id: " << instance_id.status();
return instance_id.status();
}
- LOG(INFO) << "Retrieved instance id: " << *instance_id;
+ PS_LOG(INFO, log_context_) << "Retrieved instance id: " << *instance_id;
const absl::StatusOr auto_scaling_group_name =
GetAutoScalingGroupName(*auto_scaling_client_, *instance_id);
if (!auto_scaling_group_name.ok()) {
return auto_scaling_group_name.status();
}
- LOG(INFO) << "Retrieved auto scaling group name "
- << *auto_scaling_group_name;
+ PS_LOG(INFO, log_context_)
+ << "Retrieved auto scaling group name " << *auto_scaling_group_name;
Aws::AutoScaling::Model::CompleteLifecycleActionRequest request;
request.SetAutoScalingGroupName(*auto_scaling_group_name);
@@ -329,7 +331,13 @@ class AwsInstanceClient : public InstanceClient {
return instances;
}
- AwsInstanceClient()
+ void UpdateLogContext(
+ privacy_sandbox::server_common::log::PSLogContext& log_context) override {
+ log_context_ = log_context;
+ }
+
+ AwsInstanceClient(
+ privacy_sandbox::server_common::log::PSLogContext& log_context)
: ec2_client_(std::make_unique()),
// EC2MetadataClient does not fall back to the default client
// configuration, needs to specify it to
@@ -338,21 +346,24 @@ class AwsInstanceClient : public InstanceClient {
ec2_metadata_client_(std::make_unique(
Aws::Client::ClientConfiguration())),
auto_scaling_client_(
- std::make_unique()) {}
+ std::make_unique()),
+ log_context_(log_context) {}
private:
std::unique_ptr ec2_client_;
std::unique_ptr ec2_metadata_client_;
std::unique_ptr auto_scaling_client_;
std::string machine_id_;
+ privacy_sandbox::server_common::log::PSLogContext& log_context_;
absl::StatusOr GetTag(std::string tag) {
absl::StatusOr instance_id = GetInstanceId();
if (!instance_id.ok()) {
- LOG(ERROR) << "Failed to get instance_id: " << instance_id.status();
+ PS_LOG(ERROR, log_context_)
+ << "Failed to get instance_id: " << instance_id.status();
return instance_id;
}
- LOG(INFO) << "Retrieved instance id: " << *instance_id;
+ PS_LOG(INFO, log_context_) << "Retrieved instance id: " << *instance_id;
Aws::EC2::Model::Filter resource_id_filter;
resource_id_filter.SetName(kResourceIdFilter);
resource_id_filter.AddValues(*instance_id);
@@ -363,18 +374,20 @@ class AwsInstanceClient : public InstanceClient {
Aws::EC2::Model::DescribeTagsRequest request;
request.SetFilters({resource_id_filter, key_filter});
- LOG(INFO) << "Sending Aws::EC2::Model::DescribeTagsRequest to get tag: "
- << tag;
+ PS_LOG(INFO, log_context_)
+ << "Sending Aws::EC2::Model::DescribeTagsRequest to get tag: " << tag;
const auto outcome = ec2_client_->DescribeTags(request);
if (!outcome.IsSuccess()) {
- LOG(ERROR) << "Failed to get tag: " << outcome.GetError();
+ PS_LOG(ERROR, log_context_)
+ << "Failed to get tag: " << outcome.GetError();
return AwsErrorToStatus(outcome.GetError());
}
if (outcome.GetResult().GetTags().size() != 1) {
const std::string error_msg = absl::StrCat(
"Could not get tag ", tag, " for instance ", *instance_id);
- LOG(ERROR) << error_msg << "; Retrieved "
- << outcome.GetResult().GetTags().size() << " tags";
+ PS_LOG(ERROR, log_context_)
+ << error_msg << "; Retrieved " << outcome.GetResult().GetTags().size()
+ << " tags";
return absl::NotFoundError(error_msg);
}
return outcome.GetResult().GetTags()[0].GetValue();
@@ -383,8 +396,9 @@ class AwsInstanceClient : public InstanceClient {
} // namespace
-std::unique_ptr InstanceClient::Create() {
- return std::make_unique();
+std::unique_ptr InstanceClient::Create(
+ privacy_sandbox::server_common::log::PSLogContext& log_context) {
+ return std::make_unique(log_context);
}
} // namespace kv_server
diff --git a/components/cloud_config/instance_client_gcp.cc b/components/cloud_config/instance_client_gcp.cc
index 4839204e..5ad5f919 100644
--- a/components/cloud_config/instance_client_gcp.cc
+++ b/components/cloud_config/instance_client_gcp.cc
@@ -54,7 +54,6 @@ using google::cmrt::sdk::instance_service::v1::
using ::google::scp::core::ExecutionResult;
using ::google::scp::core::errors::GetErrorMessage;
using google::scp::cpio::InstanceClientInterface;
-using google::scp::cpio::InstanceClientOptions;
namespace compute = ::google::cloud::compute_instances_v1;
@@ -86,9 +85,10 @@ InstanceServiceStatus GetInstanceServiceStatus(const std::string& status) {
class GcpInstanceClient : public InstanceClient {
public:
- GcpInstanceClient()
- : instance_client_(google::scp::cpio::InstanceClientFactory::Create(
- InstanceClientOptions())) {
+ GcpInstanceClient(
+ privacy_sandbox::server_common::log::PSLogContext& log_context)
+ : instance_client_(google::scp::cpio::InstanceClientFactory::Create()),
+ log_context_(log_context) {
instance_client_->Init();
}
@@ -120,7 +120,7 @@ class GcpInstanceClient : public InstanceClient {
absl::Status RecordLifecycleHeartbeat(
std::string_view lifecycle_hook_name) override {
- LOG(INFO) << "Record lifecycle heartbeat.";
+ PS_LOG(INFO, log_context_) << "Record lifecycle heartbeat.";
return absl::OkStatus();
}
@@ -162,7 +162,7 @@ class GcpInstanceClient : public InstanceClient {
std::string_view lifecycle_hook_name) override {
PS_RETURN_IF_ERROR(SetInitializedLabel())
<< "Error setting the initialized label";
- LOG(INFO) << "Complete lifecycle.";
+ PS_LOG(INFO, log_context_) << "Complete lifecycle.";
return absl::OkStatus();
}
@@ -227,6 +227,11 @@ class GcpInstanceClient : public InstanceClient {
return std::vector{InstanceInfo{.id = *id}};
}
+ void UpdateLogContext(
+ privacy_sandbox::server_common::log::PSLogContext& log_context) override {
+ log_context_ = log_context;
+ }
+
private:
absl::Status GetInstanceDetails() {
absl::StatusOr resource_name =
@@ -239,13 +244,15 @@ class GcpInstanceClient : public InstanceClient {
GetInstanceDetailsByResourceNameRequest request;
request.set_instance_resource_name(resource_name.value());
- const auto& result = instance_client_->GetInstanceDetailsByResourceName(
+ absl::Status status = instance_client_->GetInstanceDetailsByResourceName(
std::move(request),
[&done, this](
const ExecutionResult& result,
const GetInstanceDetailsByResourceNameResponse& response) {
if (result.Successful()) {
- VLOG(2) << response.DebugString();
+ // TODO(b/342614468): Temporarily turn off this vlog until
+ // verbosity setting API in the common repo is fixed
+ // PS_VLOG(2, log_context_) << response.DebugString();
instance_id_ =
std::string{response.instance_details().instance_id()};
environment_ =
@@ -253,36 +260,35 @@ class GcpInstanceClient : public InstanceClient {
shard_number_ =
response.instance_details().labels().at(kShardNumberLabel);
} else {
- LOG(ERROR) << "Failed to get instance details: "
- << GetErrorMessage(result.status_code);
+ PS_LOG(ERROR, log_context_) << "Failed to get instance details: "
+ << GetErrorMessage(result.status_code);
}
done.Notify();
});
done.WaitForNotification();
- return result.Successful()
- ? absl::OkStatus()
- : absl::InternalError(GetErrorMessage(result.status_code));
+ return status;
}
absl::StatusOr GetResourceName(
std::unique_ptr& instance_client) {
std::string resource_name;
absl::Notification done;
- const auto& result = instance_client->GetCurrentInstanceResourceName(
+ absl::Status status = instance_client->GetCurrentInstanceResourceName(
GetCurrentInstanceResourceNameRequest(),
[&](const ExecutionResult& result,
const GetCurrentInstanceResourceNameResponse& response) {
if (result.Successful()) {
resource_name = std::string{response.instance_resource_name()};
} else {
- LOG(ERROR) << "Failed to get instance resource name: "
- << GetErrorMessage(result.status_code);
+ PS_LOG(ERROR, log_context_)
+ << "Failed to get instance resource name: "
+ << GetErrorMessage(result.status_code);
}
done.Notify();
});
- if (!result.Successful()) {
- return absl::InternalError(GetErrorMessage(result.status_code));
+ if (!status.ok()) {
+ return status;
}
done.WaitForNotification();
if (resource_name.empty()) {
@@ -299,10 +305,12 @@ class GcpInstanceClient : public InstanceClient {
std::unique_ptr instance_client_;
compute::InstancesClient client_ =
compute::InstancesClient(compute::MakeInstancesConnectionRest());
+ privacy_sandbox::server_common::log::PSLogContext& log_context_;
};
} // namespace
-std::unique_ptr InstanceClient::Create() {
- return std::make_unique();
+std::unique_ptr InstanceClient::Create(
+ privacy_sandbox::server_common::log::PSLogContext& log_context) {
+ return std::make_unique(log_context);
}
} // namespace kv_server
diff --git a/components/cloud_config/instance_client_local.cc b/components/cloud_config/instance_client_local.cc
index 32a30f19..a66ab089 100644
--- a/components/cloud_config/instance_client_local.cc
+++ b/components/cloud_config/instance_client_local.cc
@@ -30,6 +30,9 @@ namespace {
class LocalInstanceClient : public InstanceClient {
public:
+ explicit LocalInstanceClient(
+ privacy_sandbox::server_common::log::PSLogContext& log_context)
+ : log_context_(log_context) {}
absl::StatusOr GetEnvironmentTag() override {
return absl::GetFlag(FLAGS_environment);
}
@@ -40,13 +43,13 @@ class LocalInstanceClient : public InstanceClient {
absl::Status RecordLifecycleHeartbeat(
std::string_view lifecycle_hook_name) override {
- LOG(INFO) << "Record lifecycle heartbeat.";
+ PS_LOG(INFO, log_context_) << "Record lifecycle heartbeat.";
return absl::OkStatus();
}
absl::Status CompleteLifecycle(
std::string_view lifecycle_hook_name) override {
- LOG(INFO) << "Complete lifecycle.";
+ PS_LOG(INFO, log_context_) << "Complete lifecycle.";
return absl::OkStatus();
}
@@ -75,12 +78,21 @@ class LocalInstanceClient : public InstanceClient {
}
return std::vector{InstanceInfo{.id = *id}};
}
+
+ void UpdateLogContext(
+ privacy_sandbox::server_common::log::PSLogContext& log_context) override {
+ log_context_ = log_context;
+ }
+
+ private:
+ privacy_sandbox::server_common::log::PSLogContext& log_context_;
};
} // namespace
-std::unique_ptr InstanceClient::Create() {
- return std::make_unique();
+std::unique_ptr InstanceClient::Create(
+ privacy_sandbox::server_common::log::PSLogContext& log_context) {
+ return std::make_unique(log_context);
}
} // namespace kv_server
diff --git a/components/cloud_config/parameter_client.h b/components/cloud_config/parameter_client.h
index 0b6be921..b14e039c 100644
--- a/components/cloud_config/parameter_client.h
+++ b/components/cloud_config/parameter_client.h
@@ -19,6 +19,7 @@
#include
#include "absl/status/statusor.h"
+#include "src/logger/request_context_logger.h"
// TODO: Replace config cpio client once ready
namespace kv_server {
@@ -32,7 +33,10 @@ class ParameterClient {
};
static std::unique_ptr Create(
- ClientOptions client_options = ClientOptions());
+ ClientOptions client_options = ClientOptions(),
+ privacy_sandbox::server_common::log::PSLogContext& log_context =
+ const_cast(
+ privacy_sandbox::server_common::log::kNoOpContext));
virtual ~ParameterClient() = default;
@@ -45,6 +49,12 @@ class ParameterClient {
virtual absl::StatusOr GetBoolParameter(
std::string_view parameter_name) const = 0;
+
+ // Updates the log context reference to enable otel logging for parameter
+ // client. This function should be called after telemetry is initialized with
+ // retrieved parameters.
+ virtual void UpdateLogContext(
+ privacy_sandbox::server_common::log::PSLogContext& log_context) = 0;
};
} // namespace kv_server
diff --git a/components/cloud_config/parameter_client_aws.cc b/components/cloud_config/parameter_client_aws.cc
index bb13143a..9a3cc563 100644
--- a/components/cloud_config/parameter_client_aws.cc
+++ b/components/cloud_config/parameter_client_aws.cc
@@ -41,25 +41,27 @@ class AwsParameterClient : public ParameterClient {
absl::StatusOr GetParameter(
std::string_view parameter_name,
std::optional default_value = std::nullopt) const override {
- LOG(INFO) << "Getting parameter: " << parameter_name;
+ PS_LOG(INFO, log_context_) << "Getting parameter: " << parameter_name;
Aws::SSM::Model::GetParameterRequest request;
request.SetName(std::string(parameter_name));
const auto outcome = ssm_client_->GetParameter(request);
if (!outcome.IsSuccess()) {
if (default_value.has_value()) {
- LOG(WARNING) << "Unable to get parameter: " << parameter_name
- << " with error: " << outcome.GetError()
- << ", returning default value: " << *default_value;
+ PS_LOG(WARNING, log_context_)
+ << "Unable to get parameter: " << parameter_name
+ << " with error: " << outcome.GetError()
+ << ", returning default value: " << *default_value;
return *default_value;
} else {
- LOG(ERROR) << "Unable to get parameter: " << parameter_name
- << " with error: " << outcome.GetError();
+ PS_LOG(ERROR, log_context_)
+ << "Unable to get parameter: " << parameter_name
+ << " with error: " << outcome.GetError();
}
return AwsErrorToStatus(outcome.GetError());
}
std::string result = outcome.GetResult().GetParameter().GetValue();
- LOG(INFO) << "Got parameter: " << parameter_name
- << " with value: " << result;
+ PS_LOG(INFO, log_context_)
+ << "Got parameter: " << parameter_name << " with value: " << result;
return result;
};
@@ -79,7 +81,7 @@ class AwsParameterClient : public ParameterClient {
const std::string error =
absl::StrFormat("Failed converting %s parameter: %s to int32.",
parameter_name, *parameter);
- LOG(ERROR) << error;
+ PS_LOG(ERROR, log_context_) << error;
return absl::InvalidArgumentError(error);
}
@@ -102,15 +104,22 @@ class AwsParameterClient : public ParameterClient {
const std::string error =
absl::StrFormat("Failed converting %s parameter: %s to bool.",
parameter_name, *parameter);
- LOG(ERROR) << error;
+ PS_LOG(ERROR, log_context_) << error;
return absl::InvalidArgumentError(error);
}
return parameter_bool;
};
- explicit AwsParameterClient(ParameterClient::ClientOptions client_options)
- : client_options_(std::move(client_options)) {
+ void UpdateLogContext(
+ privacy_sandbox::server_common::log::PSLogContext& log_context) override {
+ log_context_ = log_context;
+ }
+
+ explicit AwsParameterClient(
+ ParameterClient::ClientOptions client_options,
+ privacy_sandbox::server_common::log::PSLogContext& log_context)
+ : client_options_(std::move(client_options)), log_context_(log_context) {
if (client_options.client_for_unit_testing_ != nullptr) {
ssm_client_.reset(
(Aws::SSM::SSMClient*)client_options.client_for_unit_testing_);
@@ -122,13 +131,16 @@ class AwsParameterClient : public ParameterClient {
private:
ClientOptions client_options_;
std::unique_ptr ssm_client_;
+ privacy_sandbox::server_common::log::PSLogContext& log_context_;
};
} // namespace
std::unique_ptr ParameterClient::Create(
- ParameterClient::ClientOptions client_options) {
- return std::make_unique(std::move(client_options));
+ ParameterClient::ClientOptions client_options,
+ privacy_sandbox::server_common::log::PSLogContext& log_context) {
+ return std::make_unique(std::move(client_options),
+ log_context);
}
} // namespace kv_server
diff --git a/components/cloud_config/parameter_client_aws_test.cc b/components/cloud_config/parameter_client_aws_test.cc
index 72a128e2..5880de85 100644
--- a/components/cloud_config/parameter_client_aws_test.cc
+++ b/components/cloud_config/parameter_client_aws_test.cc
@@ -37,7 +37,7 @@ class MockSsmClient : public ::Aws::SSM::SSMClient {
};
class ParameterClientAwsTest : public ::testing::Test {
- protected:
+ private:
PlatformInitializer initializer_;
};
diff --git a/components/cloud_config/parameter_client_gcp.cc b/components/cloud_config/parameter_client_gcp.cc
index a5aee5e4..e416f22f 100644
--- a/components/cloud_config/parameter_client_gcp.cc
+++ b/components/cloud_config/parameter_client_gcp.cc
@@ -44,7 +44,10 @@ using google::scp::cpio::ParameterClientOptions;
class GcpParameterClient : public ParameterClient {
public:
- explicit GcpParameterClient(ParameterClient::ClientOptions client_options) {
+ explicit GcpParameterClient(
+ ParameterClient::ClientOptions client_options,
+ privacy_sandbox::server_common::log::PSLogContext& log_context)
+ : log_context_(log_context) {
if (client_options.client_for_unit_testing_ == nullptr) {
parameter_client_ =
ParameterClientFactory::Create(ParameterClientOptions());
@@ -52,39 +55,31 @@ class GcpParameterClient : public ParameterClient {
parameter_client_.reset(std::move(
(ParameterClientInterface*)client_options.client_for_unit_testing_));
}
- auto execution_result = parameter_client_->Init();
- CHECK(execution_result.Successful())
- << "Cannot init parameter client!"
- << GetErrorMessage(execution_result.status_code);
- execution_result = parameter_client_->Run();
- CHECK(execution_result.Successful())
- << "Cannot run parameter client!"
- << GetErrorMessage(execution_result.status_code);
+ CHECK_OK(parameter_client_->Init());
+ CHECK_OK(parameter_client_->Run());
}
~GcpParameterClient() {
- auto execution_result = parameter_client_->Stop();
- if (!execution_result.Successful()) {
- LOG(ERROR) << "Cannot stop parameter client!"
- << GetErrorMessage(execution_result.status_code);
+ if (absl::Status status = parameter_client_->Stop(); !status.ok()) {
+ PS_LOG(ERROR, log_context_) << "Cannot stop parameter client!" << status;
}
}
absl::StatusOr GetParameter(
std::string_view parameter_name,
std::optional default_value = std::nullopt) const override {
- LOG(INFO) << "Getting parameter: " << parameter_name;
+ PS_LOG(INFO, log_context_) << "Getting parameter: " << parameter_name;
GetParameterRequest get_parameter_request;
get_parameter_request.set_parameter_name(parameter_name);
std::string param_value;
absl::BlockingCounter counter(1);
- auto execution_result = parameter_client_->GetParameter(
+ absl::Status status = parameter_client_->GetParameter(
std::move(get_parameter_request),
- [¶m_value, &counter](const ExecutionResult result,
- GetParameterResponse response) {
+ [¶m_value, &counter, &log_context = log_context_](
+ const ExecutionResult result, GetParameterResponse response) {
if (!result.Successful()) {
- LOG(ERROR) << "GetParameter failed: "
- << GetErrorMessage(result.status_code);
+ PS_LOG(ERROR, log_context) << "GetParameter failed: "
+ << GetErrorMessage(result.status_code);
} else {
param_value = response.parameter_value() != "EMPTY_STRING"
? response.parameter_value()
@@ -94,21 +89,21 @@ class GcpParameterClient : public ParameterClient {
counter.DecrementCount();
});
counter.Wait();
- if (!execution_result.Successful()) {
- auto status =
- absl::UnavailableError(GetErrorMessage(execution_result.status_code));
+ if (!status.ok()) {
if (default_value.has_value()) {
- LOG(WARNING) << "Unable to get parameter: " << parameter_name
- << " with error: " << status
- << ", returning default value: " << *default_value;
+ PS_LOG(WARNING, log_context_)
+ << "Unable to get parameter: " << parameter_name
+ << " with error: " << status
+ << ", returning default value: " << *default_value;
return *default_value;
}
- LOG(ERROR) << "Unable to get parameter: " << parameter_name
- << " with error: " << status;
+ PS_LOG(ERROR, log_context_)
+ << "Unable to get parameter: " << parameter_name
+ << " with error: " << status;
return status;
}
- LOG(INFO) << "Got parameter: " << parameter_name
- << " with value: " << param_value;
+ PS_LOG(INFO, log_context_) << "Got parameter: " << parameter_name
+ << " with value: " << param_value;
return param_value;
}
@@ -123,7 +118,7 @@ class GcpParameterClient : public ParameterClient {
const std::string error =
absl::StrFormat("Failed converting %s parameter: %s to int32.",
parameter_name, *parameter);
- LOG(ERROR) << error;
+ PS_LOG(ERROR, log_context_) << error;
return absl::InvalidArgumentError(error);
}
@@ -143,22 +138,30 @@ class GcpParameterClient : public ParameterClient {
const std::string error =
absl::StrFormat("Failed converting %s parameter: %s to bool.",
parameter_name, *parameter);
- LOG(ERROR) << error;
+ PS_LOG(ERROR, log_context_) << error;
return absl::InvalidArgumentError(error);
}
return parameter_bool;
}
+ void UpdateLogContext(
+ privacy_sandbox::server_common::log::PSLogContext& log_context) override {
+ log_context_ = log_context;
+ }
+
private:
std::unique_ptr parameter_client_;
+ privacy_sandbox::server_common::log::PSLogContext& log_context_;
};
} // namespace
std::unique_ptr ParameterClient::Create(
- ParameterClient::ClientOptions client_options) {
- return std::make_unique(std::move(client_options));
+ ParameterClient::ClientOptions client_options,
+ privacy_sandbox::server_common::log::PSLogContext& log_context) {
+ return std::make_unique(std::move(client_options),
+ log_context);
}
} // namespace kv_server
diff --git a/components/cloud_config/parameter_client_gcp_test.cc b/components/cloud_config/parameter_client_gcp_test.cc
index 1f81c585..d87a95de 100644
--- a/components/cloud_config/parameter_client_gcp_test.cc
+++ b/components/cloud_config/parameter_client_gcp_test.cc
@@ -56,13 +56,12 @@ class ParameterClientGcpTest : public ::testing::Test {
std::unique_ptr mock_parameter_client =
std::make_unique();
EXPECT_CALL(*mock_parameter_client, Init)
- .WillOnce(Return(SuccessExecutionResult()));
- EXPECT_CALL(*mock_parameter_client, Run)
- .WillOnce(Return(SuccessExecutionResult()));
+ .WillOnce(Return(absl::OkStatus()));
+ EXPECT_CALL(*mock_parameter_client, Run).WillOnce(Return(absl::OkStatus()));
EXPECT_CALL(*mock_parameter_client, GetParameter)
.WillRepeatedly(
[this](GetParameterRequest get_param_req,
- Callback callback) -> ExecutionResult {
+ Callback callback) -> absl::Status {
// async reading parameter like the real case
bool param_not_found = false;
if (expected_param_values_.find(get_param_req.parameter_name()) ==
@@ -83,11 +82,9 @@ class ParameterClientGcpTest : public ::testing::Test {
cb(SuccessExecutionResult(), response);
}
}));
- if (param_not_found) {
- return FailureExecutionResult(5);
- } else {
- return SuccessExecutionResult();
- }
+ return param_not_found
+ ? absl::NotFoundError("Parameter not found.")
+ : absl::OkStatus();
});
ParameterClient::ClientOptions client_options;
diff --git a/components/cloud_config/parameter_client_local.cc b/components/cloud_config/parameter_client_local.cc
index ea8bf5f4..c051a649 100644
--- a/components/cloud_config/parameter_client_local.cc
+++ b/components/cloud_config/parameter_client_local.cc
@@ -57,6 +57,8 @@ ABSL_FLAG(std::int32_t, logging_verbosity_level, 0,
"Loggging verbosity level.");
ABSL_FLAG(absl::Duration, udf_timeout, absl::Seconds(5),
"Timeout for one UDF invocation");
+ABSL_FLAG(absl::Duration, udf_update_timeout, absl::Seconds(30),
+ "Timeout for UDF code update");
ABSL_FLAG(int32_t, udf_min_log_level, 0,
"Minimum logging level for UDFs. Info=0, Warn=1, Error=2. Default is "
"0(info).");
@@ -67,6 +69,8 @@ ABSL_FLAG(std::string, data_loading_prefix_allowlist, "",
"Allowlist for blob prefixes.");
ABSL_FLAG(bool, add_missing_keys_v1, false,
"Whether to add missing keys for v1.");
+ABSL_FLAG(bool, enable_consented_log, false, "Whether to enable consented log");
+ABSL_FLAG(std::string, consented_debug_token, "", "Consented debug token");
namespace kv_server {
namespace {
@@ -79,7 +83,9 @@ namespace {
// if there's a better way.
class LocalParameterClient : public ParameterClient {
public:
- LocalParameterClient() {
+ LocalParameterClient(
+ privacy_sandbox::server_common::log::PSLogContext& log_context)
+ : log_context_(log_context) {
string_flag_values_.insert(
{"kv-server-local-directory", absl::GetFlag(FLAGS_delta_directory)});
string_flag_values_.insert({"kv-server-local-data-bucket-id",
@@ -95,6 +101,8 @@ class LocalParameterClient : public ParameterClient {
string_flag_values_.insert(
{"kv-server-local-data-loading-blob-prefix-allowlist",
absl::GetFlag(FLAGS_data_loading_prefix_allowlist)});
+ string_flag_values_.insert({"kv-server-local-consented-debug-token",
+ absl::GetFlag(FLAGS_consented_debug_token)});
// Insert more string flag values here.
int32_t_flag_values_.insert(
@@ -127,6 +135,9 @@ class LocalParameterClient : public ParameterClient {
int32_t_flag_values_.insert(
{"kv-server-local-udf-timeout-millis",
absl::ToInt64Milliseconds(absl::GetFlag(FLAGS_udf_timeout))});
+ int32_t_flag_values_.insert(
+ {"kv-server-local-udf-update-timeout-millis",
+ absl::ToInt64Milliseconds(absl::GetFlag(FLAGS_udf_update_timeout))});
int32_t_flag_values_.insert({"kv-server-local-udf-min-log-level",
absl::GetFlag(FLAGS_udf_min_log_level)});
// Insert more int32 flag values here.
@@ -140,6 +151,8 @@ class LocalParameterClient : public ParameterClient {
bool_flag_values_.insert({"kv-server-local-use-sharding-key-regex", false});
bool_flag_values_.insert({"kv-server-local-enable-otel-logger",
absl::GetFlag(FLAGS_enable_otel_logger)});
+ bool_flag_values_.insert({"kv-server-local-enable-consented-log",
+ absl::GetFlag(FLAGS_enable_consented_log)});
// Insert more bool flag values here.
}
@@ -177,17 +190,24 @@ class LocalParameterClient : public ParameterClient {
}
}
+ void UpdateLogContext(
+ privacy_sandbox::server_common::log::PSLogContext& log_context) override {
+ log_context_ = log_context;
+ }
+
private:
absl::flat_hash_map int32_t_flag_values_;
absl::flat_hash_map string_flag_values_;
absl::flat_hash_map bool_flag_values_;
+ privacy_sandbox::server_common::log::PSLogContext& log_context_;
};
} // namespace
std::unique_ptr ParameterClient::Create(
- ParameterClient::ClientOptions client_options) {
- return std::make_unique();
+ ParameterClient::ClientOptions client_options,
+ privacy_sandbox::server_common::log::PSLogContext& log_context) {
+ return std::make_unique(log_context);
}
} // namespace kv_server
diff --git a/components/cloud_config/parameter_client_local_test.cc b/components/cloud_config/parameter_client_local_test.cc
index 62393754..f3218579 100644
--- a/components/cloud_config/parameter_client_local_test.cc
+++ b/components/cloud_config/parameter_client_local_test.cc
@@ -104,6 +104,12 @@ TEST(ParameterClientLocal, ExpectedFlagDefaultsArePresent) {
ASSERT_TRUE(statusor.ok());
EXPECT_EQ(5000, *statusor);
}
+ {
+ const auto statusor =
+ client->GetInt32Parameter("kv-server-local-udf-update-timeout-millis");
+ ASSERT_TRUE(statusor.ok());
+ EXPECT_EQ(30000, *statusor);
+ }
{
const auto statusor =
client->GetInt32Parameter("kv-server-local-udf-min-log-level");
diff --git a/components/container/BUILD.bazel b/components/container/BUILD.bazel
new file mode 100644
index 00000000..876ad64c
--- /dev/null
+++ b/components/container/BUILD.bazel
@@ -0,0 +1,42 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load("@rules_cc//cc:defs.bzl", "cc_library", "cc_test")
+
+package(default_visibility = ["//visibility:public"])
+
+cc_library(
+ name = "thread_safe_hash_map",
+ hdrs = ["thread_safe_hash_map.h"],
+ deps = [
+ "@com_google_absl//absl/base",
+ "@com_google_absl//absl/container:flat_hash_map",
+ "@com_google_absl//absl/container:node_hash_map",
+ "@com_google_absl//absl/synchronization",
+ ],
+)
+
+cc_test(
+ name = "thread_safe_hash_map_test",
+ size = "small",
+ srcs = [
+ "thread_safe_hash_map_test.cc",
+ ],
+ deps = [
+ ":thread_safe_hash_map",
+ "@com_google_absl//absl/strings",
+ "@com_google_googletest//:gtest",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
diff --git a/components/container/thread_safe_hash_map.h b/components/container/thread_safe_hash_map.h
new file mode 100644
index 00000000..cdeb2bed
--- /dev/null
+++ b/components/container/thread_safe_hash_map.h
@@ -0,0 +1,279 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef COMPONENTS_CONTAINER_THREAD_SAFE_HASH_MAP_H_
+#define COMPONENTS_CONTAINER_THREAD_SAFE_HASH_MAP_H_
+
+#include
+#include
+#include
+
+#include "absl/container/node_hash_map.h"
+#include "absl/synchronization/mutex.h"
+
+namespace kv_server {
+
+// Implements a mutex based "thread-safe" container wrapper around Abseil
+// maps. Exposes key-level locking such that two values associated with two
+// separate keys can be modified concurrently.
+//
+// Note that synchronization is done using `absl::Mutex` which is not re-entrant
+// so trying to obtain a `MutableLockedNode` on key while already holding
+// another `MutableLockedNode` for the same key will result in a deadlock. For
+// example, the following code will deadlock:
+//
+// ThreadSafeHashMap map;
+// auto node = map.PutIfAbsent(10, 20);
+// map.Get(10); // This call deadlocks.
+//
+// For convenience, `LockedNode` provides a function `release()` to release a
+// lock on a key early (instead of relying on RAII), but accessing the
+// `LockedNode`'s contents after releasing the lock results in undefined
+// behavior.
+template
+class ThreadSafeHashMap {
+ public:
+ class const_iterator;
+ template
+ class LockedNodePtr;
+ // Locked read-only view for a specific key value association in the map.
+ // Must call `is_present()` before calling and dereferencing `key()` and
+ // `value()`.
+ class ConstLockedNodePtr;
+ // Locked view for a specific key value association in the map with an
+ // in-place modifiable value. Must call `is_present()` before calling and
+ // dereferencing `key()` and `value()`.
+ using MutableLockedNodePtr = LockedNodePtr;
+
+ ThreadSafeHashMap() : nodes_map_mutex_(std::make_unique()) {}
+
+ // Returns a locked read-only view for `key` and its associated value. If
+ // `key` does not exist in the map, then: `ConstLockedNode.is_present()` is
+ // false.
+ // Prefer this function for reads because it uses a shared
+ // `absl::ReaderMutexLock` for synchronization and concurrent reads do not
+ // block.
+ template
+ ConstLockedNodePtr CGet(Key&& key) const;
+
+ // Returns a locked view for `key` with a modifiable value. If `key` does not
+ // exist in the map, then: `MutableLockedNode.is_present()` is false.
+ // Uses an exclusive lock `absl::WriterMutexLock` so prefer `CGet()` above for
+ // reads only.
+ template
+ MutableLockedNodePtr Get(Key&& key) const;
+
+ // Inserts `key` and `value` mapping into the map if `key` does not exist
+ // in the map.
+ // Returns:
+ // - `true` and view to the newly inserted `key`, `value` mapping if `key`
+ // does not exist.
+ // - `false` and view to the existing `key`, `value` mapping if `key` exists.
+ template
+ std::pair PutIfAbsent(Key&& key, Value&& value);
+
+ // Removes `key` and its associated value from the map if `predicate(value)`
+ // is true.
+ template
+ void RemoveIf(
+ Key&& key, std::function predicate =
+ [](const ValueT&) { return true; });
+
+ const_iterator begin() ABSL_NO_THREAD_SAFETY_ANALYSIS;
+ const_iterator end() ABSL_NO_THREAD_SAFETY_ANALYSIS;
+
+ private:
+ struct ValueNode {
+ template
+ explicit ValueNode(Value&& val)
+ : value(std::forward(val)),
+ mutex(std::make_unique()) {}
+ ValueT value;
+ std::unique_ptr mutex;
+ };
+ using KeyValueNodesMapType =
+ absl::node_hash_map>;
+
+ template
+ NodeT GetNode(Key&& key) const;
+
+ std::unique_ptr nodes_map_mutex_;
+ KeyValueNodesMapType key_value_nodes_map_ ABSL_GUARDED_BY(*nodes_map_mutex_);
+};
+
+template
+template
+NodeT ThreadSafeHashMap::GetNode(Key&& key) const {
+ absl::ReaderMutexLock map_lock(nodes_map_mutex_.get());
+ if (auto iter = key_value_nodes_map_.find(std::forward(key));
+ iter == key_value_nodes_map_.end()) {
+ return NodeT(nullptr, nullptr, nullptr);
+ } else {
+ return NodeT(&iter->first, &iter->second->value,
+ std::make_unique(iter->second->mutex.get()));
+ }
+}
+
+template
+template
+typename ThreadSafeHashMap::ConstLockedNodePtr
+ThreadSafeHashMap::CGet(Key&& key) const {
+ return GetNode(
+ std::forward(key));
+}
+
+template
+template
+typename ThreadSafeHashMap::MutableLockedNodePtr
+ThreadSafeHashMap::Get(Key&& key) const {
+ return GetNode(
+ std::forward(key));
+}
+
+template
+template
+std::pair::MutableLockedNodePtr, bool>
+ThreadSafeHashMap::PutIfAbsent(Key&& key, Value&& value) {
+ absl::WriterMutexLock map_lock(nodes_map_mutex_.get());
+ if (auto iter = key_value_nodes_map_.find(key);
+ iter != key_value_nodes_map_.end()) {
+ return std::make_pair(
+ MutableLockedNodePtr(
+ &iter->first, &iter->second->value,
+ std::make_unique(iter->second->mutex.get())),
+ false);
+ }
+ auto result = key_value_nodes_map_.emplace(
+ std::forward(key),
+ std::make_unique(std::forward(value)));
+ return std::make_pair(
+ MutableLockedNodePtr(&result.first->first, &result.first->second->value,
+ std::make_unique(
+ result.first->second->mutex.get())),
+ true);
+}
+
+template
+template
+void ThreadSafeHashMap::RemoveIf(
+ Key&& key, std::function predicate) {
+ absl::WriterMutexLock map_lock(nodes_map_mutex_.get());
+ auto iter = key_value_nodes_map_.find(std::forward(key));
+ if (iter == key_value_nodes_map_.end()) {
+ return;
+ }
+ {
+ // Wait for any current threads using the value to release their locks.
+ absl::WriterMutexLock value_lock(iter->second->mutex.get());
+ }
+ if (predicate(iter->second->value)) {
+ key_value_nodes_map_.erase(iter);
+ }
+}
+
+template
+typename ThreadSafeHashMap::const_iterator
+ThreadSafeHashMap::begin() {
+ return const_iterator(
+ std::make_unique(nodes_map_mutex_.get()),
+ key_value_nodes_map_.begin());
+}
+
+template
+typename ThreadSafeHashMap::const_iterator
+ThreadSafeHashMap::end() {
+ return const_iterator(nullptr, key_value_nodes_map_.end());
+}
+
+template
+template
+class ThreadSafeHashMap::LockedNodePtr {
+ public:
+ bool is_present() const { return key_ != nullptr; }
+ const KeyT* key() const { return key_; }
+ ValueT* value() const { return value_; }
+ void release() { lock_ = nullptr; }
+
+ private:
+ LockedNodePtr() : LockedNodePtr(nullptr, nullptr, nullptr) {}
+ LockedNodePtr(const KeyT* key, ValueT* value,
+ std::unique_ptr lock)
+ : key_(key), value_(value), lock_(std::move(lock)) {}
+
+ friend class ThreadSafeHashMap;
+
+ const KeyT* key_;
+ ValueT* value_;
+ std::unique_ptr lock_;
+};
+
+template
+class ThreadSafeHashMap::ConstLockedNodePtr
+ : LockedNodePtr {
+ using Base = typename ThreadSafeHashMap::ConstLockedNodePtr::LockedNodePtr;
+
+ public:
+ using Base::is_present;
+ using Base::key;
+ using Base::release;
+ const ValueT* value() const { return Base::value(); }
+
+ private:
+ using Base::Base;
+};
+
+template
+class ThreadSafeHashMap::const_iterator {
+ public:
+ using value_type = ConstLockedNodePtr;
+ using pointer = value_type*;
+ using reference = value_type&;
+
+ reference operator*() ABSL_NO_THREAD_SAFETY_ANALYSIS {
+ current_node_ = std::move(ConstLockedNodePtr(
+ &nodes_map_iter_->first, &nodes_map_iter_->second->value,
+ std::make_unique(
+ nodes_map_iter_->second->mutex.get())));
+ return current_node_;
+ }
+ pointer operator->() { return &operator*(); }
+ const_iterator& operator++() {
+ nodes_map_iter_++;
+ return *this;
+ }
+ friend bool operator==(const const_iterator& a, const const_iterator& b) {
+ return a.nodes_map_iter_ == b.nodes_map_iter_;
+ }
+ friend bool operator!=(const const_iterator& a, const const_iterator& b) {
+ return !(a == b);
+ }
+
+ private:
+ const_iterator(std::unique_ptr nodes_map_lock,
+ typename KeyValueNodesMapType::iterator nodes_map_iter)
+ : nodes_map_lock_(std::move(nodes_map_lock)),
+ nodes_map_iter_(nodes_map_iter) {}
+
+ friend class ThreadSafeHashMap;
+
+ ConstLockedNodePtr current_node_;
+ std::unique_ptr nodes_map_lock_;
+ typename KeyValueNodesMapType::iterator nodes_map_iter_;
+};
+
+} // namespace kv_server
+
+#endif // COMPONENTS_CONTAINER_THREAD_SAFE_HASH_MAP_H_
diff --git a/components/container/thread_safe_hash_map_test.cc b/components/container/thread_safe_hash_map_test.cc
new file mode 100644
index 00000000..f379fbb5
--- /dev/null
+++ b/components/container/thread_safe_hash_map_test.cc
@@ -0,0 +1,193 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "components/container/thread_safe_hash_map.h"
+
+#include
+#include
+#include
+#include
+
+#include "absl/strings/str_cat.h"
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+namespace kv_server {
+namespace {
+
+using testing::UnorderedElementsAre;
+using testing::UnorderedElementsAreArray;
+
+TEST(ThreadSafeHashMapTest, VerifyCGet) {
+ ThreadSafeHashMap map;
+ auto node = map.CGet(10);
+ EXPECT_FALSE(node.is_present());
+ map.PutIfAbsent(10, 20);
+ node = map.CGet(10);
+ ASSERT_TRUE(node.is_present());
+ EXPECT_EQ(*node.key(), 10);
+ EXPECT_EQ(*node.value(), 20);
+}
+
+TEST(ThreadSafeHashMapTest, VerifyGet) {
+ ThreadSafeHashMap map;
+ {
+ auto node = map.Get("key");
+ EXPECT_FALSE(node.is_present());
+ }
+ for (auto i : std::vector{1, 2, 3, 4, 5}) {
+ std::string key = absl::StrCat("key", i);
+ map.PutIfAbsent(key, i);
+ auto node = map.Get(key);
+ ASSERT_TRUE(node.is_present());
+ EXPECT_EQ(*node.key(), key);
+ EXPECT_EQ(*node.value(), i);
+ }
+}
+
+TEST(ThreadSafeHashMapTest, VerifyPutIfAbsent) {
+ ThreadSafeHashMap map;
+ {
+ auto result = map.PutIfAbsent("key", "value");
+ EXPECT_TRUE(result.second);
+ EXPECT_EQ(*result.first.value(), "value");
+ }
+ {
+ auto result = map.PutIfAbsent("key", "not applied");
+ EXPECT_FALSE(result.second);
+ EXPECT_EQ(*result.first.value(), "value");
+ }
+}
+
+TEST(ThreadSafeHashMapTest, VerifyRemoveIf) {
+ ThreadSafeHashMap map;
+ map.PutIfAbsent(10, "value");
+ {
+ auto node = map.CGet(10);
+ ASSERT_TRUE(node.is_present());
+ EXPECT_EQ(*node.key(), 10);
+ EXPECT_EQ(*node.value(), "value");
+ }
+ map.RemoveIf(10,
+ [](const std::string& value) { return value == "wrong value"; });
+ {
+ auto node = map.CGet(10);
+ ASSERT_TRUE(node.is_present());
+ EXPECT_EQ(*node.key(), 10);
+ EXPECT_EQ(*node.value(), "value");
+ }
+ map.RemoveIf(10, [](const std::string& value) { return value == "value"; });
+ {
+ auto node = map.CGet(10);
+ ASSERT_FALSE(node.is_present());
+ }
+}
+
+TEST(ThreadSafeHashMapTest, VerifyIteration) {
+ ThreadSafeHashMap map;
+ for (const auto& value :
+ std::vector{"one", "two", "three", "four", "five"}) {
+ map.PutIfAbsent(value, value);
+ }
+ std::vector values;
+ for (auto& node : map) {
+ values.push_back(*node.value());
+ }
+ EXPECT_THAT(values,
+ UnorderedElementsAre("one", "two", "three", "four", "five"));
+}
+
+TEST(ThreadSafeHashMapTest, VerifyMutableLockedValue) {
+ ThreadSafeHashMap map;
+ map.PutIfAbsent("key", "value");
+ {
+ auto node = map.Get("key");
+ ASSERT_TRUE(node.is_present());
+ EXPECT_EQ(*node.value(), "value");
+ // Let's modify value in place.
+ *node.value() = "modified";
+ }
+ {
+ auto node = map.Get("key");
+ ASSERT_TRUE(node.is_present());
+ EXPECT_EQ(*node.value(), "modified");
+ }
+}
+
+TEST(ThreadSafeHashMapTest, VerifyMoveOnlyValues) {
+ ThreadSafeHashMap> map;
+ std::string_view key = "key";
+ {
+ auto node = map.PutIfAbsent(key, std::make_unique("value"));
+ EXPECT_TRUE(node.second);
+ }
+ {
+ auto node = map.CGet(key);
+ EXPECT_TRUE(node.is_present());
+ EXPECT_THAT(*node.key(), key);
+ EXPECT_THAT(**node.value(), "value");
+ }
+}
+
+TEST(ThreadSafeHashMapTest, VerifyMultiThreadedWritesToSimpleType) {
+ ThreadSafeHashMap map;
+ auto key = 10;
+ map.PutIfAbsent(key, 0);
+ auto incr = [key, &map]() {
+ auto node = map.Get(key);
+ int32_t* value = node.value();
+ *value = *value + 1;
+ };
+ int num_tasks = 100;
+ std::vector> tasks;
+ tasks.reserve(num_tasks);
+ for (int t = 0; t < num_tasks; t++) {
+ tasks.push_back(std::async(std::launch::async, incr));
+ }
+ for (auto& task : tasks) {
+ task.get();
+ }
+ auto node = map.CGet(key);
+ EXPECT_EQ(*node.value(), num_tasks);
+}
+
+TEST(ThreadSafeHashMapTest, VerifyMultiThreadedWritesToComplexType) {
+ ThreadSafeHashMap> map;
+ int32_t key = 10;
+ std::size_t size = 1000;
+ map.PutIfAbsent(key, std::vector(size, 0));
+ auto incr = [key, &map]() {
+ auto node = map.Get(key);
+ auto* values = node.value();
+ for (int i = 0; i < values->size(); i++) {
+ (*values)[i] = (*values)[i] + 1;
+ }
+ };
+ int num_tasks = 100;
+ std::vector> tasks;
+ tasks.reserve(num_tasks);
+ for (int t = 0; t < num_tasks; t++) {
+ tasks.push_back(std::async(std::launch::async, incr));
+ }
+ for (auto& task : tasks) {
+ task.get();
+ }
+ std::vector expected(size, num_tasks);
+ auto node = map.CGet(key);
+ EXPECT_THAT(*node.value(),
+ UnorderedElementsAreArray(expected.begin(), expected.end()));
+}
+
+} // namespace
+} // namespace kv_server
diff --git a/components/data/blob_storage/BUILD.bazel b/components/data/blob_storage/BUILD.bazel
index 3597529b..03d4732f 100644
--- a/components/data/blob_storage/BUILD.bazel
+++ b/components/data/blob_storage/BUILD.bazel
@@ -30,6 +30,7 @@ cc_library(
"@com_google_absl//absl/status",
"@com_google_absl//absl/status:statusor",
"@com_google_absl//absl/strings",
+ "@google_privacysandbox_servers_common//src/logger:request_context_logger",
"@google_privacysandbox_servers_common//src/telemetry:telemetry_provider",
],
)
@@ -101,6 +102,7 @@ cc_library(
"@com_google_absl//absl/status",
"@com_google_absl//absl/status:statusor",
"@com_google_absl//absl/strings",
+ "@google_privacysandbox_servers_common//src/logger:request_context_logger",
],
)
diff --git a/components/data/blob_storage/blob_storage_change_notifier.h b/components/data/blob_storage/blob_storage_change_notifier.h
index 59ad8e31..047bc0dd 100644
--- a/components/data/blob_storage/blob_storage_change_notifier.h
+++ b/components/data/blob_storage/blob_storage_change_notifier.h
@@ -40,7 +40,10 @@ class BlobStorageChangeNotifier {
const std::function& should_stop_callback) = 0;
static absl::StatusOr> Create(
- NotifierMetadata notifier_metadata);
+ NotifierMetadata notifier_metadata,
+ privacy_sandbox::server_common::log::PSLogContext& log_context =
+ const_cast(
+ privacy_sandbox::server_common::log::kNoOpContext));
};
} // namespace kv_server
diff --git a/components/data/blob_storage/blob_storage_change_notifier_gcp.cc b/components/data/blob_storage/blob_storage_change_notifier_gcp.cc
index 8b76c1bb..b77d8c9b 100644
--- a/components/data/blob_storage/blob_storage_change_notifier_gcp.cc
+++ b/components/data/blob_storage/blob_storage_change_notifier_gcp.cc
@@ -27,8 +27,9 @@ namespace {
class GcpBlobStorageChangeNotifier : public BlobStorageChangeNotifier {
public:
explicit GcpBlobStorageChangeNotifier(
- std::unique_ptr notifier)
- : notifier_(std::move(notifier)) {}
+ std::unique_ptr notifier,
+ privacy_sandbox::server_common::log::PSLogContext& log_context)
+ : notifier_(std::move(notifier)), log_context_(log_context) {}
~GcpBlobStorageChangeNotifier() override { sleep_for_.Stop(); }
@@ -46,19 +47,24 @@ class GcpBlobStorageChangeNotifier : public BlobStorageChangeNotifier {
private:
std::unique_ptr notifier_;
SleepFor sleep_for_;
+ privacy_sandbox::server_common::log::PSLogContext& log_context_;
};
} // namespace
absl::StatusOr>
-BlobStorageChangeNotifier::Create(NotifierMetadata notifier_metadata) {
+BlobStorageChangeNotifier::Create(
+ NotifierMetadata notifier_metadata,
+ privacy_sandbox::server_common::log::PSLogContext& log_context) {
absl::StatusOr> notifier =
- ChangeNotifier::Create(std::get(notifier_metadata));
+ ChangeNotifier::Create(std::get(notifier_metadata),
+ log_context);
if (!notifier.ok()) {
return notifier.status();
}
- return std::make_unique(std::move(*notifier));
+ return std::make_unique(std::move(*notifier),
+ log_context);
}
} // namespace kv_server
diff --git a/components/data/blob_storage/blob_storage_change_notifier_local.cc b/components/data/blob_storage/blob_storage_change_notifier_local.cc
index c2394dd6..bdf9298f 100644
--- a/components/data/blob_storage/blob_storage_change_notifier_local.cc
+++ b/components/data/blob_storage/blob_storage_change_notifier_local.cc
@@ -23,8 +23,9 @@ namespace {
class LocalBlobStorageChangeNotifier : public BlobStorageChangeNotifier {
public:
explicit LocalBlobStorageChangeNotifier(
- std::unique_ptr notifier)
- : notifier_(std::move(notifier)) {}
+ std::unique_ptr notifier,
+ privacy_sandbox::server_common::log::PSLogContext& log_context)
+ : notifier_(std::move(notifier)), log_context_(log_context) {}
absl::StatusOr> GetNotifications(
absl::Duration max_wait,
@@ -34,20 +35,24 @@ class LocalBlobStorageChangeNotifier : public BlobStorageChangeNotifier {
private:
std::unique_ptr notifier_;
+ privacy_sandbox::server_common::log::PSLogContext& log_context_;
};
} // namespace
absl::StatusOr>
-BlobStorageChangeNotifier::Create(NotifierMetadata notifier_metadata) {
+BlobStorageChangeNotifier::Create(
+ NotifierMetadata notifier_metadata,
+ privacy_sandbox::server_common::log::PSLogContext& log_context) {
absl::StatusOr> notifier =
- ChangeNotifier::Create(
- std::get(notifier_metadata));
+ ChangeNotifier::Create(std::get(notifier_metadata),
+ log_context);
if (!notifier.ok()) {
return notifier.status();
}
- return std::make_unique(std::move(*notifier));
+ return std::make_unique(std::move(*notifier),
+ log_context);
}
} // namespace kv_server
diff --git a/components/data/blob_storage/blob_storage_change_notifier_s3.cc b/components/data/blob_storage/blob_storage_change_notifier_s3.cc
index 0f24f007..7bea7d84 100644
--- a/components/data/blob_storage/blob_storage_change_notifier_s3.cc
+++ b/components/data/blob_storage/blob_storage_change_notifier_s3.cc
@@ -26,8 +26,10 @@ namespace {
class S3BlobStorageChangeNotifier : public BlobStorageChangeNotifier {
public:
- explicit S3BlobStorageChangeNotifier(std::unique_ptr notifier)
- : change_notifier_(std::move(notifier)) {}
+ explicit S3BlobStorageChangeNotifier(
+ std::unique_ptr notifier,
+ privacy_sandbox::server_common::log::PSLogContext& log_context)
+ : change_notifier_(std::move(notifier)), log_context_(log_context) {}
absl::StatusOr> GetNotifications(
absl::Duration max_wait,
@@ -45,8 +47,9 @@ class S3BlobStorageChangeNotifier : public BlobStorageChangeNotifier {
const absl::StatusOr parsedMessage =
ParseObjectKeyFromJson(message);
if (!parsedMessage.ok()) {
- LOG(ERROR) << "Failed to parse JSON. Error: " << parsedMessage.status()
- << " Message:" << message;
+ PS_LOG(ERROR, log_context_)
+ << "Failed to parse JSON. Error: " << parsedMessage.status()
+ << " Message:" << message;
LogServerErrorMetric(kAwsJsonParseError);
continue;
}
@@ -97,23 +100,27 @@ class S3BlobStorageChangeNotifier : public BlobStorageChangeNotifier {
}
std::unique_ptr change_notifier_;
+ privacy_sandbox::server_common::log::PSLogContext& log_context_;
};
} // namespace
absl::StatusOr>
-BlobStorageChangeNotifier::Create(NotifierMetadata notifier_metadata) {
+BlobStorageChangeNotifier::Create(
+ NotifierMetadata notifier_metadata,
+ privacy_sandbox::server_common::log::PSLogContext& log_context) {
auto cloud_notifier_metadata =
std::get(notifier_metadata);
cloud_notifier_metadata.queue_prefix = "BlobNotifier_";
absl::StatusOr> status_or =
- ChangeNotifier::Create(std::move(cloud_notifier_metadata));
+ ChangeNotifier::Create(std::move(cloud_notifier_metadata), log_context);
if (!status_or.ok()) {
return status_or.status();
}
- return std::make_unique(std::move(*status_or));
+ return std::make_unique(std::move(*status_or),
+ log_context);
}
} // namespace kv_server
diff --git a/components/data/blob_storage/blob_storage_change_notifier_s3_test.cc b/components/data/blob_storage/blob_storage_change_notifier_s3_test.cc
index 0709d015..69bc4e8f 100644
--- a/components/data/blob_storage/blob_storage_change_notifier_s3_test.cc
+++ b/components/data/blob_storage/blob_storage_change_notifier_s3_test.cc
@@ -84,8 +84,10 @@ class BlobStorageChangeNotifierS3Test : public ::testing::Test {
.WillOnce(::testing::Return(outcome));
}
- PlatformInitializer initializer_;
MockMessageService mock_message_service_;
+
+ private:
+ PlatformInitializer initializer_;
};
TEST_F(BlobStorageChangeNotifierS3Test, AwsSqsUnavailable) {
diff --git a/components/data/blob_storage/blob_storage_client.h b/components/data/blob_storage/blob_storage_client.h
index 68cb3f27..47585ef4 100644
--- a/components/data/blob_storage/blob_storage_client.h
+++ b/components/data/blob_storage/blob_storage_client.h
@@ -27,6 +27,7 @@
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
+#include "src/logger/request_context_logger.h"
namespace kv_server {
@@ -95,7 +96,10 @@ class BlobStorageClientFactory {
virtual ~BlobStorageClientFactory() = default;
virtual std::unique_ptr CreateBlobStorageClient(
BlobStorageClient::ClientOptions client_options =
- BlobStorageClient::ClientOptions()) = 0;
+ BlobStorageClient::ClientOptions(),
+ privacy_sandbox::server_common::log::PSLogContext& log_context =
+ const_cast(
+ privacy_sandbox::server_common::log::kNoOpContext)) = 0;
static std::unique_ptr Create();
};
diff --git a/components/data/blob_storage/blob_storage_client_gcp.cc b/components/data/blob_storage/blob_storage_client_gcp.cc
index 5be23c51..1d4ae64c 100644
--- a/components/data/blob_storage/blob_storage_client_gcp.cc
+++ b/components/data/blob_storage/blob_storage_client_gcp.cc
@@ -86,16 +86,24 @@ class GcpBlobInputStreamBuf : public SeekingInputStreambuf {
class GcpBlobReader : public BlobReader {
public:
- GcpBlobReader(google::cloud::storage::Client& client,
- BlobStorageClient::DataLocation location)
+ GcpBlobReader(
+ google::cloud::storage::Client& client,
+ BlobStorageClient::DataLocation location,
+ privacy_sandbox::server_common::log::PSLogContext& log_context =
+ const_cast(
+ privacy_sandbox::server_common::log::kNoOpContext))
: BlobReader(),
+ log_context_(log_context),
streambuf_(client, location,
- GetOptions([this, location](absl::Status status) {
- LOG(ERROR) << "Blob "
- << AppendPrefix(location.key, location.prefix)
- << " failed stream with: " << status;
- is_.setstate(std::ios_base::badbit);
- })),
+ GetOptions(
+ [this, location](absl::Status status) {
+ PS_LOG(ERROR, log_context_)
+ << "Blob "
+ << AppendPrefix(location.key, location.prefix)
+ << " failed stream with: " << status;
+ is_.setstate(std::ios_base::badbit);
+ },
+ log_context)),
is_(&streambuf_) {}
std::istream& Stream() { return is_; }
@@ -103,24 +111,28 @@ class GcpBlobReader : public BlobReader {
private:
static SeekingInputStreambuf::Options GetOptions(
- std::function error_callback) {
+ std::function error_callback,
+ privacy_sandbox::server_common::log::PSLogContext& log_context) {
SeekingInputStreambuf::Options options;
options.error_callback = std::move(error_callback);
+ options.log_context = log_context;
return options;
}
-
+ privacy_sandbox::server_common::log::PSLogContext& log_context_;
GcpBlobInputStreamBuf streambuf_;
std::istream is_;
};
} // namespace
GcpBlobStorageClient::GcpBlobStorageClient(
- std::unique_ptr client)
- : client_(std::move(client)) {}
+ std::unique_ptr client,
+ privacy_sandbox::server_common::log::PSLogContext& log_context)
+ : client_(std::move(client)), log_context_(log_context) {}
std::unique_ptr GcpBlobStorageClient::GetBlobReader(
DataLocation location) {
- return std::make_unique(*client_, std::move(location));
+ return std::make_unique(*client_, std::move(location),
+ log_context_);
}
absl::Status GcpBlobStorageClient::PutBlob(BlobReader& blob_reader,
@@ -157,8 +169,9 @@ absl::StatusOr> GcpBlobStorageClient::ListBlobs(
}
for (auto&& object_metadata : list_object_reader) {
if (!object_metadata) {
- LOG(ERROR) << "Blob error when listing blobs:"
- << std::move(object_metadata).status().message();
+ PS_LOG(ERROR, log_context_)
+ << "Blob error when listing blobs:"
+ << std::move(object_metadata).status().message();
continue;
}
// Manually exclude the starting name as the StartOffset option is
@@ -178,9 +191,10 @@ class GcpBlobStorageClientFactory : public BlobStorageClientFactory {
public:
~GcpBlobStorageClientFactory() = default;
std::unique_ptr CreateBlobStorageClient(
- BlobStorageClient::ClientOptions /*client_options*/) override {
+ BlobStorageClient::ClientOptions /*client_options*/,
+ privacy_sandbox::server_common::log::PSLogContext& log_context) override {
return std::make_unique(
- std::make_unique());
+ std::make_unique(), log_context);
}
};
} // namespace
diff --git a/components/data/blob_storage/blob_storage_client_gcp.h b/components/data/blob_storage/blob_storage_client_gcp.h
index d937a2b5..aaed23b2 100644
--- a/components/data/blob_storage/blob_storage_client_gcp.h
+++ b/components/data/blob_storage/blob_storage_client_gcp.h
@@ -28,7 +28,8 @@ namespace kv_server {
class GcpBlobStorageClient : public BlobStorageClient {
public:
explicit GcpBlobStorageClient(
- std::unique_ptr client);
+ std::unique_ptr client,
+ privacy_sandbox::server_common::log::PSLogContext& log_context);
~GcpBlobStorageClient() = default;
@@ -43,5 +44,6 @@ class GcpBlobStorageClient : public BlobStorageClient {
private:
std::unique_ptr client_;
+ privacy_sandbox::server_common::log::PSLogContext& log_context_;
};
} // namespace kv_server
diff --git a/components/data/blob_storage/blob_storage_client_gcp_test.cc b/components/data/blob_storage/blob_storage_client_gcp_test.cc
index e08c952e..ac4b4900 100644
--- a/components/data/blob_storage/blob_storage_client_gcp_test.cc
+++ b/components/data/blob_storage/blob_storage_client_gcp_test.cc
@@ -50,6 +50,7 @@ using testing::Property;
class GcpBlobStorageClientTest : public ::testing::Test {
protected:
PlatformInitializer initializer_;
+ privacy_sandbox::server_common::log::NoOpContext no_op_context_;
void SetUp() override {
privacy_sandbox::server_common::telemetry::TelemetryConfig config_proto;
config_proto.set_mode(
@@ -73,7 +74,8 @@ TEST_F(GcpBlobStorageClientTest, DeleteBlobSucceeds) {
google::cloud::make_status_or(gcs::internal::EmptyResponse{})));
std::unique_ptr client =
- std::make_unique(std::move(mock_client));
+ std::make_unique(std::move(mock_client),
+ no_op_context_);
BlobStorageClient::DataLocation location;
location.bucket = "test_bucket";
location.key = "test_object";
@@ -93,7 +95,8 @@ TEST_F(GcpBlobStorageClientTest, DeleteBlobFails) {
google::cloud::StatusCode::kPermissionDenied, "uh-oh")));
std::unique_ptr client =
- std::make_unique(std::move(mock_client));
+ std::make_unique(std::move(mock_client),
+ no_op_context_);
BlobStorageClient::DataLocation location;
location.bucket = "test_bucket";
location.key = "test_object";
@@ -118,7 +121,8 @@ TEST_F(GcpBlobStorageClientTest, ListBlobSucceeds) {
.WillOnce(testing::Return(response));
std::unique_ptr client =
- std::make_unique(std::move(mock_client));
+ std::make_unique(std::move(mock_client),
+ no_op_context_);
BlobStorageClient::DataLocation location;
location.bucket = "test_bucket";
location.key = "test_object";
@@ -151,7 +155,8 @@ TEST_F(GcpBlobStorageClientTest, ListBlobWithNonInclusiveStartAfter) {
.WillOnce(testing::Return(response));
std::unique_ptr client =
- std::make_unique(std::move(mock_client));
+ std::make_unique(std::move(mock_client),
+ no_op_context_);
BlobStorageClient::DataLocation location;
location.bucket = "test_bucket";
location.key = "test_object";
@@ -180,7 +185,8 @@ TEST_F(GcpBlobStorageClientTest, ListBlobWithNoNewObject) {
.WillOnce(testing::Return(response));
std::unique_ptr client =
- std::make_unique(std::move(mock_client));
+ std::make_unique(std::move(mock_client),
+ no_op_context_);
BlobStorageClient::DataLocation location;
location.bucket = "test_bucket";
location.key = "test_object";
@@ -208,7 +214,8 @@ TEST_F(GcpBlobStorageClientTest, DeleteBlobWithPrefixSucceeds) {
google::cloud::make_status_or(gcs::internal::EmptyResponse{})));
std::unique_ptr client =
- std::make_unique(std::move(mock_client));
+ std::make_unique(std::move(mock_client),
+ no_op_context_);
BlobStorageClient::DataLocation location{
.bucket = "test_bucket",
.prefix = "test_prefix",
diff --git a/components/data/blob_storage/blob_storage_client_local.cc b/components/data/blob_storage/blob_storage_client_local.cc
index 8f823a38..e4d7e190 100644
--- a/components/data/blob_storage/blob_storage_client_local.cc
+++ b/components/data/blob_storage/blob_storage_client_local.cc
@@ -50,7 +50,7 @@ std::unique_ptr FileBlobStorageClient::GetBlobReader(
std::make_unique(GetFullPath(location));
if (!reader->Stream()) {
- LOG(ERROR) << absl::ErrnoToStatus(
+ PS_LOG(ERROR, log_context_) << absl::ErrnoToStatus(
errno,
absl::StrCat("Unable to open file: ", GetFullPath(location).string()));
return nullptr;
@@ -135,8 +135,9 @@ class LocalBlobStorageClientFactory : public BlobStorageClientFactory {
public:
~LocalBlobStorageClientFactory() = default;
std::unique_ptr CreateBlobStorageClient(
- BlobStorageClient::ClientOptions /*client_options*/) override {
- return std::make_unique();
+ BlobStorageClient::ClientOptions /*client_options*/,
+ privacy_sandbox::server_common::log::PSLogContext& log_context) override {
+ return std::make_unique(log_context);
}
};
} // namespace
diff --git a/components/data/blob_storage/blob_storage_client_local.h b/components/data/blob_storage/blob_storage_client_local.h
index 7b55a9ad..a53fe6bf 100644
--- a/components/data/blob_storage/blob_storage_client_local.h
+++ b/components/data/blob_storage/blob_storage_client_local.h
@@ -26,7 +26,9 @@
namespace kv_server {
class FileBlobStorageClient : public BlobStorageClient {
public:
- FileBlobStorageClient() = default;
+ FileBlobStorageClient(
+ privacy_sandbox::server_common::log::PSLogContext& log_context)
+ : log_context_(log_context) {}
~FileBlobStorageClient() = default;
@@ -41,5 +43,6 @@ class FileBlobStorageClient : public BlobStorageClient {
private:
std::filesystem::path GetFullPath(const DataLocation& location);
+ privacy_sandbox::server_common::log::PSLogContext& log_context_;
};
} // namespace kv_server
diff --git a/components/data/blob_storage/blob_storage_client_local_test.cc b/components/data/blob_storage/blob_storage_client_local_test.cc
index e8b714d2..572090a5 100644
--- a/components/data/blob_storage/blob_storage_client_local_test.cc
+++ b/components/data/blob_storage/blob_storage_client_local_test.cc
@@ -33,6 +33,11 @@
namespace kv_server {
namespace {
+class LocalBlobStorageClientTest : public ::testing::Test {
+ protected:
+ privacy_sandbox::server_common::log::NoOpContext no_op_context_;
+};
+
void CreateSubDir(std::string_view subdir_name) {
std::filesystem::create_directory(
std::filesystem::path(::testing::TempDir()) / subdir_name);
@@ -45,9 +50,9 @@ void CreateFileInTmpDir(const std::string& filename) {
file << "arbitrary file contents";
}
-TEST(LocalBlobStorageClientTest, ListNotFoundDirectory) {
+TEST_F(LocalBlobStorageClientTest, ListNotFoundDirectory) {
std::unique_ptr client =
- std::make_unique();
+ std::make_unique(no_op_context_);
BlobStorageClient::DataLocation location;
location.bucket = "this is not a valid directory path";
@@ -57,9 +62,9 @@ TEST(LocalBlobStorageClientTest, ListNotFoundDirectory) {
client->ListBlobs(location, options).status().code());
}
-TEST(LocalBlobStorageClientTest, ListEmptyDirectory) {
+TEST_F(LocalBlobStorageClientTest, ListEmptyDirectory) {
std::unique_ptr client =
- std::make_unique();
+ std::make_unique(no_op_context_);
BlobStorageClient::DataLocation location;
// Directory contains no files by default.
@@ -70,9 +75,9 @@ TEST(LocalBlobStorageClientTest, ListEmptyDirectory) {
EXPECT_TRUE(status_or.value().empty());
}
-TEST(LocalBlobStorageClientTest, ListDirectoryWithFile) {
+TEST_F(LocalBlobStorageClientTest, ListDirectoryWithFile) {
std::unique_ptr client =
- std::make_unique();
+ std::make_unique(no_op_context_);
CreateFileInTmpDir("a");
BlobStorageClient::DataLocation location;
@@ -84,9 +89,9 @@ TEST(LocalBlobStorageClientTest, ListDirectoryWithFile) {
EXPECT_EQ(*status_or, std::vector{"a"});
}
-TEST(LocalBlobStorageClientTest, DeleteNotFoundBlob) {
+TEST_F(LocalBlobStorageClientTest, DeleteNotFoundBlob) {
std::unique_ptr client =
- std::make_unique();
+ std::make_unique(no_op_context_);
BlobStorageClient::DataLocation location;
location.bucket = "this is not a valid directory path";
@@ -95,9 +100,9 @@ TEST(LocalBlobStorageClientTest, DeleteNotFoundBlob) {
EXPECT_EQ(absl::StatusCode::kInternal, client->DeleteBlob(location).code());
}
-TEST(LocalBlobStorageClientTest, DeleteBlob) {
+TEST_F(LocalBlobStorageClientTest, DeleteBlob) {
std::unique_ptr client =
- std::make_unique();
+ std::make_unique(no_op_context_);
BlobStorageClient::DataLocation location;
location.bucket = ::testing::TempDir();
@@ -107,9 +112,9 @@ TEST(LocalBlobStorageClientTest, DeleteBlob) {
EXPECT_EQ(absl::StatusCode::kOk, client->DeleteBlob(location).code());
}
-TEST(LocalBlobStorageClientTest, PutBlob) {
+TEST_F(LocalBlobStorageClientTest, PutBlob) {
std::unique_ptr client =
- std::make_unique();
+ std::make_unique(no_op_context_);
BlobStorageClient::DataLocation from;
from.bucket = ::testing::TempDir();
@@ -126,9 +131,9 @@ TEST(LocalBlobStorageClientTest, PutBlob) {
client->PutBlob(*from_blob_reader, to).code());
}
-TEST(LocalBlobStorageClientTest, DeleteBlobWithPrefix) {
+TEST_F(LocalBlobStorageClientTest, DeleteBlobWithPrefix) {
std::unique_ptr client =
- std::make_unique