# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

---
blueprint_name: storage-gke

vars:
  project_id:  ## Set GCP Project ID Here ##
  deployment_name: storage-gke-01
  region: us-central1
  zone: us-central1-c
  # CIDR block containing the IP of the machine calling terraform.
  # The following line must be updated for this example to work.
  authorized_cidr: <your-ip-address>/32
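
# The modules below create a VPC, a GKE cluster with the Filestore and Cloud
# Storage FUSE CSI drivers enabled, a GCS bucket and a Filestore instance
# exposed to the cluster as persistent volumes, and example jobs that exercise
# shared and ephemeral storage.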
deployment_groups:
- group: primary
  modules:
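  # VPC with a subnetwork whose secondary IP ranges ("pods" and "services") are
  # consumed by the GKE cluster for Pod and Service addressing.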
  - id: network1
    source: modules/network/vpc
    settings:
      subnetwork_name: gke-subnet
      secondary_ranges:
        gke-subnet:
        - range_name: pods
          ip_cidr_range: 10.4.0.0/14
        - range_name: services
          ip_cidr_range: 10.0.32.0/20
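
  # GKE cluster with the Filestore CSI and Cloud Storage FUSE CSI drivers
  # enabled. The control plane's public endpoint is reachable only from the
  # authorized_cidr set in vars, and a workload identity service account is
  # configured, which the GCS-backed volume below relies on.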
  - id: gke_cluster
    source: community/modules/scheduler/gke-cluster
    use: [network1]
    settings:
      enable_filestore_csi: true
      enable_gcsfuse_csi: true
      configure_workload_identity_sa: true  # needed when using GCS
      enable_private_endpoint: false  # Allows for access from authorized public IPs
      master_authorized_networks:
      - display_name: deployment-machine
        cidr_block: $(vars.authorized_cidr)
    outputs: [instructions]
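
  # Small zonal node pool; the shared storage job below targets this pool
  # through its use list.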
  - id: debug_pool
    source: community/modules/compute/gke-node-pool
    use: [gke_cluster]
    settings:
      name: debug
      zones: [$(vars.zone)]
      machine_type: n2d-standard-2

  ### Google Cloud Storage ###
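
  # A GCS bucket mounted into jobs at /data. gke-persistent-volume wraps it in
  # a PersistentVolume/PersistentVolumeClaim backed by the Cloud Storage FUSE
  # CSI driver enabled on the cluster above.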
  - id: data-bucket
    source: community/modules/file-system/cloud-storage-bucket
    settings:
      local_mount: /data
      random_suffix: true
      force_destroy: true

  - id: data-bucket-pv
    source: community/modules/file-system/gke-persistent-volume
    use: [gke_cluster, data-bucket]
    settings: {capacity_gb: 5000}

  ### Filestore ###
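
  # A Filestore instance mounted into jobs at /shared, likewise exposed to the
  # cluster as a PersistentVolume/PersistentVolumeClaim.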
  - id: filestore
    source: modules/file-system/filestore
    use: [network1]
    settings: {local_mount: /shared}

  - id: shared-filestore-pv
    source: community/modules/file-system/gke-persistent-volume
    use: [gke_cluster, filestore]

  ### Shared Storage Job ###
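
  # Renders a Kubernetes Job (node_count: 5 tasks) that mounts both volumes.
  # Each task derives the job name from its pod hostname, writes ~1 MB of
  # random seed data to GCS, copies it to Filestore, and writes the MD5 hash
  # back to GCS. JOB_COMPLETION_INDEX is set by Kubernetes for Jobs in Indexed
  # completion mode and gives each task a unique file name.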
  - id: shared-fs-job
    source: community/modules/compute/gke-job-template
    use:
    - gke_cluster
    - debug_pool
    - shared-filestore-pv
    - data-bucket-pv
    settings:
      image: bash
      command:
      - bash
      - -c
      - |
        echo "Set up job folders"
        shopt -s extglob; JOB=${HOSTNAME%%-+([[:digit:]])}
        mkdir /data/${JOB}/ -p; mkdir /shared/${JOB}/ -p;
        echo "Writing seed file to GCS"
        dd if=/dev/urandom of=/data/${JOB}/${JOB_COMPLETION_INDEX}.dat bs=1K count=1000
        echo "Copy seed data from GCS to Filestore"
        cp /data/${JOB}/${JOB_COMPLETION_INDEX}.dat /shared/${JOB}/
        echo "Hash file from Filestore and save to GCS"
        md5sum /shared/${JOB}/${JOB_COMPLETION_INDEX}.dat > /data/${JOB}/${JOB_COMPLETION_INDEX}.md5
      node_count: 5
    outputs: [instructions]

  ### Ephemeral Storage ###
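
  # Node pool with one local SSD attached per node for ephemeral scratch space.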
  - id: local-ssd-pool
    source: community/modules/compute/gke-node-pool
    use: [gke_cluster]
    settings:
      name: local-ssd
      machine_type: n2d-standard-2
      local_ssd_count_ephemeral_storage: 1
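
  # Job demonstrating the available ephemeral volume types (memory-backed,
  # local SSD, and persistent-disk scratch). The fio command runs the
  # write-throughput benchmark from the linked Google Cloud guide against the
  # local SSD mount.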
  - id: ephemeral-storage-job
    source: community/modules/compute/gke-job-template
    use: [local-ssd-pool]
    settings:
      name: ephemeral-storage-job
      ephemeral_volumes:  # below shows all options, usually only 1 is needed
      - type: memory  # backed by node memory
        mount_path: /scratch-mem
        size_gb: 5
      - type: local-ssd  # node pool must specify local_ssd_count_ephemeral_storage
        mount_path: /scratch-local-ssd
        size_gb: 280  # System holds back some of 375 GiB
      - type: pd-ssd
        mount_path: /pd-ssd
        size_gb: 100
      - type: pd-balanced
        mount_path: /pd-balanced
        size_gb: 100
      image: ljishen/fio
      command:  # https://cloud.google.com/compute/docs/disks/benchmarking-pd-performance
      - fio
      - --name=write_throughput
      - --directory=/scratch-local-ssd
      - --numjobs=16
      - --size=5G
      - --time_based
      - --runtime=30s
      - --ramp_time=2s
      - --ioengine=libaio
      - --direct=1
      - --verify=0
      - --bs=1M
      - --iodepth=64
      - --rw=write
      - --group_reporting=1
      - --iodepth_batch_submit=64
      - --iodepth_batch_complete_max=64
    outputs: [instructions]