forked from bcgov/how-to-workshops
-
Notifications
You must be signed in to change notification settings - Fork 0
/
PostgresCluster.yaml
142 lines (142 loc) · 4.99 KB
/
PostgresCluster.yaml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
# this is a highly minimalist installation of CrunchyDB, capable of fitting inside a very small namespace.
# testing shows it works comfortably for a small dataset under light load.
# it is very likely that you will need to tune up some of these values to suit the needs of your application,
# but this is a good starting place to get your DB up and functioning - from here, you increase each resource as you require.
---
apiVersion: postgres-operator.crunchydata.com/v1beta1
kind: PostgresCluster
metadata:
  name: hippo-ha
spec:
  # this block is useful only if you also have monitoring set up for your cluster.
  # this example installation is intended to be as small as possible, so it has been removed.
  # however, this block remains as an example if you would like to add monitoring to your cluster.
  #
  # monitoring:
  #   pgmonitor:
  #     # this stuff is for the "exporter" container in the "hippo-ha-pgha1" set of pods
  #     exporter:
  #       resources:
  #         requests:
  #           cpu: 50m
  #           memory: 32Mi
  #         limits:
  #           cpu: 100m
  #           memory: 64Mi
  postgresVersion: 13
  instances:
    - name: pgha1
      replicas: 3
      # this is how you create a PDB - don't make a separate one yourself!
      minAvailable: 1
      # these resources are for the "database" container in the "hippo-ha-pgha1" set of pods
      resources:
        requests:
          cpu: 50m
          memory: 128Mi
        limits:
          cpu: 250m
          memory: 512Mi
      sidecars:
        # this stuff is for the "replication-cert-copy" container in the "hippo-ha-pgha1" set of pods
        replicaCertCopy:
          resources:
            requests:
              cpu: 50m
              memory: 16Mi
            limits:
              cpu: 100m
              memory: 32Mi
      dataVolumeClaimSpec:
        accessModes:
          - "ReadWriteOnce"
        resources:
          requests:
            storage: 256Mi
        storageClassName: netapp-block-standard
      # spread the postgres pods across nodes so a single node failure cannot take down the whole cluster
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 1
              podAffinityTerm:
                topologyKey: kubernetes.io/hostname
                labelSelector:
                  matchLabels:
                    postgres-operator.crunchydata.com/cluster: hippo-ha
                    postgres-operator.crunchydata.com/instance-set: pgha1
  backups:
    pgbackrest:
      global:
        # keep at most 2 full backups (incrementals tied to expired fulls expire with them)
        repo1-retention-full: "2"
      repos:
        - name: repo1
          schedules:
            # Full backup every day at 8:00am UTC
            full: "0 8 * * *"
            # Incremental backup every 4 hours, except at 8am UTC (when the full backup is running)
            incremental: "0 0,4,12,16,20 * * *"
          volume:
            volumeClaimSpec:
              accessModes:
                - "ReadWriteOnce"
              resources:
                requests:
                  storage: 512Mi
              storageClassName: netapp-file-backup
      # this stuff is for the "pgbackrest" container (the only non-init container) in the "hippo-ha-repo-host" pod
      repoHost:
        resources:
          requests:
            cpu: 50m
            memory: 64Mi
          limits:
            cpu: 100m
            memory: 128Mi
      sidecars:
        # this stuff is for the "pgbackrest" container in the "hippo-ha-pgha1" set of pods
        pgbackrest:
          resources:
            requests:
              cpu: 50m
              memory: 64Mi
            limits:
              cpu: 100m
              memory: 128Mi
      # allows the triggering of manual backups
      manual:
        repoName: repo1
        options:
          - --type=full
  patroni:
    dynamicConfiguration:
      postgresql:
        # these will probably allow your database to start up, but you'll definitely want to tune them up a bit for anything but the most minimal DBs.
        parameters:
          shared_buffers: '16MB' # default is 128MB; a good tuned default for shared_buffers is 25% of the memory allocated to the pod
          wal_buffers: '-1' # automatically set as 1/32 of shared_buffers or 64kB, whichever is larger
          min_wal_size: '32MB'
          max_wal_size: '64MB' # default is 1GB
  proxy:
    pgBouncer:
      config:
        global:
          client_tls_sslmode: disable
      replicas: 2
      # these resources are for the "pgbouncer" container in the "hippo-ha-pgbouncer" set of pods
      # there is a sidecar in these pods which are not mentioned here, but the requests/limits are teeny weeny by default so no worries there.
      resources:
        requests:
          cpu: 50m
          memory: 32Mi
        limits:
          cpu: 100m
          memory: 64Mi
      # spread the pgbouncer pods across nodes, same rationale as the postgres instances above
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 1
              podAffinityTerm:
                topologyKey: kubernetes.io/hostname
                labelSelector:
                  matchLabels:
                    postgres-operator.crunchydata.com/cluster: hippo-ha
                    postgres-operator.crunchydata.com/role: pgbouncer