---
#
# 1. (Required) Cluster details - Cluster represents the Kubernetes cluster layer and any additional customizations
#
# (Required) Timezone is your IANA formatted timezone (e.g. America/New_York)
bootstrap_timezone: ""
# (Required) Distribution can either be k3s or talos
bootstrap_distribution: k3s
# (Required: Talos) Talos Specific Options
bootstrap_talos:
  # (Required: Talos) If you need any additional System Extensions and/or kernel arguments, generate a schematic ID.
  # Go to https://factory.talos.dev/ and choose the System Extensions and/or kernel arguments you need.
  schematic_id: ""
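  # For illustration only: the schematic you submit to https://factory.talos.dev/ is a small
  # YAML document roughly like the one below (the extension name and kernel argument are
  # placeholders - pick whatever your hardware actually needs):
  #   customization:
  #     extraKernelArgs:
  #       - net.ifnames=0
  #     systemExtensions:
  #       officialExtensions:
  #         - siderolabs/intel-ucode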
  # (Optional: Talos) Add a VLAN tag to the network master device
  # See: https://www.talos.dev/latest/advanced/advanced-networking/#vlans
  vlan: ""
  # (Optional: Talos) Secure Boot and TPM-based disk encryption
  secureboot:
    # (Optional) Enable Secure Boot on UEFI systems. Not supported on x86 platforms in BIOS mode.
    # See: https://www.talos.dev/latest/talos-guides/install/bare-metal-platforms/secureboot
    enabled: false
    # (Optional) Enable TPM-based disk encryption. Requires TPM 2.0
    # See: https://www.talos.dev/v1.6/talos-guides/install/bare-metal-platforms/secureboot/#disk-encryption-with-tpm
    encrypt_disk_with_tpm: false
  # (Optional) Add includes for user-provided patches to the generated talconfig.yaml.
  # See: https://github.com/budimanjojo/talhelper/blob/179ba9ed42f70069c7842109bea24f769f7af6eb/example/extraKernelArgs-patch.yaml
  # Patches are applied in the order listed below (global overrides controlplane/worker, which overrides node-specific).
  # Create these files to allow talos:bootstrap-genconfig to complete (empty files are ok); see the sketch after this block.
  #   kubernetes/bootstrap/talos/patches/node_<name>.yaml  # Patches for individual nodes
  #   kubernetes/bootstrap/talos/patches/controlPlane.yaml # Patches for controlplane nodes
  #   kubernetes/bootstrap/talos/patches/worker.yaml       # Patches for worker nodes
  #   kubernetes/bootstrap/talos/patches/global.yaml       # Patches for ALL nodes
  user_patches: false
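  # As a loose sketch (not part of the template itself), a user patch such as
  # kubernetes/bootstrap/talos/patches/global.yaml is simply a Talos machine-config
  # fragment, for example:
  #   machine:
  #     install:
  #       extraKernelArgs:
  #         - net.ifnames=0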
# (Required) The CIDR your nodes are on (e.g. 192.168.1.0/24)
bootstrap_node_network: ""
# (Optional) The default gateway for the nodes
# Default is .1 derived from bootstrap_node_network: 'x.x.x.1'
bootstrap_node_default_gateway: ""
# (Required) Use 1, 3, or a higher odd number of controller nodes; 3 is recommended.
# Worker nodes are optional. (A commented example inventory follows the field notes below.)
bootstrap_node_inventory: []
# - name: "" # Name of the node (must match [a-z0-9-\.]+)
# address: "" # IP address of the node
# controller: true # (Required) Set to true if this is a controller node
# ssh_user: "" # (Required: k3s) SSH username of the node
# talos_disk: "" # (Required: Talos) Device path or serial number of the disk for this node
# ...
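# For illustration only, a hypothetical Talos inventory with three controllers might look
# like the following (names, addresses and disk paths are placeholders; a k3s node would
# set ssh_user instead of talos_disk):
#   bootstrap_node_inventory:
#     - name: "k8s-0"
#       address: "192.168.1.101"
#       controller: true
#       talos_disk: "/dev/nvme0n1"
#     - name: "k8s-1"
#       address: "192.168.1.102"
#       controller: true
#       talos_disk: "/dev/nvme0n1"
#     - name: "k8s-2"
#       address: "192.168.1.103"
#       controller: true
#       talos_disk: "/dev/nvme0n1"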
# (Optional) The DNS server to use for the cluster; this can be an existing
# local DNS server or a public one.
# Default is ["1.1.1.1", "1.0.0.1"]
# If using a local DNS server make sure it meets the following requirements:
#   1. Your nodes can reach it
#   2. It is configured to forward requests to a public DNS server
#   3. You are not force-redirecting DNS requests to it - this will break cert generation over DNS01
# If using multiple DNS servers make sure they are set up the same way; there is no
# guarantee that the first DNS server will always be used for every lookup.
bootstrap_dns_servers: []
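# For example (a hypothetical local resolver; substitute your own or keep the default):
#   bootstrap_dns_servers: ["192.168.1.1"]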
# (Optional) The DNS search domain to use for the nodes.
# Default is "."
# Use the default or leave empty to avoid possible DNS issues inside the cluster.
bootstrap_search_domain: ""
# (Required) The pod CIDR for the cluster; this must NOT overlap with any
# existing networks and is usually a /16 (64K IPs).
# If you want to use IPv6 check the advanced flags below
bootstrap_pod_network: "10.69.0.0/16"
# (Required) The service CIDR for the cluster; this must NOT overlap with any
# existing networks and is usually a /16 (64K IPs).
# If you want to use IPv6 check the advanced flags below
bootstrap_service_network: "10.96.0.0/16"
# (Required) The IP address of the Kube API. Choose an available IP in
# your nodes' host network that is NOT being used. This is announced over L2.
# For k3s, kube-vip is used; Talos uses its built-in VIP functionality.
bootstrap_controllers_vip: ""
# (Optional) Add additional SANs to the Kube API cert; this is useful
# if you want to call the Kube API by hostname rather than IP
bootstrap_tls_sans: []
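# For example (a hypothetical hostname, not a default):
#   bootstrap_tls_sans: ["k8s.example.com"]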
# (Required) Age Public Key (e.g. age1...)
# 1. Generate a new key with the following command:
# > task sops:age-keygen
# 2. Copy the public key and paste it below
bootstrap_sops_age_pubkey: ""
# (Optional) Use the Cilium BGP control plane when L2 announcements won't traverse VLAN network segments.
# Needs a BGP-capable router setup with the node IPs as peers.
# See: https://docs.cilium.io/en/latest/network/bgp-control-plane/
bootstrap_bgp:
  enabled: false
  # (Optional) If using multiple BGP peers add them here.
  # Default is .1 derived from host_network: ['x.x.x.1']
  peers: []
  # (Required) Set the BGP Autonomous System Number for the router(s) and nodes.
  # If these match, iBGP will be used. If not, eBGP will be used.
  peer_asn: ""  # Router(s) AS
  local_asn: "" # Node(s) AS
  # (Required) The advertised CIDR for the cluster; this must NOT overlap with any
  # existing networks and is usually a /16 (64K IPs).
  # If you want to use IPv6 check the advanced flags below
  advertised_network: ""
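# For illustration only, a filled-in eBGP setup might look like the following
# (private ASNs and the peer address are placeholders for your own router):
#   bootstrap_bgp:
#     enabled: true
#     peers: ["192.168.1.1"]
#     peer_asn: "64512"
#     local_asn: "64513"
#     advertised_network: "10.45.0.0/16"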
#
# 2. (Required) Flux details - Flux is used to manage the cluster configuration.
#
# (Required) GitHub repository URL (for private repos use the ssh:// URL)
bootstrap_github_address: ""
# (Required) GitHub repository branch
bootstrap_github_branch: "main"
# (Required) Token for GitHub push-based sync
# 1. Generate a new token with the following command:
# > openssl rand -hex 16
# 2. Copy the token and paste it below
bootstrap_github_webhook_token: ""
# (Optional) Private key for Flux to access the GitHub repository
# 1. Generate a new key with the following command:
# > ssh-keygen -t ecdsa -b 521 -C "github-deploy-key" -f github-deploy.key -q -P ""
# 2. Make sure to paste the public key from "github-deploy.key.pub" into
# the deploy keys section of your repository settings.
# 3. Uncomment and paste the private key below
# 4. Optionally set your repository on GitHub to private
# bootstrap_github_private_key: |
# -----BEGIN OPENSSH PRIVATE KEY-----
# ...
# -----END OPENSSH PRIVATE KEY-----
#
# 3. (Optional) Cloudflare details - Cloudflare is used for DNS, TLS certificates and tunneling.
#
bootstrap_cloudflare:
  # (Required) Disable to use a different DNS provider
  enabled: false
  # (Required) Cloudflare Domain
  domain: ""
  # (Required) Cloudflare API Token (NOT API Key)
  # 1. Head over to Cloudflare and create an API Token by going to
  #    https://dash.cloudflare.com/profile/api-tokens
  # 2. Under the `API Tokens` section click the blue `Create Token` button.
  # 3. Click the blue `Use template` button for the `Edit zone DNS` template.
  # 4. Name your token something like `home-kubernetes`
  # 5. Under `Permissions`, click `+ Add More` and add each permission below:
  #    `Zone - DNS - Edit`
  #    `Account - Cloudflare Tunnel - Read`
  # 6. Limit the permissions to specific account and zone resources.
  # 7. Click the blue `Continue to Summary` button and then the blue `Create Token` button.
  # 8. Copy the token and paste it below.
  token: ""
  # (Required) Options for Cloudflare ACME
  acme:
    # (Required) Any email you want to be associated with the ACME account (used for TLS certs via letsencrypt.org)
    email: ""
    # (Required) Use the ACME production server when requesting the wildcard certificate.
    # By default the ACME staging server is used. This is to prevent being rate-limited.
    # Update this option to `true` when you have verified the staging certificate
    # works and then re-run `task configure` and push your changes to GitHub.
    production: false
  # (Required) Provide LAN access to the cluster ingresses for internal ingress classes
  # The load balancer IP for internal ingress. Choose an available IP
  # in your nodes' host network that is NOT being used. This is announced over L2.
  ingress_vip: ""
  # (Required) Gateway is used for providing DNS to your cluster on LAN
  # The load balancer IP for k8s_gateway. Choose an available IP
  # in your nodes' host network that is NOT being used. This is announced over L2.
  gateway_vip: ""
  # (Required) Options for Cloudflare Tunnel
  # 1. Authenticate cloudflared to your domain
  #    > cloudflared tunnel login
  # 2. Create the tunnel
  #    > cloudflared tunnel create k8s
  # 3. Copy the AccountTag, TunnelID, and TunnelSecret from the tunnel credentials file and paste them below
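  # For reference, the credentials file written by `cloudflared tunnel create` is a small JSON
  # document shaped roughly like the following (values are placeholders):
  #   {"AccountTag":"<account id>","TunnelSecret":"<base64 secret>","TunnelID":"<tunnel uuid>"}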
  tunnel:
    # (Required) Cloudflare Account ID (cat ~/.cloudflared/*.json | jq -r .AccountTag)
    account_id: ""
    # (Required) Cloudflared Tunnel ID (cat ~/.cloudflared/*.json | jq -r .TunnelID)
    id: ""
    # (Required) Cloudflared Tunnel Secret (cat ~/.cloudflared/*.json | jq -r .TunnelSecret)
    secret: ""
    # (Required) Provide WAN access to the cluster ingresses for external ingress classes
    # The load balancer IP for external ingress. Choose an available IP
    # in your nodes' host network that is NOT being used. This is announced over L2.
    ingress_vip: ""
# (Optional) Feature gates are used to enable experimental features
# bootstrap_feature_gates:
#   # Enable Dual Stack, IPv4 first
#   # IMPORTANT: I am looking for people to help maintain IPv6 support since I cannot test it.
#   # Ref: https://github.com/onedr0p/cluster-template/issues/1148
#   # Keep in mind that Cilium does not currently support IPv6 L2 announcements.
#   # Make sure you set cluster.pod_cidr and cluster.service_cidr
#   # to valid dual stack CIDRs, e.g. "10.42.0.0/16,fd00:10:244::/64"
#   dual_stack_ipv4_first: false