# server-sample.yaml
# The LiveKit Helm chart will set up a Deployment, Service, HPA, and Ingress for either
# a single-node or multi-node LiveKit deployment.
# After installing this chart, you would still need to
# * Open ports on the firewall to the hosts (see https://docs.livekit.io/deploy/ports-firewall)
# * Update DNS of hostnames to the ingress/service that were created

replicaCount: 1

# Suggested value for gracefully terminating the pod: 5 hours
terminationGracePeriodSeconds: 18000

livekit:
  # port: 7880
  # Uncomment to enable prometheus metrics
  # prometheus_port: 6789
  log_level: info
  rtc:
    use_external_ip: true
    # default ports used
    port_range_start: 50000
    port_range_end: 60000
    tcp_port: 7881
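  # Rough summary of what the defaults above mean for the host firewall (see the
  # ports/firewall doc linked at the top of this file for the authoritative list):
  # 50000-60000/UDP and 7881/TCP for RTC traffic, plus the TURN udp_port/tls_port
  # configured further down.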
  redis:
    address: <redis_host:port>
    # db: 0
    # username:
    # password:
    # use_tls: true
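  # For example, with an in-cluster Redis exposed as a Service named "redis" in
  # the "default" namespace (a hypothetical name; substitute your own), the
  # address above would be filled in roughly like this:
  # redis:
  #   address: redis.default.svc.cluster.local:6379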
  # one or more API key/secret pairs
  # see https://docs.livekit.io/guides/getting-started/#generate-api-key-and-secret
  keys:
    myapikey: "myapisecret"
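  # keys can hold more than one pair, e.g. one per environment (the key names and
  # secrets below are placeholders, not recommendations):
  # keys:
  #   prodkey: "long-random-prod-secret"
  #   stagingkey: "long-random-staging-secret"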
  turn:
    enabled: true
    # must match the domain of your TLS cert
    domain: turn.myhost.com
    # TURN/TLS port over TCP. It must be 443 if the TURN load balancer is disabled
    tls_port: 3478
    # TURN/UDP port, must be exposed on the firewall
    udp_port: 3478
    # uncomment if you will manage TLS termination for TURN yourself; secretName
    # is not used when external_tls is set (see the sketch after this block)
    # external_tls: true
    # Kubernetes Secret containing the TLS cert for <turn.myhost.com>
    # See https://docs.livekit.io/deploy/kubernetes/#importing-ssl-certificates
    secretName: <tlssecret>
    # Kubernetes serviceType for the TURN service; defaults to "LoadBalancer"
    # See the serviceType documentation: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
    serviceType: "LoadBalancer"
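  # As a sketch, if you terminate TLS for TURN outside the pod (external_tls),
  # the block above would look roughly like this; secretName is dropped because
  # it is not used when external_tls is set (values are placeholders):
  # turn:
  #   enabled: true
  #   domain: turn.myhost.com
  #   tls_port: 3478
  #   udp_port: 3478
  #   external_tls: true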

loadBalancer:
  # valid values: disable, alb, aws, gke, gke-managed-cert, gke-native-vpc, do
  # on AWS, we recommend the alb load balancer, which supports TLS termination
  # * in order to use alb, aws-ingress-controller must be installed
  #   https://docs.aws.amazon.com/eks/latest/userguide/alb-ingress.html
  # * for the gke-managed-cert type, follow https://cloud.google.com/kubernetes-engine/docs/how-to/managed-certs
  #   and set staticIpName to your reserved static IP and certificateName to the
  #   name of the managed cert (a sketch follows the tls section below)
  # * for do, uncomment clusterIssuer with your cert-manager issuer
  type: disable
  # staticIpName: <nameofIpAddressCreated>
  # certificateName: <nameOfCert>
  # clusterIssuer: letsencrypt-prod
  tls:
    # - hosts:
    #     - livekit.myhost.com
    # with alb, certificates need to reside in ACM for self-discovery
    # with do, use cert-manager and create a certificate for turn; the load balancer is automatic
    # with gke, specify one or more secrets to use for the certificate
    # see: https://cloud.google.com/kubernetes-engine/docs/how-to/ingress-multi-ssl#specifying_certificates_for_your_ingress
    #   secretName: <mysecret>
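  # For instance, on GKE with a Google-managed certificate (the resource names
  # below are placeholders you create yourself, per the managed-certs guide
  # linked above), the block might look like:
  # loadBalancer:
  #   type: gke-managed-cert
  #   staticIpName: livekit-static-ip
  #   certificateName: livekit-managed-cert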

# autoscaling requires resources to be defined (see the combined sketch at the
# end of this file)
autoscaling:
  # set to true to enable autoscaling. when set, replicaCount is ignored
  enabled: false
  minReplicas: 1
  maxReplicas: 5
  targetCPUUtilizationPercentage: 60

# if LiveKit should run only on specific nodes
# this can be used to isolate designated nodes
nodeSelector:
  {}
  # node.kubernetes.io/instance-type: c5.2xlarge

resources:
  {}
  # Due to port restrictions, you can run only one instance of LiveKit per physical
  # node. Because of that, we recommend giving it plenty of resources to work with
  # limits:
  #   cpu: 6000m
  #   memory: 2048Mi
  # requests:
  #   cpu: 4000m
  #   memory: 1024Mi
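
# Putting the two pieces together: since autoscaling requires resources to be
# defined, an autoscaled setup would combine the two sections above, roughly
# like this (values are illustrative):
# autoscaling:
#   enabled: true
#   maxReplicas: 5
# resources:
#   requests:
#     cpu: 4000m
#     memory: 1024Mi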