From b8dd9e77d0e6526d11dc2f63a07623f5e90ff580 Mon Sep 17 00:00:00 2001 From: root Date: Sat, 19 Aug 2023 11:59:00 +0000 Subject: [PATCH] update --- README.md | 8 +- calico/calico-ipv6.yaml | 51 +- calico/calico-typha.yaml | 35 + calico/calico.yaml | 53 +- doc/Enable-implement-IPv4-IPv6.md | 18 +- doc/kube-proxy_permissions.md | 2 +- doc/kubeadm-install-IPV6-IPV4.md | 6 +- ...tall-IPv6-IPv4-Three-Masters-Two-Slaves.md | 14 +- ...tall-IPv6-IPv4-Three-Masters-Two-Slaves.md | 14 +- ...tall-IPv6-IPv4-Three-Masters-Two-Slaves.md | 14 +- ...tall-IPv6-IPv4-Three-Masters-Two-Slaves.md | 14 +- ...v1.24.0-CentOS-binary-install-IPv6-IPv4.md | 14 +- ...tall-IPv6-IPv4-Three-Masters-Two-Slaves.md | 14 +- ...v1.24.1-CentOS-binary-install-IPv6-IPv4.md | 14 +- ...tall-IPv6-IPv4-Three-Masters-Two-Slaves.md | 14 +- ...v1.24.2-CentOS-binary-install-IPv6-IPv4.md | 12 +- ...v1.24.3-CentOS-binary-install-IPv6-IPv4.md | 12 +- ...tall-IPv6-IPv4-Three-Masters-Two-Slaves.md | 12 +- ...v1.25.0-CentOS-binary-install-IPv6-IPv4.md | 12 +- ...6-IPv4-Three-Masters-Two-Slaves-Offline.md | 12 +- ...6-IPv4-Three-Masters-Two-Slaves-Offline.md | 5481 +++++++++++++++++ images/1.jpg | Bin 0 -> 115374 bytes download.sh => shell/download.sh | 30 +- yaml/calico.yaml | 2 +- 24 files changed, 5722 insertions(+), 136 deletions(-) create mode 100644 doc/v1.28.0-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md create mode 100644 images/1.jpg rename download.sh => shell/download.sh (79%) diff --git a/README.md b/README.md index 16a3491..4b2b51d 100644 --- a/README.md +++ b/README.md @@ -33,6 +33,7 @@ GitHub访问不通畅可以访问国内GitEE https://gitee.com/cby-inc/Kubernete - 1.25.x - 1.26.x - 1.27.x +- 1.28.x 大版本之间是通用的,比如使用 1.26.0 的文档可以安装 1.26.x 各种版本,只是安装过程中的下载新的包即可。 @@ -48,9 +49,12 @@ https://github.com/cby-chen/kube_ansible # 五、文档 ### 最新版本文档 -- [v1.27.3-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md](./doc/v1.27.3-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md) +- [v1.28.0-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md](./doc/v1.28.0-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md) ## 安装文档 +### 1.28.x版本 +- [v1.28.0-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md](./doc/v1.28.0-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md) + ### 1.27.x版本 - [v1.27.1-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md](./doc/v1.27.1-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md) - [v1.27.3-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md](./doc/v1.27.3-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md) @@ -192,6 +196,8 @@ https://github.com/cby-chen/Kubernetes/ - 此处的费用不应该节省,可以直接使用16核32G或者64G的机器,之后集群扩容就无需扩容master节点的资源,减少风险。 - 其中master节点和etcd节点的系统分区100G即可。 +### 加群 +![avatar](./images/1.jpg) - 建议在 [Kubernetes](https://github.com/cby-chen/Kubernetes) 查看文档,后续会陆续更新文档 - 小陈网站: diff --git a/calico/calico-ipv6.yaml b/calico/calico-ipv6.yaml index 629a480..25d7f54 100644 --- a/calico/calico-ipv6.yaml +++ b/calico/calico-ipv6.yaml @@ -1052,6 +1052,7 @@ spec: with BPF programs regardless of what is the per-interfaces or global setting. Possible values are Disabled, Strict or Loose. [Default: Loose]' + pattern: ^(?i)(Disabled|Strict|Loose)?$ type: string bpfExtToServiceConnmark: description: 'BPFExtToServiceConnmark in BPF mode, control a 32bit @@ -1069,6 +1070,7 @@ spec: is sent directly from the remote node. 
In "DSR" mode, the remote node appears to use the IP of the ingress node; this requires a permissive L2 network. [Default: Tunnel]' + pattern: ^(?i)(Tunnel|DSR)?$ type: string bpfForceTrackPacketsFromIfaces: description: 'BPFForceTrackPacketsFromIfaces in BPF mode, forces traffic @@ -1100,6 +1102,7 @@ spec: minimum time between updates to the dataplane for Felix''s embedded kube-proxy. Lower values give reduced set-up latency. Higher values reduce Felix CPU usage by batching up more work. [Default: 1s]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string bpfL3IfacePattern: description: BPFL3IfacePattern is a regular expression that allows @@ -1124,6 +1127,7 @@ spec: when in BPF dataplane mode. One of "Off", "Info", or "Debug". The logs are emitted to the BPF trace pipe, accessible with the command `tc exec bpf debug`. [Default: Off].' + pattern: ^(?i)(Off|Info|Debug)?$ type: string bpfMapSizeConntrack: description: 'BPFMapSizeConntrack sets the size for the conntrack @@ -1188,6 +1192,7 @@ spec: to append mode, be sure that the other rules in the chains signal acceptance by falling through to the Calico rules, otherwise the Calico policy will be bypassed. [Default: insert]' + pattern: ^(?i)(insert|append)?$ type: string dataplaneDriver: description: DataplaneDriver filename of the external dataplane driver @@ -1206,8 +1211,10 @@ spec: debugMemoryProfilePath: type: string debugSimulateCalcGraphHangAfter: + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string debugSimulateDataplaneHangAfter: + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string defaultEndpointToHostAction: description: 'DefaultEndpointToHostAction controls what happens to @@ -1222,6 +1229,7 @@ spec: endpoint egress policy. Use ACCEPT to unconditionally accept packets from workloads after processing workload endpoint egress policy. [Default: Drop]' + pattern: ^(?i)(Drop|Accept|Return)?$ type: string deviceRouteProtocol: description: This defines the route protocol added to programmed device @@ -1240,6 +1248,7 @@ spec: disableConntrackInvalidCheck: type: boolean endpointReportingDelay: + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string endpointReportingEnabled: type: boolean @@ -1307,12 +1316,14 @@ spec: based on auto-detected platform capabilities. Values are specified in a comma separated list with no spaces, example; "SNATFullyRandom=true,MASQFullyRandom=false,RestoreSupportsLock=". "true" or "false" will force the feature, empty or omitted values are auto-detected. + pattern: ^([a-zA-Z0-9-_]+=(true|false|),)*([a-zA-Z0-9-_]+=(true|false|))?$ type: string featureGates: description: FeatureGates is used to enable or disable tech-preview Calico features. Values are specified in a comma separated list with no spaces, example; "BPFConnectTimeLoadBalancingWorkaround=enabled,XyZ=false". This is used to enable features that are not fully production ready. + pattern: ^([a-zA-Z0-9-_]+=([^=]+),)*([a-zA-Z0-9-_]+=([^=]+))?$ type: string floatingIPs: description: FloatingIPs configures whether or not Felix will program @@ -1374,6 +1385,7 @@ spec: description: InterfaceRefreshInterval is the period at which Felix rescans local interfaces to verify their state. The rescan can be disabled by setting the interval to 0. + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string ipipEnabled: description: 'IPIPEnabled overrides whether Felix should configure @@ -1389,18 +1401,22 @@ spec: all iptables state to ensure that no other process has accidentally broken Calico''s rules. Set to 0 to disable iptables refresh. 
[Default: 90s]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string iptablesBackend: description: IptablesBackend specifies which backend of iptables will be used. The default is Auto. + pattern: ^(?i)(Auto|FelixConfiguration|FelixConfigurationList|Legacy|NFT)?$ type: string iptablesFilterAllowAction: + pattern: ^(?i)(Accept|Return)?$ type: string iptablesFilterDenyAction: description: IptablesFilterDenyAction controls what happens to traffic that is denied by network policy. By default Calico blocks traffic with an iptables "DROP" action. If you want to use "REJECT" action instead you can configure it in here. + pattern: ^(?i)(Drop|Reject)?$ type: string iptablesLockFilePath: description: 'IptablesLockFilePath is the location of the iptables @@ -1413,6 +1429,7 @@ spec: wait between attempts to acquire the iptables lock if it is not available. Lower values make Felix more responsive when the lock is contended, but use more CPU. [Default: 50ms]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string iptablesLockTimeout: description: 'IptablesLockTimeout is the time that Felix will wait @@ -1421,8 +1438,10 @@ spec: also take the lock. When running Felix inside a container, this requires the /run directory of the host to be mounted into the calico/node or calico/felix container. [Default: 0s disabled]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string iptablesMangleAllowAction: + pattern: ^(?i)(Accept|Return)?$ type: string iptablesMarkMask: description: 'IptablesMarkMask is the mask that Felix selects its @@ -1439,6 +1458,7 @@ spec: back in order to check the write was not clobbered by another process. This should only occur if another application on the system doesn''t respect the iptables lock. [Default: 1s]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string iptablesRefreshInterval: description: 'IptablesRefreshInterval is the period at which Felix @@ -1449,6 +1469,7 @@ spec: was fixed in kernel version 4.11. If you are using v4.11 or greater you may want to set this to, a higher value to reduce Felix CPU usage. [Default: 10s]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string ipv6Support: description: IPv6Support controls whether Felix enables support for @@ -1483,15 +1504,18 @@ spec: logSeverityFile: description: 'LogSeverityFile is the log severity above which logs are sent to the log file. [Default: Info]' + pattern: ^(?i)(Debug|Info|Warning|Error|Fatal)?$ type: string logSeverityScreen: description: 'LogSeverityScreen is the log severity above which logs are sent to the stdout. [Default: Info]' + pattern: ^(?i)(Debug|Info|Warning|Error|Fatal)?$ type: string logSeveritySys: description: 'LogSeveritySys is the log severity above which logs are sent to the syslog. Set to None for no logging to syslog. [Default: Info]' + pattern: ^(?i)(Debug|Info|Warning|Error|Fatal)?$ type: string maxIpsetSize: type: integer @@ -1530,6 +1554,7 @@ spec: pattern: ^.* x-kubernetes-int-or-string: true netlinkTimeout: + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string openstackRegion: description: 'OpenstackRegion is the name of the region that a particular @@ -1584,21 +1609,25 @@ spec: description: 'ReportingInterval is the interval at which Felix reports its status into the datastore or 0 to disable. Must be non-zero in OpenStack deployments. [Default: 30s]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string reportingTTL: description: 'ReportingTTL is the time-to-live setting for process-wide status reports. 
[Default: 90s]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string routeRefreshInterval: description: 'RouteRefreshInterval is the period at which Felix re-checks the routes in the dataplane to ensure that no other process has accidentally broken Calico''s rules. Set to 0 to disable route refresh. [Default: 90s]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string routeSource: description: 'RouteSource configures where Felix gets its routing information. - WorkloadIPs: use workload endpoints to construct routes. - CalicoIPAM: the default - use IPAM data to construct routes.' + pattern: ^(?i)(WorkloadIPs|CalicoIPAM)?$ type: string routeSyncDisabled: description: RouteSyncDisabled will disable all operations performed @@ -1638,6 +1667,7 @@ spec: packets that do not get DNAT''d by kube-proxy. Unless set to "Disabled", in which case such routing loops continue to be allowed. [Default: Drop]' + pattern: ^(?i)(Drop|Reject|Disabled)?$ type: string sidecarAccelerationEnabled: description: 'SidecarAccelerationEnabled enables experimental sidecar @@ -1653,10 +1683,12 @@ spec: usageReportingInitialDelay: description: 'UsageReportingInitialDelay controls the minimum delay before Felix makes a report. [Default: 300s]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string usageReportingInterval: description: 'UsageReportingInterval controls the interval at which Felix makes reports. [Default: 86400s]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string useInternalDataplaneDriver: description: UseInternalDataplaneDriver, if true, Felix will use its @@ -1705,6 +1737,7 @@ spec: wireguardKeepAlive: description: 'WireguardKeepAlive controls Wireguard PersistentKeepalive option. Set 0 to disable. [Default: 0]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string wireguardListeningPort: description: 'WireguardListeningPort controls the listening port used @@ -1731,6 +1764,7 @@ spec: the allowedSourcePrefixes annotation to send traffic with a source IP address that is not theirs. This is disabled by default. When set to "Any", pods can request any prefix. + pattern: ^(?i)(Disabled|Any)?$ type: string xdpEnabled: description: 'XDPEnabled enables XDP acceleration for suitable untracked @@ -1741,6 +1775,7 @@ spec: all XDP state to ensure that no other process has accidentally broken Calico''s BPF maps or attached programs. Set to 0 to disable XDP refresh. [Default: 90s]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string type: object type: object @@ -4710,7 +4745,7 @@ spec: # It can be deleted if this is a fresh installation, or if you have already # upgraded to use calico-ipam. - name: upgrade-ipam - image: m.daocloud.io/docker.io/calico/cni:master + image: docker.io/calico/cni:master imagePullPolicy: IfNotPresent command: ["/opt/cni/bin/calico-ipam", "-upgrade"] envFrom: @@ -4738,7 +4773,7 @@ spec: # This container installs the CNI binaries # and CNI network config file on each node. - name: install-cni - image: m.daocloud.io/docker.io/calico/cni:master + image: docker.io/calico/cni:master imagePullPolicy: IfNotPresent command: ["/opt/cni/bin/install"] envFrom: @@ -4781,7 +4816,7 @@ spec: # i.e. bpf at /sys/fs/bpf and cgroup2 at /run/calico/cgroup. Calico-node initialisation is executed # in best effort fashion, i.e. no failure for errors, to not disrupt pod creation in iptable mode. 
- name: "mount-bpffs" - image: m.daocloud.io/docker.io/calico/node:master + image: docker.io/calico/node:master imagePullPolicy: IfNotPresent command: ["calico-node", "-init", "-best-effort"] volumeMounts: @@ -4807,7 +4842,7 @@ spec: # container programs network policy and routes on each # host. - name: calico-node - image: m.daocloud.io/docker.io/calico/node:master + image: docker.io/calico/node:master imagePullPolicy: IfNotPresent envFrom: - configMapRef: @@ -4878,11 +4913,11 @@ spec: # no effect. This should fall within `--cluster-cidr`. # - name: CALICO_IPV4POOL_CIDR # value: "192.168.0.0/16" - # Disable file logging so `kubectl logs` works. - name: CALICO_IPV4POOL_CIDR value: "172.16.0.0/12" - name: CALICO_IPV6POOL_CIDR - value: "fc00::/48" + value: "fc00:2222::/112" + # Disable file logging so `kubectl logs` works. - name: CALICO_DISABLE_FILE_LOGGING value: "true" # Set Felix endpoint to host default action to ACCEPT. @@ -5036,7 +5071,7 @@ spec: priorityClassName: system-cluster-critical containers: - name: calico-kube-controllers - image: m.daocloud.io/docker.io/calico/kube-controllers:master + image: docker.io/calico/kube-controllers:master imagePullPolicy: IfNotPresent env: # Choose which controllers to run. @@ -5120,7 +5155,7 @@ spec: securityContext: fsGroup: 65534 containers: - - image: m.daocloud.io/docker.io/calico/typha:master + - image: docker.io/calico/typha:master imagePullPolicy: IfNotPresent name: calico-typha ports: diff --git a/calico/calico-typha.yaml b/calico/calico-typha.yaml index 0af25f7..31021d9 100644 --- a/calico/calico-typha.yaml +++ b/calico/calico-typha.yaml @@ -1050,6 +1050,7 @@ spec: with BPF programs regardless of what is the per-interfaces or global setting. Possible values are Disabled, Strict or Loose. [Default: Loose]' + pattern: ^(?i)(Disabled|Strict|Loose)?$ type: string bpfExtToServiceConnmark: description: 'BPFExtToServiceConnmark in BPF mode, control a 32bit @@ -1067,6 +1068,7 @@ spec: is sent directly from the remote node. In "DSR" mode, the remote node appears to use the IP of the ingress node; this requires a permissive L2 network. [Default: Tunnel]' + pattern: ^(?i)(Tunnel|DSR)?$ type: string bpfForceTrackPacketsFromIfaces: description: 'BPFForceTrackPacketsFromIfaces in BPF mode, forces traffic @@ -1098,6 +1100,7 @@ spec: minimum time between updates to the dataplane for Felix''s embedded kube-proxy. Lower values give reduced set-up latency. Higher values reduce Felix CPU usage by batching up more work. [Default: 1s]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string bpfL3IfacePattern: description: BPFL3IfacePattern is a regular expression that allows @@ -1122,6 +1125,7 @@ spec: when in BPF dataplane mode. One of "Off", "Info", or "Debug". The logs are emitted to the BPF trace pipe, accessible with the command `tc exec bpf debug`. [Default: Off].' + pattern: ^(?i)(Off|Info|Debug)?$ type: string bpfMapSizeConntrack: description: 'BPFMapSizeConntrack sets the size for the conntrack @@ -1186,6 +1190,7 @@ spec: to append mode, be sure that the other rules in the chains signal acceptance by falling through to the Calico rules, otherwise the Calico policy will be bypassed. 
[Default: insert]' + pattern: ^(?i)(insert|append)?$ type: string dataplaneDriver: description: DataplaneDriver filename of the external dataplane driver @@ -1204,8 +1209,10 @@ spec: debugMemoryProfilePath: type: string debugSimulateCalcGraphHangAfter: + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string debugSimulateDataplaneHangAfter: + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string defaultEndpointToHostAction: description: 'DefaultEndpointToHostAction controls what happens to @@ -1220,6 +1227,7 @@ spec: endpoint egress policy. Use ACCEPT to unconditionally accept packets from workloads after processing workload endpoint egress policy. [Default: Drop]' + pattern: ^(?i)(Drop|Accept|Return)?$ type: string deviceRouteProtocol: description: This defines the route protocol added to programmed device @@ -1238,6 +1246,7 @@ spec: disableConntrackInvalidCheck: type: boolean endpointReportingDelay: + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string endpointReportingEnabled: type: boolean @@ -1305,12 +1314,14 @@ spec: based on auto-detected platform capabilities. Values are specified in a comma separated list with no spaces, example; "SNATFullyRandom=true,MASQFullyRandom=false,RestoreSupportsLock=". "true" or "false" will force the feature, empty or omitted values are auto-detected. + pattern: ^([a-zA-Z0-9-_]+=(true|false|),)*([a-zA-Z0-9-_]+=(true|false|))?$ type: string featureGates: description: FeatureGates is used to enable or disable tech-preview Calico features. Values are specified in a comma separated list with no spaces, example; "BPFConnectTimeLoadBalancingWorkaround=enabled,XyZ=false". This is used to enable features that are not fully production ready. + pattern: ^([a-zA-Z0-9-_]+=([^=]+),)*([a-zA-Z0-9-_]+=([^=]+))?$ type: string floatingIPs: description: FloatingIPs configures whether or not Felix will program @@ -1372,6 +1383,7 @@ spec: description: InterfaceRefreshInterval is the period at which Felix rescans local interfaces to verify their state. The rescan can be disabled by setting the interval to 0. + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string ipipEnabled: description: 'IPIPEnabled overrides whether Felix should configure @@ -1387,18 +1399,22 @@ spec: all iptables state to ensure that no other process has accidentally broken Calico''s rules. Set to 0 to disable iptables refresh. [Default: 90s]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string iptablesBackend: description: IptablesBackend specifies which backend of iptables will be used. The default is Auto. + pattern: ^(?i)(Auto|FelixConfiguration|FelixConfigurationList|Legacy|NFT)?$ type: string iptablesFilterAllowAction: + pattern: ^(?i)(Accept|Return)?$ type: string iptablesFilterDenyAction: description: IptablesFilterDenyAction controls what happens to traffic that is denied by network policy. By default Calico blocks traffic with an iptables "DROP" action. If you want to use "REJECT" action instead you can configure it in here. + pattern: ^(?i)(Drop|Reject)?$ type: string iptablesLockFilePath: description: 'IptablesLockFilePath is the location of the iptables @@ -1411,6 +1427,7 @@ spec: wait between attempts to acquire the iptables lock if it is not available. Lower values make Felix more responsive when the lock is contended, but use more CPU. [Default: 50ms]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string iptablesLockTimeout: description: 'IptablesLockTimeout is the time that Felix will wait @@ -1419,8 +1436,10 @@ spec: also take the lock. 
When running Felix inside a container, this requires the /run directory of the host to be mounted into the calico/node or calico/felix container. [Default: 0s disabled]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string iptablesMangleAllowAction: + pattern: ^(?i)(Accept|Return)?$ type: string iptablesMarkMask: description: 'IptablesMarkMask is the mask that Felix selects its @@ -1437,6 +1456,7 @@ spec: back in order to check the write was not clobbered by another process. This should only occur if another application on the system doesn''t respect the iptables lock. [Default: 1s]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string iptablesRefreshInterval: description: 'IptablesRefreshInterval is the period at which Felix @@ -1447,6 +1467,7 @@ spec: was fixed in kernel version 4.11. If you are using v4.11 or greater you may want to set this to, a higher value to reduce Felix CPU usage. [Default: 10s]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string ipv6Support: description: IPv6Support controls whether Felix enables support for @@ -1481,15 +1502,18 @@ spec: logSeverityFile: description: 'LogSeverityFile is the log severity above which logs are sent to the log file. [Default: Info]' + pattern: ^(?i)(Debug|Info|Warning|Error|Fatal)?$ type: string logSeverityScreen: description: 'LogSeverityScreen is the log severity above which logs are sent to the stdout. [Default: Info]' + pattern: ^(?i)(Debug|Info|Warning|Error|Fatal)?$ type: string logSeveritySys: description: 'LogSeveritySys is the log severity above which logs are sent to the syslog. Set to None for no logging to syslog. [Default: Info]' + pattern: ^(?i)(Debug|Info|Warning|Error|Fatal)?$ type: string maxIpsetSize: type: integer @@ -1528,6 +1552,7 @@ spec: pattern: ^.* x-kubernetes-int-or-string: true netlinkTimeout: + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string openstackRegion: description: 'OpenstackRegion is the name of the region that a particular @@ -1582,21 +1607,25 @@ spec: description: 'ReportingInterval is the interval at which Felix reports its status into the datastore or 0 to disable. Must be non-zero in OpenStack deployments. [Default: 30s]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string reportingTTL: description: 'ReportingTTL is the time-to-live setting for process-wide status reports. [Default: 90s]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string routeRefreshInterval: description: 'RouteRefreshInterval is the period at which Felix re-checks the routes in the dataplane to ensure that no other process has accidentally broken Calico''s rules. Set to 0 to disable route refresh. [Default: 90s]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string routeSource: description: 'RouteSource configures where Felix gets its routing information. - WorkloadIPs: use workload endpoints to construct routes. - CalicoIPAM: the default - use IPAM data to construct routes.' + pattern: ^(?i)(WorkloadIPs|CalicoIPAM)?$ type: string routeSyncDisabled: description: RouteSyncDisabled will disable all operations performed @@ -1636,6 +1665,7 @@ spec: packets that do not get DNAT''d by kube-proxy. Unless set to "Disabled", in which case such routing loops continue to be allowed. 
[Default: Drop]' + pattern: ^(?i)(Drop|Reject|Disabled)?$ type: string sidecarAccelerationEnabled: description: 'SidecarAccelerationEnabled enables experimental sidecar @@ -1651,10 +1681,12 @@ spec: usageReportingInitialDelay: description: 'UsageReportingInitialDelay controls the minimum delay before Felix makes a report. [Default: 300s]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string usageReportingInterval: description: 'UsageReportingInterval controls the interval at which Felix makes reports. [Default: 86400s]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string useInternalDataplaneDriver: description: UseInternalDataplaneDriver, if true, Felix will use its @@ -1703,6 +1735,7 @@ spec: wireguardKeepAlive: description: 'WireguardKeepAlive controls Wireguard PersistentKeepalive option. Set 0 to disable. [Default: 0]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string wireguardListeningPort: description: 'WireguardListeningPort controls the listening port used @@ -1729,6 +1762,7 @@ spec: the allowedSourcePrefixes annotation to send traffic with a source IP address that is not theirs. This is disabled by default. When set to "Any", pods can request any prefix. + pattern: ^(?i)(Disabled|Any)?$ type: string xdpEnabled: description: 'XDPEnabled enables XDP acceleration for suitable untracked @@ -1739,6 +1773,7 @@ spec: all XDP state to ensure that no other process has accidentally broken Calico''s BPF maps or attached programs. Set to 0 to disable XDP refresh. [Default: 90s]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string type: object type: object diff --git a/calico/calico.yaml b/calico/calico.yaml index 7e07135..3f88f19 100644 --- a/calico/calico.yaml +++ b/calico/calico.yaml @@ -1050,6 +1050,7 @@ spec: with BPF programs regardless of what is the per-interfaces or global setting. Possible values are Disabled, Strict or Loose. [Default: Loose]' + pattern: ^(?i)(Disabled|Strict|Loose)?$ type: string bpfExtToServiceConnmark: description: 'BPFExtToServiceConnmark in BPF mode, control a 32bit @@ -1067,6 +1068,7 @@ spec: is sent directly from the remote node. In "DSR" mode, the remote node appears to use the IP of the ingress node; this requires a permissive L2 network. [Default: Tunnel]' + pattern: ^(?i)(Tunnel|DSR)?$ type: string bpfForceTrackPacketsFromIfaces: description: 'BPFForceTrackPacketsFromIfaces in BPF mode, forces traffic @@ -1098,6 +1100,7 @@ spec: minimum time between updates to the dataplane for Felix''s embedded kube-proxy. Lower values give reduced set-up latency. Higher values reduce Felix CPU usage by batching up more work. [Default: 1s]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string bpfL3IfacePattern: description: BPFL3IfacePattern is a regular expression that allows @@ -1122,6 +1125,7 @@ spec: when in BPF dataplane mode. One of "Off", "Info", or "Debug". The logs are emitted to the BPF trace pipe, accessible with the command `tc exec bpf debug`. [Default: Off].' + pattern: ^(?i)(Off|Info|Debug)?$ type: string bpfMapSizeConntrack: description: 'BPFMapSizeConntrack sets the size for the conntrack @@ -1186,6 +1190,7 @@ spec: to append mode, be sure that the other rules in the chains signal acceptance by falling through to the Calico rules, otherwise the Calico policy will be bypassed. 
[Default: insert]' + pattern: ^(?i)(insert|append)?$ type: string dataplaneDriver: description: DataplaneDriver filename of the external dataplane driver @@ -1204,8 +1209,10 @@ spec: debugMemoryProfilePath: type: string debugSimulateCalcGraphHangAfter: + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string debugSimulateDataplaneHangAfter: + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string defaultEndpointToHostAction: description: 'DefaultEndpointToHostAction controls what happens to @@ -1220,6 +1227,7 @@ spec: endpoint egress policy. Use ACCEPT to unconditionally accept packets from workloads after processing workload endpoint egress policy. [Default: Drop]' + pattern: ^(?i)(Drop|Accept|Return)?$ type: string deviceRouteProtocol: description: This defines the route protocol added to programmed device @@ -1238,6 +1246,7 @@ spec: disableConntrackInvalidCheck: type: boolean endpointReportingDelay: + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string endpointReportingEnabled: type: boolean @@ -1305,12 +1314,14 @@ spec: based on auto-detected platform capabilities. Values are specified in a comma separated list with no spaces, example; "SNATFullyRandom=true,MASQFullyRandom=false,RestoreSupportsLock=". "true" or "false" will force the feature, empty or omitted values are auto-detected. + pattern: ^([a-zA-Z0-9-_]+=(true|false|),)*([a-zA-Z0-9-_]+=(true|false|))?$ type: string featureGates: description: FeatureGates is used to enable or disable tech-preview Calico features. Values are specified in a comma separated list with no spaces, example; "BPFConnectTimeLoadBalancingWorkaround=enabled,XyZ=false". This is used to enable features that are not fully production ready. + pattern: ^([a-zA-Z0-9-_]+=([^=]+),)*([a-zA-Z0-9-_]+=([^=]+))?$ type: string floatingIPs: description: FloatingIPs configures whether or not Felix will program @@ -1372,6 +1383,7 @@ spec: description: InterfaceRefreshInterval is the period at which Felix rescans local interfaces to verify their state. The rescan can be disabled by setting the interval to 0. + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string ipipEnabled: description: 'IPIPEnabled overrides whether Felix should configure @@ -1387,18 +1399,22 @@ spec: all iptables state to ensure that no other process has accidentally broken Calico''s rules. Set to 0 to disable iptables refresh. [Default: 90s]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string iptablesBackend: description: IptablesBackend specifies which backend of iptables will be used. The default is Auto. + pattern: ^(?i)(Auto|FelixConfiguration|FelixConfigurationList|Legacy|NFT)?$ type: string iptablesFilterAllowAction: + pattern: ^(?i)(Accept|Return)?$ type: string iptablesFilterDenyAction: description: IptablesFilterDenyAction controls what happens to traffic that is denied by network policy. By default Calico blocks traffic with an iptables "DROP" action. If you want to use "REJECT" action instead you can configure it in here. + pattern: ^(?i)(Drop|Reject)?$ type: string iptablesLockFilePath: description: 'IptablesLockFilePath is the location of the iptables @@ -1411,6 +1427,7 @@ spec: wait between attempts to acquire the iptables lock if it is not available. Lower values make Felix more responsive when the lock is contended, but use more CPU. [Default: 50ms]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string iptablesLockTimeout: description: 'IptablesLockTimeout is the time that Felix will wait @@ -1419,8 +1436,10 @@ spec: also take the lock. 
When running Felix inside a container, this requires the /run directory of the host to be mounted into the calico/node or calico/felix container. [Default: 0s disabled]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string iptablesMangleAllowAction: + pattern: ^(?i)(Accept|Return)?$ type: string iptablesMarkMask: description: 'IptablesMarkMask is the mask that Felix selects its @@ -1437,6 +1456,7 @@ spec: back in order to check the write was not clobbered by another process. This should only occur if another application on the system doesn''t respect the iptables lock. [Default: 1s]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string iptablesRefreshInterval: description: 'IptablesRefreshInterval is the period at which Felix @@ -1447,6 +1467,7 @@ spec: was fixed in kernel version 4.11. If you are using v4.11 or greater you may want to set this to, a higher value to reduce Felix CPU usage. [Default: 10s]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string ipv6Support: description: IPv6Support controls whether Felix enables support for @@ -1481,15 +1502,18 @@ spec: logSeverityFile: description: 'LogSeverityFile is the log severity above which logs are sent to the log file. [Default: Info]' + pattern: ^(?i)(Debug|Info|Warning|Error|Fatal)?$ type: string logSeverityScreen: description: 'LogSeverityScreen is the log severity above which logs are sent to the stdout. [Default: Info]' + pattern: ^(?i)(Debug|Info|Warning|Error|Fatal)?$ type: string logSeveritySys: description: 'LogSeveritySys is the log severity above which logs are sent to the syslog. Set to None for no logging to syslog. [Default: Info]' + pattern: ^(?i)(Debug|Info|Warning|Error|Fatal)?$ type: string maxIpsetSize: type: integer @@ -1528,6 +1552,7 @@ spec: pattern: ^.* x-kubernetes-int-or-string: true netlinkTimeout: + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string openstackRegion: description: 'OpenstackRegion is the name of the region that a particular @@ -1582,21 +1607,25 @@ spec: description: 'ReportingInterval is the interval at which Felix reports its status into the datastore or 0 to disable. Must be non-zero in OpenStack deployments. [Default: 30s]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string reportingTTL: description: 'ReportingTTL is the time-to-live setting for process-wide status reports. [Default: 90s]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string routeRefreshInterval: description: 'RouteRefreshInterval is the period at which Felix re-checks the routes in the dataplane to ensure that no other process has accidentally broken Calico''s rules. Set to 0 to disable route refresh. [Default: 90s]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string routeSource: description: 'RouteSource configures where Felix gets its routing information. - WorkloadIPs: use workload endpoints to construct routes. - CalicoIPAM: the default - use IPAM data to construct routes.' + pattern: ^(?i)(WorkloadIPs|CalicoIPAM)?$ type: string routeSyncDisabled: description: RouteSyncDisabled will disable all operations performed @@ -1636,6 +1665,7 @@ spec: packets that do not get DNAT''d by kube-proxy. Unless set to "Disabled", in which case such routing loops continue to be allowed. 
[Default: Drop]' + pattern: ^(?i)(Drop|Reject|Disabled)?$ type: string sidecarAccelerationEnabled: description: 'SidecarAccelerationEnabled enables experimental sidecar @@ -1651,10 +1681,12 @@ spec: usageReportingInitialDelay: description: 'UsageReportingInitialDelay controls the minimum delay before Felix makes a report. [Default: 300s]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string usageReportingInterval: description: 'UsageReportingInterval controls the interval at which Felix makes reports. [Default: 86400s]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string useInternalDataplaneDriver: description: UseInternalDataplaneDriver, if true, Felix will use its @@ -1703,6 +1735,7 @@ spec: wireguardKeepAlive: description: 'WireguardKeepAlive controls Wireguard PersistentKeepalive option. Set 0 to disable. [Default: 0]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string wireguardListeningPort: description: 'WireguardListeningPort controls the listening port used @@ -1729,6 +1762,7 @@ spec: the allowedSourcePrefixes annotation to send traffic with a source IP address that is not theirs. This is disabled by default. When set to "Any", pods can request any prefix. + pattern: ^(?i)(Disabled|Any)?$ type: string xdpEnabled: description: 'XDPEnabled enables XDP acceleration for suitable untracked @@ -1739,6 +1773,7 @@ spec: all XDP state to ensure that no other process has accidentally broken Calico''s BPF maps or attached programs. Set to 0 to disable XDP refresh. [Default: 90s]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string type: object type: object @@ -4708,7 +4743,7 @@ spec: # It can be deleted if this is a fresh installation, or if you have already # upgraded to use calico-ipam. - name: upgrade-ipam - image: m.daocloud.io/m.daocloud.io/docker.io/calico/cni:master + image: docker.io/calico/cni:master imagePullPolicy: IfNotPresent command: ["/opt/cni/bin/calico-ipam", "-upgrade"] envFrom: @@ -4736,7 +4771,7 @@ spec: # This container installs the CNI binaries # and CNI network config file on each node. - name: install-cni - image: m.daocloud.io/m.daocloud.io/docker.io/calico/cni:master + image: docker.io/calico/cni:master imagePullPolicy: IfNotPresent command: ["/opt/cni/bin/install"] envFrom: @@ -4779,7 +4814,7 @@ spec: # i.e. bpf at /sys/fs/bpf and cgroup2 at /run/calico/cgroup. Calico-node initialisation is executed # in best effort fashion, i.e. no failure for errors, to not disrupt pod creation in iptable mode. - name: "mount-bpffs" - image: m.daocloud.io/m.daocloud.io/docker.io/calico/node:master + image: docker.io/calico/node:master imagePullPolicy: IfNotPresent command: ["calico-node", "-init", "-best-effort"] volumeMounts: @@ -4805,7 +4840,7 @@ spec: # container programs network policy and routes on each # host. - name: calico-node - image: m.daocloud.io/m.daocloud.io/docker.io/calico/node:master + image: docker.io/calico/node:master imagePullPolicy: IfNotPresent envFrom: - configMapRef: @@ -4842,8 +4877,6 @@ spec: # Auto-detect the BGP IP address. - name: IP value: "autodetect" - - name: CALICO_IPV4POOL_CIDR - value: "172.16.0.0/12" # Enable IPIP - name: CALICO_IPV4POOL_IPIP value: "Always" @@ -4874,8 +4907,8 @@ spec: # The default IPv4 pool to create on startup if none exists. Pod IPs will be # chosen from this range. Changing this value after installation will have # no effect. This should fall within `--cluster-cidr`. 
- # - name: CALICO_IPV4POOL_CIDR - # value: "192.168.0.0/16" + - name: CALICO_IPV4POOL_CIDR + value: "172.16.0.0/12" # Disable file logging so `kubectl logs` works. - name: CALICO_DISABLE_FILE_LOGGING value: "true" @@ -5030,7 +5063,7 @@ spec: priorityClassName: system-cluster-critical containers: - name: calico-kube-controllers - image: m.daocloud.io/m.daocloud.io/docker.io/calico/kube-controllers:master + image: docker.io/calico/kube-controllers:master imagePullPolicy: IfNotPresent env: # Choose which controllers to run. @@ -5114,7 +5147,7 @@ spec: securityContext: fsGroup: 65534 containers: - - image: m.daocloud.io/m.daocloud.io/docker.io/calico/typha:master + - image: docker.io/calico/typha:master imagePullPolicy: IfNotPresent name: calico-typha ports: diff --git a/doc/Enable-implement-IPv4-IPv6.md b/doc/Enable-implement-IPv4-IPv6.md index 3ea83fa..e1251a3 100644 --- a/doc/Enable-implement-IPv4-IPv6.md +++ b/doc/Enable-implement-IPv4-IPv6.md @@ -132,7 +132,7 @@ rtt min/avg/max/mdev = 9.937/10.269/10.602/0.347 ms ==================== ```shell ---service-cluster-ip-range=10.96.0.0/12,fd00::/108   +--service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112   --feature-gates=IPv6DualStack=true  [root@k8s-master01 ~]# vim /usr/lib/systemd/system/kube-apiserver.service @@ -152,7 +152,7 @@ ExecStart=/usr/local/bin/kube-apiserver \       --secure-port=6443  \       --insecure-port=0  \       --advertise-address=192.168.1.81 \ -      --service-cluster-ip-range=10.96.0.0/12,fd00::/108  \ +      --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112  \       --feature-gates=IPv6DualStack=true \       --service-node-port-range=30000-32767  \       --etcd-servers=https://192.168.1.81:2379,https://192.168.1.82:2379,https://192.168.1.83:2379 \ @@ -195,8 +195,8 @@ WantedBy=multi-user.target ```shell --feature-gates=IPv6DualStack=true ---service-cluster-ip-range=10.96.0.0/12,fd00::/108 ---cluster-cidr=172.16.0.0/12,fc00::/48 +--service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 +--cluster-cidr=172.16.0.0/12,fc00:2222::/112 --node-cidr-mask-size-ipv4=24 --node-cidr-mask-size-ipv6=64 @@ -225,8 +225,8 @@ ExecStart=/usr/local/bin/kube-controller-manager \       --controllers=*,bootstrapsigner,tokencleaner \       --allocate-node-cidrs=true \       --feature-gates=IPv6DualStack=true \ -      --service-cluster-ip-range=10.96.0.0/12,fd00::/108 \ -      --cluster-cidr=172.16.0.0/12,fc00::/48 \ +      --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \ +      --cluster-cidr=172.16.0.0/12,fc00:2222::/112 \       --node-cidr-mask-size-ipv4=24 \       --node-cidr-mask-size-ipv6=64 \       --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem  @@ -282,7 +282,7 @@ WantedBy=multi-user.target ```shell #修改如下配置 -clusterCIDR: 172.16.0.0/12,fc00::/48  +clusterCIDR: 172.16.0.0/12,fc00:2222::/112  [root@k8s-master01 ~]# vim /etc/kubernetes/kube-proxy.yaml [root@k8s-master01 ~]# cat /etc/kubernetes/kube-proxy.yaml @@ -294,7 +294,7 @@ clientConnection:   contentType: application/vnd.kubernetes.protobuf   kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig   qps: 5 -clusterCIDR: 172.16.0.0/12,fc00::/48  +clusterCIDR: 172.16.0.0/12,fc00:2222::/112  configSyncPeriod: 15m0s conntrack:   max: null @@ -344,7 +344,7 @@ udpIdleTimeout: 250ms       value: "autodetect"     - name: CALICO_IPV4POOL_CIDR -      value: "172.16.0.0/16" +      value: "172.16.0.0/12"     - name: CALICO_IPV6POOL_CIDR       value: "fc00::/48" diff --git a/doc/kube-proxy_permissions.md b/doc/kube-proxy_permissions.md index 085ba4d..050cea1 
100644 --- a/doc/kube-proxy_permissions.md +++ b/doc/kube-proxy_permissions.md @@ -148,7 +148,7 @@ clientConnection: contentType: application/vnd.kubernetes.protobuf kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig qps: 5 -clusterCIDR: 172.16.0.0/12,fc00::/48 +clusterCIDR: 172.16.0.0/12,fc00:2222::/112 configSyncPeriod: 15m0s conntrack: max: null diff --git a/doc/kubeadm-install-IPV6-IPV4.md b/doc/kubeadm-install-IPV6-IPV4.md index b9158d2..028772e 100644 --- a/doc/kubeadm-install-IPV6-IPV4.md +++ b/doc/kubeadm-install-IPV6-IPV4.md @@ -192,8 +192,8 @@ kind: ClusterConfiguration kubernetesVersion: v1.24.0 imageRepository: registry.cn-hangzhou.aliyuncs.com/chenby networking: -  podSubnet: 172.16.0.0/12,fc00::/48 -  serviceSubnet: 10.96.0.0/12,fd00::/108 +  podSubnet: 172.16.0.0/12,fc00:2222::/112 +  serviceSubnet: 10.96.0.0/12,fd00:1111::/112 root@k8s-master01:~# @@ -364,7 +364,7 @@ wget https://raw.githubusercontent.com/cby-chen/Kubernetes/main/yaml/calico-ipv       value: "autodetect"     - name: CALICO_IPV4POOL_CIDR -      value: "172.16.0.0/16" +      value: "172.16.0.0/12"     - name: CALICO_IPV6POOL_CIDR       value: "fc00::/48" diff --git a/doc/v1.21.13-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves.md b/doc/v1.21.13-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves.md index 813f622..569b045 100644 --- a/doc/v1.21.13-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves.md +++ b/doc/v1.21.13-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves.md @@ -1992,7 +1992,7 @@ ExecStart=/usr/local/bin/kube-apiserver \ --secure-port=6443 \ --insecure-port=0 \ --advertise-address=10.0.0.81 \ - --service-cluster-ip-range=10.96.0.0/12,fd00::/108 \ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \ --feature-gates=IPv6DualStack=true \ --service-node-port-range=30000-32767 \ --etcd-servers=https://10.0.0.81:2379,https://10.0.0.82:2379,https://10.0.0.83:2379 \ @@ -2049,7 +2049,7 @@ ExecStart=/usr/local/bin/kube-apiserver \ --secure-port=6443 \ --insecure-port=0 \ --advertise-address=10.0.0.82 \ - --service-cluster-ip-range=10.96.0.0/12,fd00::/108 \ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \ --feature-gates=IPv6DualStack=true \ --service-node-port-range=30000-32767 \ --etcd-servers=https://10.0.0.81:2379,https://10.0.0.82:2379,https://10.0.0.83:2379 \ @@ -2107,7 +2107,7 @@ ExecStart=/usr/local/bin/kube-apiserver \ --secure-port=6443 \ --insecure-port=0 \ --advertise-address=10.0.0.83 \ - --service-cluster-ip-range=10.96.0.0/12,fd00::/108 \ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \ --feature-gates=IPv6DualStack=true \ --service-node-port-range=30000-32767 \ --etcd-servers=https://10.0.0.81:2379,https://10.0.0.82:2379,https://10.0.0.83:2379 \ @@ -2186,8 +2186,8 @@ ExecStart=/usr/local/bin/kube-controller-manager \ --pod-eviction-timeout=2m0s \ --controllers=*,bootstrapsigner,tokencleaner \ --allocate-node-cidrs=true \ - --service-cluster-ip-range=10.96.0.0/12,fd00::/108 \ - --cluster-cidr=172.16.0.0/12,fc00::/48 \ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \ + --cluster-cidr=172.16.0.0/12,fc00:2222::/112 \ --node-cidr-mask-size-ipv4=24 \ --node-cidr-mask-size-ipv6=64 \ --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \ @@ -2526,7 +2526,7 @@ clientConnection: contentType: application/vnd.kubernetes.protobuf kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig qps: 5 -clusterCIDR: 172.16.0.0/12,fc00::/48 +clusterCIDR: 172.16.0.0/12,fc00:2222::/112 configSyncPeriod: 15m0s conntrack: max: null @@ -2589,7 
+2589,7 @@ vim calico-ipv6.yaml value: "autodetect" - name: CALICO_IPV4POOL_CIDR - value: "172.16.0.0/16" + value: "172.16.0.0/12" - name: CALICO_IPV6POOL_CIDR value: "fc00::/48" diff --git a/doc/v1.22.10-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves.md b/doc/v1.22.10-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves.md index 6b33c85..db81342 100644 --- a/doc/v1.22.10-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves.md +++ b/doc/v1.22.10-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves.md @@ -1992,7 +1992,7 @@ ExecStart=/usr/local/bin/kube-apiserver \ --secure-port=6443 \ --insecure-port=0 \ --advertise-address=10.0.0.81 \ - --service-cluster-ip-range=10.96.0.0/12,fd00::/108 \ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \ --feature-gates=IPv6DualStack=true \ --service-node-port-range=30000-32767 \ --etcd-servers=https://10.0.0.81:2379,https://10.0.0.82:2379,https://10.0.0.83:2379 \ @@ -2049,7 +2049,7 @@ ExecStart=/usr/local/bin/kube-apiserver \ --secure-port=6443 \ --insecure-port=0 \ --advertise-address=10.0.0.82 \ - --service-cluster-ip-range=10.96.0.0/12,fd00::/108 \ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \ --feature-gates=IPv6DualStack=true \ --service-node-port-range=30000-32767 \ --etcd-servers=https://10.0.0.81:2379,https://10.0.0.82:2379,https://10.0.0.83:2379 \ @@ -2107,7 +2107,7 @@ ExecStart=/usr/local/bin/kube-apiserver \ --secure-port=6443 \ --insecure-port=0 \ --advertise-address=10.0.0.83 \ - --service-cluster-ip-range=10.96.0.0/12,fd00::/108 \ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \ --feature-gates=IPv6DualStack=true \ --service-node-port-range=30000-32767 \ --etcd-servers=https://10.0.0.81:2379,https://10.0.0.82:2379,https://10.0.0.83:2379 \ @@ -2186,8 +2186,8 @@ ExecStart=/usr/local/bin/kube-controller-manager \ --pod-eviction-timeout=2m0s \ --controllers=*,bootstrapsigner,tokencleaner \ --allocate-node-cidrs=true \ - --service-cluster-ip-range=10.96.0.0/12,fd00::/108 \ - --cluster-cidr=172.16.0.0/12,fc00::/48 \ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \ + --cluster-cidr=172.16.0.0/12,fc00:2222::/112 \ --node-cidr-mask-size-ipv4=24 \ --node-cidr-mask-size-ipv6=64 \ --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \ @@ -2526,7 +2526,7 @@ clientConnection: contentType: application/vnd.kubernetes.protobuf kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig qps: 5 -clusterCIDR: 172.16.0.0/12,fc00::/48 +clusterCIDR: 172.16.0.0/12,fc00:2222::/112 configSyncPeriod: 15m0s conntrack: max: null @@ -2589,7 +2589,7 @@ vim calico-ipv6.yaml value: "autodetect" - name: CALICO_IPV4POOL_CIDR - value: "172.16.0.0/16" + value: "172.16.0.0/12" - name: CALICO_IPV6POOL_CIDR value: "fc00::/48" diff --git a/doc/v1.23.7-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves.md b/doc/v1.23.7-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves.md index fed3799..de64204 100644 --- a/doc/v1.23.7-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves.md +++ b/doc/v1.23.7-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves.md @@ -1992,7 +1992,7 @@ ExecStart=/usr/local/bin/kube-apiserver \ --secure-port=6443 \ --insecure-port=0 \ --advertise-address=10.0.0.81 \ - --service-cluster-ip-range=10.96.0.0/12,fd00::/108 \ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \ --feature-gates=IPv6DualStack=true \ --service-node-port-range=30000-32767 \ --etcd-servers=https://10.0.0.81:2379,https://10.0.0.82:2379,https://10.0.0.83:2379 \ @@ -2049,7 +2049,7 @@ 
ExecStart=/usr/local/bin/kube-apiserver \ --secure-port=6443 \ --insecure-port=0 \ --advertise-address=10.0.0.82 \ - --service-cluster-ip-range=10.96.0.0/12,fd00::/108 \ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \ --feature-gates=IPv6DualStack=true \ --service-node-port-range=30000-32767 \ --etcd-servers=https://10.0.0.81:2379,https://10.0.0.82:2379,https://10.0.0.83:2379 \ @@ -2107,7 +2107,7 @@ ExecStart=/usr/local/bin/kube-apiserver \ --secure-port=6443 \ --insecure-port=0 \ --advertise-address=10.0.0.83 \ - --service-cluster-ip-range=10.96.0.0/12,fd00::/108 \ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \ --feature-gates=IPv6DualStack=true \ --service-node-port-range=30000-32767 \ --etcd-servers=https://10.0.0.81:2379,https://10.0.0.82:2379,https://10.0.0.83:2379 \ @@ -2186,8 +2186,8 @@ ExecStart=/usr/local/bin/kube-controller-manager \ --pod-eviction-timeout=2m0s \ --controllers=*,bootstrapsigner,tokencleaner \ --allocate-node-cidrs=true \ - --service-cluster-ip-range=10.96.0.0/12,fd00::/108 \ - --cluster-cidr=172.16.0.0/12,fc00::/48 \ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \ + --cluster-cidr=172.16.0.0/12,fc00:2222::/112 \ --node-cidr-mask-size-ipv4=24 \ --node-cidr-mask-size-ipv6=64 \ --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \ @@ -2526,7 +2526,7 @@ clientConnection: contentType: application/vnd.kubernetes.protobuf kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig qps: 5 -clusterCIDR: 172.16.0.0/12,fc00::/48 +clusterCIDR: 172.16.0.0/12,fc00:2222::/112 configSyncPeriod: 15m0s conntrack: max: null @@ -2589,7 +2589,7 @@ vim calico-ipv6.yaml value: "autodetect" - name: CALICO_IPV4POOL_CIDR - value: "172.16.0.0/16" + value: "172.16.0.0/12" - name: CALICO_IPV6POOL_CIDR value: "fc00::/48" diff --git a/doc/v1.24.0-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves.md b/doc/v1.24.0-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves.md index cb598c1..dd9c274 100644 --- a/doc/v1.24.0-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves.md +++ b/doc/v1.24.0-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves.md @@ -2014,7 +2014,7 @@ ExecStart=/usr/local/bin/kube-apiserver \ --bind-address=0.0.0.0 \ --secure-port=6443 \ --advertise-address=10.0.0.81 \ - --service-cluster-ip-range=10.96.0.0/12,fd00::/108 \ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \ --feature-gates=IPv6DualStack=true \ --service-node-port-range=30000-32767 \ --etcd-servers=https://10.0.0.81:2379,https://10.0.0.82:2379,https://10.0.0.83:2379 \ @@ -2070,7 +2070,7 @@ ExecStart=/usr/local/bin/kube-apiserver \ --bind-address=0.0.0.0 \ --secure-port=6443 \ --advertise-address=10.0.0.82 \ - --service-cluster-ip-range=10.96.0.0/12,fd00::/108 \ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \ --feature-gates=IPv6DualStack=true \ --service-node-port-range=30000-32767 \ --etcd-servers=https://10.0.0.81:2379,https://10.0.0.82:2379,https://10.0.0.83:2379 \ @@ -2127,7 +2127,7 @@ ExecStart=/usr/local/bin/kube-apiserver \ --bind-address=0.0.0.0 \ --secure-port=6443 \ --advertise-address=10.0.0.83 \ - --service-cluster-ip-range=10.96.0.0/12,fd00::/108 \ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \ --feature-gates=IPv6DualStack=true \ --service-node-port-range=30000-32767 \ --etcd-servers=https://10.0.0.81:2379,https://10.0.0.82:2379,https://10.0.0.83:2379 \ @@ -2206,8 +2206,8 @@ ExecStart=/usr/local/bin/kube-controller-manager \ --controllers=*,bootstrapsigner,tokencleaner \ --allocate-node-cidrs=true \ 
--feature-gates=IPv6DualStack=true \ - --service-cluster-ip-range=10.96.0.0/12,fd00::/108 \ - --cluster-cidr=172.16.0.0/12,fc00::/48 \ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \ + --cluster-cidr=172.16.0.0/12,fc00:2222::/112 \ --node-cidr-mask-size-ipv4=24 \ --node-cidr-mask-size-ipv6=64 \ --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem @@ -2504,7 +2504,7 @@ clientConnection: contentType: application/vnd.kubernetes.protobuf kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig qps: 5 -clusterCIDR: 172.16.0.0/12,fc00::/48 +clusterCIDR: 172.16.0.0/12,fc00:2222::/112 configSyncPeriod: 15m0s conntrack: max: null @@ -2568,7 +2568,7 @@ vim calico-ipv6.yaml value: "autodetect" - name: CALICO_IPV4POOL_CIDR - value: "172.16.0.0/16" + value: "172.16.0.0/12" - name: CALICO_IPV6POOL_CIDR value: "fc00::/48" diff --git a/doc/v1.24.0-CentOS-binary-install-IPv6-IPv4.md b/doc/v1.24.0-CentOS-binary-install-IPv6-IPv4.md index 4eef08a..5b19680 100644 --- a/doc/v1.24.0-CentOS-binary-install-IPv6-IPv4.md +++ b/doc/v1.24.0-CentOS-binary-install-IPv6-IPv4.md @@ -2004,7 +2004,7 @@ ExecStart=/usr/local/bin/kube-apiserver \ --bind-address=0.0.0.0 \ --secure-port=6443 \ --advertise-address=10.0.0.81 \ - --service-cluster-ip-range=10.96.0.0/12,fd00::/108 \ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \ --feature-gates=IPv6DualStack=true \ --service-node-port-range=30000-32767 \ --etcd-servers=https://10.0.0.81:2379,https://10.0.0.82:2379,https://10.0.0.83:2379 \ @@ -2060,7 +2060,7 @@ ExecStart=/usr/local/bin/kube-apiserver \ --bind-address=0.0.0.0 \ --secure-port=6443 \ --advertise-address=10.0.0.82 \ - --service-cluster-ip-range=10.96.0.0/12,fd00::/108 \ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \ --feature-gates=IPv6DualStack=true \ --service-node-port-range=30000-32767 \ --etcd-servers=https://10.0.0.81:2379,https://10.0.0.82:2379,https://10.0.0.83:2379 \ @@ -2117,7 +2117,7 @@ ExecStart=/usr/local/bin/kube-apiserver \ --bind-address=0.0.0.0 \ --secure-port=6443 \ --advertise-address=10.0.0.83 \ - --service-cluster-ip-range=10.96.0.0/12,fd00::/108 \ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \ --feature-gates=IPv6DualStack=true \ --service-node-port-range=30000-32767 \ --etcd-servers=https://10.0.0.81:2379,https://10.0.0.82:2379,https://10.0.0.83:2379 \ @@ -2196,8 +2196,8 @@ ExecStart=/usr/local/bin/kube-controller-manager \ --controllers=*,bootstrapsigner,tokencleaner \ --allocate-node-cidrs=true \ --feature-gates=IPv6DualStack=true \ - --service-cluster-ip-range=10.96.0.0/12,fd00::/108 \ - --cluster-cidr=172.16.0.0/12,fc00::/48 \ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \ + --cluster-cidr=172.16.0.0/12,fc00:2222::/112 \ --node-cidr-mask-size-ipv4=24 \ --node-cidr-mask-size-ipv6=64 \ --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem @@ -2497,7 +2497,7 @@ clientConnection: contentType: application/vnd.kubernetes.protobuf kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig qps: 5 -clusterCIDR: 172.16.0.0/12,fc00::/48 +clusterCIDR: 172.16.0.0/12,fc00:2222::/112 configSyncPeriod: 15m0s conntrack: max: null @@ -2560,7 +2560,7 @@ EOF value: "autodetect" - name: CALICO_IPV4POOL_CIDR - value: "172.16.0.0/16" + value: "172.16.0.0/12" - name: CALICO_IPV6POOL_CIDR value: "fc00::/48" diff --git a/doc/v1.24.1-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves.md b/doc/v1.24.1-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves.md index 91bdee1..714d9f1 100644 --- 
a/doc/v1.24.1-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves.md +++ b/doc/v1.24.1-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves.md @@ -2014,7 +2014,7 @@ ExecStart=/usr/local/bin/kube-apiserver \ --bind-address=0.0.0.0 \ --secure-port=6443 \ --advertise-address=10.0.0.81 \ - --service-cluster-ip-range=10.96.0.0/12,fd00::/108 \ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \ --feature-gates=IPv6DualStack=true \ --service-node-port-range=30000-32767 \ --etcd-servers=https://10.0.0.81:2379,https://10.0.0.82:2379,https://10.0.0.83:2379 \ @@ -2070,7 +2070,7 @@ ExecStart=/usr/local/bin/kube-apiserver \ --bind-address=0.0.0.0 \ --secure-port=6443 \ --advertise-address=10.0.0.82 \ - --service-cluster-ip-range=10.96.0.0/12,fd00::/108 \ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \ --feature-gates=IPv6DualStack=true \ --service-node-port-range=30000-32767 \ --etcd-servers=https://10.0.0.81:2379,https://10.0.0.82:2379,https://10.0.0.83:2379 \ @@ -2127,7 +2127,7 @@ ExecStart=/usr/local/bin/kube-apiserver \ --bind-address=0.0.0.0 \ --secure-port=6443 \ --advertise-address=10.0.0.83 \ - --service-cluster-ip-range=10.96.0.0/12,fd00::/108 \ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \ --feature-gates=IPv6DualStack=true \ --service-node-port-range=30000-32767 \ --etcd-servers=https://10.0.0.81:2379,https://10.0.0.82:2379,https://10.0.0.83:2379 \ @@ -2206,8 +2206,8 @@ ExecStart=/usr/local/bin/kube-controller-manager \ --controllers=*,bootstrapsigner,tokencleaner \ --allocate-node-cidrs=true \ --feature-gates=IPv6DualStack=true \ - --service-cluster-ip-range=10.96.0.0/12,fd00::/108 \ - --cluster-cidr=172.16.0.0/12,fc00::/48 \ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \ + --cluster-cidr=172.16.0.0/12,fc00:2222::/112 \ --node-cidr-mask-size-ipv4=24 \ --node-cidr-mask-size-ipv6=64 \ --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem @@ -2504,7 +2504,7 @@ clientConnection: contentType: application/vnd.kubernetes.protobuf kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig qps: 5 -clusterCIDR: 172.16.0.0/12,fc00::/48 +clusterCIDR: 172.16.0.0/12,fc00:2222::/112 configSyncPeriod: 15m0s conntrack: max: null @@ -2568,7 +2568,7 @@ vim calico-ipv6.yaml value: "autodetect" - name: CALICO_IPV4POOL_CIDR - value: "172.16.0.0/16" + value: "172.16.0.0/12" - name: CALICO_IPV6POOL_CIDR value: "fc00::/48" diff --git a/doc/v1.24.1-CentOS-binary-install-IPv6-IPv4.md b/doc/v1.24.1-CentOS-binary-install-IPv6-IPv4.md index 44a85f5..70f86e3 100644 --- a/doc/v1.24.1-CentOS-binary-install-IPv6-IPv4.md +++ b/doc/v1.24.1-CentOS-binary-install-IPv6-IPv4.md @@ -2052,7 +2052,7 @@ ExecStart=/usr/local/bin/kube-apiserver \ --bind-address=0.0.0.0 \ --secure-port=6443 \ --advertise-address=10.0.0.61 \ - --service-cluster-ip-range=10.96.0.0/12,fd00::/108 \ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \ --feature-gates=IPv6DualStack=true \ --service-node-port-range=30000-32767 \ --etcd-servers=https://10.0.0.61:2379,https://10.0.0.62:2379,https://10.0.0.63:2379 \ @@ -2108,7 +2108,7 @@ ExecStart=/usr/local/bin/kube-apiserver \ --bind-address=0.0.0.0 \ --secure-port=6443 \ --advertise-address=10.0.0.62 \ - --service-cluster-ip-range=10.96.0.0/12,fd00::/108 \ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \ --feature-gates=IPv6DualStack=true \ --service-node-port-range=30000-32767 \ --etcd-servers=https://10.0.0.61:2379,https://10.0.0.62:2379,https://10.0.0.63:2379 \ @@ -2165,7 +2165,7 @@ ExecStart=/usr/local/bin/kube-apiserver \ 
--bind-address=0.0.0.0 \ --secure-port=6443 \ --advertise-address=10.0.0.63 \ - --service-cluster-ip-range=10.96.0.0/12,fd00::/108 \ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \ --feature-gates=IPv6DualStack=true \ --service-node-port-range=30000-32767 \ --etcd-servers=https://10.0.0.61:2379,https://10.0.0.62:2379,https://10.0.0.63:2379 \ @@ -2244,8 +2244,8 @@ ExecStart=/usr/local/bin/kube-controller-manager \ --controllers=*,bootstrapsigner,tokencleaner \ --allocate-node-cidrs=true \ --feature-gates=IPv6DualStack=true \ - --service-cluster-ip-range=10.96.0.0/12,fd00::/108 \ - --cluster-cidr=172.16.0.0/12,fc00::/48 \ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \ + --cluster-cidr=172.16.0.0/12,fc00:2222::/112 \ --node-cidr-mask-size-ipv4=24 \ --node-cidr-mask-size-ipv6=64 \ --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem @@ -2543,7 +2543,7 @@ clientConnection: contentType: application/vnd.kubernetes.protobuf kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig qps: 5 -clusterCIDR: 172.16.0.0/12,fc00::/48 +clusterCIDR: 172.16.0.0/12,fc00:2222::/112 configSyncPeriod: 15m0s conntrack: max: null @@ -2607,7 +2607,7 @@ vim calico-ipv6.yaml value: "autodetect" - name: CALICO_IPV4POOL_CIDR - value: "172.16.0.0/16" + value: "172.16.0.0/12" - name: CALICO_IPV6POOL_CIDR value: "fc00::/48" diff --git a/doc/v1.24.1-Ubuntu-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves.md b/doc/v1.24.1-Ubuntu-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves.md index 33ae531..766881f 100644 --- a/doc/v1.24.1-Ubuntu-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves.md +++ b/doc/v1.24.1-Ubuntu-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves.md @@ -1983,7 +1983,7 @@ ExecStart=/usr/local/bin/kube-apiserver \ --bind-address=0.0.0.0 \ --secure-port=6443 \ --advertise-address=192.168.1.11 \ - --service-cluster-ip-range=10.96.0.0/12,fd00::/108 \ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \ --feature-gates=IPv6DualStack=true \ --service-node-port-range=30000-32767 \ --etcd-servers=https://192.168.1.11:2379,https://192.168.1.12:2379,https://192.168.1.13:2379 \ @@ -2039,7 +2039,7 @@ ExecStart=/usr/local/bin/kube-apiserver \ --bind-address=0.0.0.0 \ --secure-port=6443 \ --advertise-address=192.168.1.12 \ - --service-cluster-ip-range=10.96.0.0/12,fd00::/108 \ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \ --feature-gates=IPv6DualStack=true \ --service-node-port-range=30000-32767 \ --etcd-servers=https://192.168.1.11:2379,https://192.168.1.12:2379,https://192.168.1.13:2379 \ @@ -2096,7 +2096,7 @@ ExecStart=/usr/local/bin/kube-apiserver \ --bind-address=0.0.0.0 \ --secure-port=6443 \ --advertise-address=192.168.1.13 \ - --service-cluster-ip-range=10.96.0.0/12,fd00::/108 \ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \ --feature-gates=IPv6DualStack=true \ --service-node-port-range=30000-32767 \ --etcd-servers=https://192.168.1.11:2379,https://192.168.1.12:2379,https://192.168.1.13:2379 \ @@ -2175,8 +2175,8 @@ ExecStart=/usr/local/bin/kube-controller-manager \ --controllers=*,bootstrapsigner,tokencleaner \ --allocate-node-cidrs=true \ --feature-gates=IPv6DualStack=true \ - --service-cluster-ip-range=10.96.0.0/12,fd00::/108 \ - --cluster-cidr=172.16.0.0/12,fc00::/48 \ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \ + --cluster-cidr=172.16.0.0/12,fc00:2222::/112 \ --node-cidr-mask-size-ipv4=24 \ --node-cidr-mask-size-ipv6=64 \ --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem @@ -2474,7 +2474,7 @@ clientConnection: 
contentType: application/vnd.kubernetes.protobuf kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig qps: 5 -clusterCIDR: 172.16.0.0/12,fc00::/48 +clusterCIDR: 172.16.0.0/12,fc00:2222::/112 configSyncPeriod: 15m0s conntrack: max: null @@ -2538,7 +2538,7 @@ vim calico-ipv6.yaml value: "autodetect" - name: CALICO_IPV4POOL_CIDR - value: "172.16.0.0/16" + value: "172.16.0.0/12" - name: CALICO_IPV6POOL_CIDR value: "fc00::/48" diff --git a/doc/v1.24.2-CentOS-binary-install-IPv6-IPv4.md b/doc/v1.24.2-CentOS-binary-install-IPv6-IPv4.md index 570a89c..aeb42fe 100644 --- a/doc/v1.24.2-CentOS-binary-install-IPv6-IPv4.md +++ b/doc/v1.24.2-CentOS-binary-install-IPv6-IPv4.md @@ -2075,7 +2075,7 @@ ExecStart=/usr/local/bin/kube-apiserver \\ --bind-address=0.0.0.0 \\ --secure-port=6443 \\ --advertise-address=10.0.0.61 \\ - --service-cluster-ip-range=10.96.0.0/12,fd00::/108 \\ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \\ --feature-gates=IPv6DualStack=true \\ --service-node-port-range=30000-32767 \\ --etcd-servers=https://10.0.0.61:2379,https://10.0.0.62:2379,https://10.0.0.63:2379 \\ @@ -2131,7 +2131,7 @@ ExecStart=/usr/local/bin/kube-apiserver \\ --bind-address=0.0.0.0 \\ --secure-port=6443 \\ --advertise-address=10.0.0.62 \\ - --service-cluster-ip-range=10.96.0.0/12,fd00::/108 \\ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \\ --feature-gates=IPv6DualStack=true \\ --service-node-port-range=30000-32767 \\ --etcd-servers=https://10.0.0.61:2379,https://10.0.0.62:2379,https://10.0.0.63:2379 \\ @@ -2188,7 +2188,7 @@ ExecStart=/usr/local/bin/kube-apiserver \\ --bind-address=0.0.0.0 \\ --secure-port=6443 \\ --advertise-address=10.0.0.63 \\ - --service-cluster-ip-range=10.96.0.0/12,fd00::/108 \\ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \\ --feature-gates=IPv6DualStack=true \\ --service-node-port-range=30000-32767 \\ --etcd-servers=https://10.0.0.61:2379,https://10.0.0.62:2379,https://10.0.0.63:2379 \\ @@ -2267,8 +2267,8 @@ ExecStart=/usr/local/bin/kube-controller-manager \\ --controllers=*,bootstrapsigner,tokencleaner \\ --allocate-node-cidrs=true \\ --feature-gates=IPv6DualStack=true \\ - --service-cluster-ip-range=10.96.0.0/12,fd00::/108 \\ - --cluster-cidr=172.16.0.0/12,fc00::/48 \\ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \\ + --cluster-cidr=172.16.0.0/12,fc00:2222::/112 \\ --node-cidr-mask-size-ipv4=24 \\ --node-cidr-mask-size-ipv6=64 \\ --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem @@ -2566,7 +2566,7 @@ clientConnection: contentType: application/vnd.kubernetes.protobuf kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig qps: 5 -clusterCIDR: 172.16.0.0/12,fc00::/48 +clusterCIDR: 172.16.0.0/12,fc00:2222::/112 configSyncPeriod: 15m0s conntrack: max: null diff --git a/doc/v1.24.3-CentOS-binary-install-IPv6-IPv4.md b/doc/v1.24.3-CentOS-binary-install-IPv6-IPv4.md index bd84a7e..65dbbe0 100644 --- a/doc/v1.24.3-CentOS-binary-install-IPv6-IPv4.md +++ b/doc/v1.24.3-CentOS-binary-install-IPv6-IPv4.md @@ -2031,7 +2031,7 @@ ExecStart=/usr/local/bin/kube-apiserver \\ --bind-address=0.0.0.0 \\ --secure-port=6443 \\ --advertise-address=192.168.1.61 \\ - --service-cluster-ip-range=10.96.0.0/12,fd00::/108 \\ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \\ --feature-gates=IPv6DualStack=true \\ --service-node-port-range=30000-32767 \\ --etcd-servers=https://192.168.1.61:2379,https://192.168.1.62:2379,https://192.168.1.63:2379 \\ @@ -2087,7 +2087,7 @@ ExecStart=/usr/local/bin/kube-apiserver \\ --bind-address=0.0.0.0 \\ 
--secure-port=6443 \\ --advertise-address=192.168.1.62 \\ - --service-cluster-ip-range=10.96.0.0/12,fd00::/108 \\ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \\ --feature-gates=IPv6DualStack=true \\ --service-node-port-range=30000-32767 \\ --etcd-servers=https://192.168.1.61:2379,https://192.168.1.62:2379,https://192.168.1.63:2379 \\ @@ -2144,7 +2144,7 @@ ExecStart=/usr/local/bin/kube-apiserver \\ --bind-address=0.0.0.0 \\ --secure-port=6443 \\ --advertise-address=192.168.1.63 \\ - --service-cluster-ip-range=10.96.0.0/12,fd00::/108 \\ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \\ --feature-gates=IPv6DualStack=true \\ --service-node-port-range=30000-32767 \\ --etcd-servers=https://192.168.1.61:2379,https://192.168.1.62:2379,https://192.168.1.63:2379 \\ @@ -2222,8 +2222,8 @@ ExecStart=/usr/local/bin/kube-controller-manager \\ --controllers=*,bootstrapsigner,tokencleaner \\ --allocate-node-cidrs=true \\ --feature-gates=IPv6DualStack=true \\ - --service-cluster-ip-range=10.96.0.0/12,fd00::/108 \\ - --cluster-cidr=172.16.0.0/12,fc00::/48 \\ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \\ + --cluster-cidr=172.16.0.0/12,fc00:2222::/112 \\ --node-cidr-mask-size-ipv4=24 \\ --node-cidr-mask-size-ipv6=64 \\ --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem @@ -2518,7 +2518,7 @@ clientConnection: contentType: application/vnd.kubernetes.protobuf kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig qps: 5 -clusterCIDR: 172.16.0.0/12,fc00::/48 +clusterCIDR: 172.16.0.0/12,fc00:2222::/112 configSyncPeriod: 15m0s conntrack: max: null diff --git a/doc/v1.25.0-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves.md b/doc/v1.25.0-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves.md index 5369324..c3d06f4 100644 --- a/doc/v1.25.0-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves.md +++ b/doc/v1.25.0-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves.md @@ -2237,7 +2237,7 @@ ExecStart=/usr/local/bin/kube-apiserver \\ --bind-address=0.0.0.0 \\ --secure-port=6443 \\ --advertise-address=192.168.1.61 \\ - --service-cluster-ip-range=10.96.0.0/12,fd00::/108 \\ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \\ --service-node-port-range=30000-32767 \\ --etcd-servers=https://192.168.1.61:2379,https://192.168.1.62:2379,https://192.168.1.63:2379 \\ --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem \\ @@ -2293,7 +2293,7 @@ ExecStart=/usr/local/bin/kube-apiserver \\ --bind-address=0.0.0.0 \\ --secure-port=6443 \\ --advertise-address=192.168.1.62 \\ - --service-cluster-ip-range=10.96.0.0/12,fd00::/108 \\ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \\ --service-node-port-range=30000-32767 \\ --etcd-servers=https://192.168.1.61:2379,https://192.168.1.62:2379,https://192.168.1.63:2379 \\ --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem \\ @@ -2350,7 +2350,7 @@ ExecStart=/usr/local/bin/kube-apiserver \\ --bind-address=0.0.0.0 \\ --secure-port=6443 \\ --advertise-address=192.168.1.63 \\ - --service-cluster-ip-range=10.96.0.0/12,fd00::/108 \\ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \\ --service-node-port-range=30000-32767 \\ --etcd-servers=https://192.168.1.61:2379,https://192.168.1.62:2379,https://192.168.1.63:2379 \\ --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem \\ @@ -2428,8 +2428,8 @@ ExecStart=/usr/local/bin/kube-controller-manager \\ --pod-eviction-timeout=2m0s \\ --controllers=*,bootstrapsigner,tokencleaner \\ --allocate-node-cidrs=true \\ - --service-cluster-ip-range=10.96.0.0/12,fd00::/108 \\ - 
--cluster-cidr=172.16.0.0/12,fc00::/48 \\ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \\ + --cluster-cidr=172.16.0.0/12,fc00:2222::/112 \\ --node-cidr-mask-size-ipv4=24 \\ --node-cidr-mask-size-ipv6=64 \\ --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem @@ -2746,7 +2746,7 @@ clientConnection: contentType: application/vnd.kubernetes.protobuf kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig qps: 5 -clusterCIDR: 172.16.0.0/12,fc00::/48 +clusterCIDR: 172.16.0.0/12,fc00:2222::/112 configSyncPeriod: 15m0s conntrack: max: null diff --git a/doc/v1.25.0-CentOS-binary-install-IPv6-IPv4.md b/doc/v1.25.0-CentOS-binary-install-IPv6-IPv4.md index 2d6317a..786a42b 100644 --- a/doc/v1.25.0-CentOS-binary-install-IPv6-IPv4.md +++ b/doc/v1.25.0-CentOS-binary-install-IPv6-IPv4.md @@ -2032,7 +2032,7 @@ ExecStart=/usr/local/bin/kube-apiserver \\ --bind-address=0.0.0.0 \\ --secure-port=6443 \\ --advertise-address=192.168.1.61 \\ - --service-cluster-ip-range=10.96.0.0/12,fd00::/108 \\ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \\ --service-node-port-range=30000-32767 \\ --etcd-servers=https://192.168.1.61:2379,https://192.168.1.62:2379,https://192.168.1.63:2379 \\ --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem \\ @@ -2088,7 +2088,7 @@ ExecStart=/usr/local/bin/kube-apiserver \\ --bind-address=0.0.0.0 \\ --secure-port=6443 \\ --advertise-address=192.168.1.62 \\ - --service-cluster-ip-range=10.96.0.0/12,fd00::/108 \\ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \\ --service-node-port-range=30000-32767 \\ --etcd-servers=https://192.168.1.61:2379,https://192.168.1.62:2379,https://192.168.1.63:2379 \\ --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem \\ @@ -2145,7 +2145,7 @@ ExecStart=/usr/local/bin/kube-apiserver \\ --bind-address=0.0.0.0 \\ --secure-port=6443 \\ --advertise-address=192.168.1.63 \\ - --service-cluster-ip-range=10.96.0.0/12,fd00::/108 \\ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \\ --service-node-port-range=30000-32767 \\ --etcd-servers=https://192.168.1.61:2379,https://192.168.1.62:2379,https://192.168.1.63:2379 \\ --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem \\ @@ -2223,8 +2223,8 @@ ExecStart=/usr/local/bin/kube-controller-manager \\ --pod-eviction-timeout=2m0s \\ --controllers=*,bootstrapsigner,tokencleaner \\ --allocate-node-cidrs=true \\ - --service-cluster-ip-range=10.96.0.0/12,fd00::/108 \\ - --cluster-cidr=172.16.0.0/12,fc00::/48 \\ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \\ + --cluster-cidr=172.16.0.0/12,fc00:2222::/112 \\ --node-cidr-mask-size-ipv4=24 \\ --node-cidr-mask-size-ipv6=64 \\ --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem @@ -2519,7 +2519,7 @@ clientConnection: contentType: application/vnd.kubernetes.protobuf kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig qps: 5 -clusterCIDR: 172.16.0.0/12,fc00::/48 +clusterCIDR: 172.16.0.0/12,fc00:2222::/112 configSyncPeriod: 15m0s conntrack: max: null diff --git a/doc/v1.27.3-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md b/doc/v1.27.3-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md index 6ed7037..cc0e5fd 100644 --- a/doc/v1.27.3-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md +++ b/doc/v1.27.3-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md @@ -1895,7 +1895,7 @@ ExecStart=/usr/local/bin/kube-apiserver \\ --bind-address=0.0.0.0 \\ --secure-port=6443 \\ --advertise-address=192.168.0.31 \\ - --service-cluster-ip-range=10.96.0.0/12,fd00::/108 \\ + 
--service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \\ --service-node-port-range=30000-32767 \\ --etcd-servers=https://192.168.0.31:2379,https://192.168.0.32:2379,https://192.168.0.33:2379 \\ --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem \\ @@ -1946,7 +1946,7 @@ ExecStart=/usr/local/bin/kube-apiserver \\ --bind-address=0.0.0.0 \\ --secure-port=6443 \\ --advertise-address=192.168.0.32 \\ - --service-cluster-ip-range=10.96.0.0/12,fd00::/108 \\ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \\ --service-node-port-range=30000-32767 \\ --etcd-servers=https://192.168.0.31:2379,https://192.168.0.32:2379,https://192.168.0.33:2379 \\ --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem \\ @@ -1999,7 +1999,7 @@ ExecStart=/usr/local/bin/kube-apiserver \\ --bind-address=0.0.0.0 \\ --secure-port=6443 \\ --advertise-address=192.168.0.33 \\ - --service-cluster-ip-range=10.96.0.0/12,fd00::/108 \\ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \\ --service-node-port-range=30000-32767 \\ --etcd-servers=https://192.168.0.31:2379,https://192.168.0.32:2379,https://192.168.0.33:2379 \\ --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem \\ @@ -2072,8 +2072,8 @@ ExecStart=/usr/local/bin/kube-controller-manager \\ --node-monitor-period=5s \\ --controllers=*,bootstrapsigner,tokencleaner \\ --allocate-node-cidrs=true \\ - --service-cluster-ip-range=10.96.0.0/12,fd00::/108 \\ - --cluster-cidr=172.16.0.0/12,fc00::/48 \\ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \\ + --cluster-cidr=172.16.0.0/12,fc00:2222::/112 \\ --node-cidr-mask-size-ipv4=24 \\ --node-cidr-mask-size-ipv6=120 \\ --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem @@ -2405,7 +2405,7 @@ clientConnection: contentType: application/vnd.kubernetes.protobuf kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig qps: 5 -clusterCIDR: 172.16.0.0/12,fc00::/48 +clusterCIDR: 172.16.0.0/12,fc00:2222::/112 configSyncPeriod: 15m0s conntrack: max: null diff --git a/doc/v1.28.0-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md b/doc/v1.28.0-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md new file mode 100644 index 0000000..bbd74a4 --- /dev/null +++ b/doc/v1.28.0-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md @@ -0,0 +1,5481 @@ +# 二进制安装Kubernetes(k8s)v1.28.0 + + + +[https://github.com/cby-chen/Kubernetes](https://github.com/cby-chen/Kubernetes) 开源不易,帮忙点个star,谢谢了 + +# 介绍 + +kubernetes(k8s)二进制高可用安装部署,支持IPv4+IPv6双栈。 + +我使用IPV6的目的是在公网进行访问,所以我配置了IPV6静态地址。 + +若您没有IPV6环境,或者不想使用IPv6,不对主机进行配置IPv6地址即可。 + +不配置IPV6,不影响后续,不过集群依旧是支持IPv6的。为后期留有扩展可能性。 + +若不要IPv6 ,不给网卡配置IPv6即可,不要对IPv6相关配置删除或操作,否则会出问题。 + +# 强烈建议在Github上查看文档 !!! + +## Github出问题会更新文档,并且后续尽可能第一时间更新新版本文档 !!! + +## 手动项目地址:https://github.com/cby-chen/Kubernetes + + +# 1.环境 + +| 主机名称 | IP地址 | 说明 | 软件 | +| -------- | --------- | ---------- | ------------------------------------------------------------ | +| | 192.168.1.60 | 外网节点 | 下载各种所需安装包 | +| Master01 | 192.168.0.31 | master节点 | kube-apiserver、kube-controller-manager、kube-scheduler、etcd、
kubelet、kube-proxy、nfs-client、haproxy、keepalived、nginx | +| Master02 | 192.168.0.32 | master节点 | kube-apiserver、kube-controller-manager、kube-scheduler、etcd、
kubelet、kube-proxy、nfs-client、haproxy、keepalived、nginx | +| Master03 | 192.168.0.33 | master节点 | kube-apiserver、kube-controller-manager、kube-scheduler、etcd、
kubelet、kube-proxy、nfs-client、haproxy、keepalived、nginx |
+| Node01 | 192.168.0.34 | node节点 | kubelet、kube-proxy、nfs-client、nginx |
+| Node02 | 192.168.0.35 | node节点 | kubelet、kube-proxy、nfs-client、nginx |
+| | 192.168.0.36 | VIP | |
+
+
+网段
+物理主机:192.168.0.0/24
+service:10.96.0.0/12
+pod:172.16.0.0/12
+
+安装包已经整理好:https://ghproxy.com/https://github.com/cby-chen/Kubernetes/releases/download/v1.28.0/kubernetes-v1.28.0.tar
+
+
+
+## 1.1.k8s基础系统环境配置
+
+### 1.2.配置IP
+
+```shell
+# 注意!
+# 若虚拟机是进行克隆的那么网卡的UUID会重复
+# 若UUID重复需要重新生成新的UUID
+# UUID重复无法获取到IPV6地址
+#
+# 查看当前的网卡列表和 UUID:
+# nmcli con show
+# 删除要更改 UUID 的网络连接:
+# nmcli con delete uuid <原 UUID>
+# 重新生成 UUID:
+# nmcli con add type ethernet ifname <接口名称> con-name <新名称>
+# 重新启用网络连接:
+# nmcli con up <新名称>
+
+# 更改网卡的UUID
+ssh root@192.168.0.31 "nmcli con delete uuid 708a1497-2192-43a5-9f03-2ab936fb3c44;nmcli con add type ethernet ifname eth0 con-name eth0;nmcli con up eth0"
+ssh root@192.168.0.32 "nmcli con delete uuid 708a1497-2192-43a5-9f03-2ab936fb3c44;nmcli con add type ethernet ifname eth0 con-name eth0;nmcli con up eth0"
+ssh root@192.168.0.33 "nmcli con delete uuid 708a1497-2192-43a5-9f03-2ab936fb3c44;nmcli con add type ethernet ifname eth0 con-name eth0;nmcli con up eth0"
+ssh root@192.168.0.34 "nmcli con delete uuid 708a1497-2192-43a5-9f03-2ab936fb3c44;nmcli con add type ethernet ifname eth0 con-name eth0;nmcli con up eth0"
+ssh root@192.168.0.35 "nmcli con delete uuid 708a1497-2192-43a5-9f03-2ab936fb3c44;nmcli con add type ethernet ifname eth0 con-name eth0;nmcli con up eth0"
+
+# 参数解释
+#
+# ssh root@192.168.0.31
+# 使用SSH登录到IP为192.168.0.31的主机,使用root用户身份。
+#
+# nmcli con delete uuid 708a1497-2192-43a5-9f03-2ab936fb3c44
+# 删除 UUID 为 708a1497-2192-43a5-9f03-2ab936fb3c44 的网络连接,这是 NetworkManager 中一种特定网络配置的唯一标识符。
+#
+# nmcli con add type ethernet ifname eth0 con-name eth0
+# 添加一种以太网连接类型,并指定接口名为 eth0,连接名称也为 eth0。
+#
+# nmcli con up eth0
+# 开启 eth0 这个网络连接。
+#
+# 简单来说,这个命令的作用是删除一个特定的网络连接配置,并添加一个名为 eth0 的以太网连接,然后启用这个新的连接。
+
+# 修改静态的IPv4地址
+ssh root@192.168.0.154 "nmcli con mod eth0 ipv4.addresses 192.168.0.31/24; nmcli con mod eth0 ipv4.gateway 192.168.0.1; nmcli con mod eth0 ipv4.method manual; nmcli con mod eth0 ipv4.dns "8.8.8.8"; nmcli con up eth0"
+ssh root@192.168.0.156 "nmcli con mod eth0 ipv4.addresses 192.168.0.32/24; nmcli con mod eth0 ipv4.gateway 192.168.0.1; nmcli con mod eth0 ipv4.method manual; nmcli con mod eth0 ipv4.dns "8.8.8.8"; nmcli con up eth0"
+ssh root@192.168.0.164 "nmcli con mod eth0 ipv4.addresses 192.168.0.33/24; nmcli con mod eth0 ipv4.gateway 192.168.0.1; nmcli con mod eth0 ipv4.method manual; nmcli con mod eth0 ipv4.dns "8.8.8.8"; nmcli con up eth0"
+ssh root@192.168.0.166 "nmcli con mod eth0 ipv4.addresses 192.168.0.34/24; nmcli con mod eth0 ipv4.gateway 192.168.0.1; nmcli con mod eth0 ipv4.method manual; nmcli con mod eth0 ipv4.dns "8.8.8.8"; nmcli con up eth0"
+ssh root@192.168.0.167 "nmcli con mod eth0 ipv4.addresses 192.168.0.35/24; nmcli con mod eth0 ipv4.gateway 192.168.0.1; nmcli con mod eth0 ipv4.method manual; nmcli con mod eth0 ipv4.dns "8.8.8.8"; nmcli con up eth0"
+
+# 参数解释
+#
+# ssh root@192.168.0.154
+# 使用SSH登录到IP为192.168.0.154的主机,使用root用户身份。
+#
+# "nmcli con mod eth0 ipv4.addresses 192.168.0.31/24"
+# 修改eth0网络连接的IPv4地址为192.168.0.31,子网掩码为 24。
+#
+# "nmcli con mod eth0 ipv4.gateway 192.168.0.1"
+# 修改eth0网络连接的IPv4网关为192.168.0.1。
+#
+# "nmcli con mod 
eth0 ipv4.method manual" +# 将eth0网络连接的IPv4配置方法设置为手动。 +# +# "nmcli con mod eth0 ipv4.dns "8.8.8.8" +# 将eth0网络连接的IPv4 DNS服务器设置为 8.8.8.8。 +# +# "nmcli con up eth0" +# 启动eth0网络连接。 +# +# 总体来说,这条命令是通过SSH远程登录到指定的主机,并使用网络管理命令 (nmcli) 修改eth0网络连接的配置,包括IP地址、网关、配置方法和DNS服务器,并启动该网络连接。 + +# 没有IPv6选择不配置即可 +ssh root@192.168.0.31 "nmcli con mod eth0 ipv6.addresses fc00:43f4:1eea:1::10; nmcli con mod eth0 ipv6.gateway fc00:43f4:1eea:1::1; nmcli con mod eth0 ipv6.method manual; nmcli con mod eth0 ipv6.dns "2400:3200::1"; nmcli con up eth0" +ssh root@192.168.0.32 "nmcli con mod eth0 ipv6.addresses fc00:43f4:1eea:1::20; nmcli con mod eth0 ipv6.gateway fc00:43f4:1eea:1::1; nmcli con mod eth0 ipv6.method manual; nmcli con mod eth0 ipv6.dns "2400:3200::1"; nmcli con up eth0" +ssh root@192.168.0.33 "nmcli con mod eth0 ipv6.addresses fc00:43f4:1eea:1::30; nmcli con mod eth0 ipv6.gateway fc00:43f4:1eea:1::1; nmcli con mod eth0 ipv6.method manual; nmcli con mod eth0 ipv6.dns "2400:3200::1"; nmcli con up eth0" +ssh root@192.168.0.34 "nmcli con mod eth0 ipv6.addresses fc00:43f4:1eea:1::40; nmcli con mod eth0 ipv6.gateway fc00:43f4:1eea:1::1; nmcli con mod eth0 ipv6.method manual; nmcli con mod eth0 ipv6.dns "2400:3200::1"; nmcli con up eth0" +ssh root@192.168.0.35 "nmcli con mod eth0 ipv6.addresses fc00:43f4:1eea:1::50; nmcli con mod eth0 ipv6.gateway fc00:43f4:1eea:1::1; nmcli con mod eth0 ipv6.method manual; nmcli con mod eth0 ipv6.dns "2400:3200::1"; nmcli con up eth0" + +# 参数解释 +# +# ssh root@192.168.0.31 +# 通过SSH连接到IP地址为192.168.0.31的远程主机,使用root用户进行登录。 +# +# "nmcli con mod eth0 ipv6.addresses fc00:43f4:1eea:1::10" +# 使用nmcli命令修改eth0接口的IPv6地址为fc00:43f4:1eea:1::10。 +# +# "nmcli con mod eth0 ipv6.gateway fc00:43f4:1eea:1::1" +# 使用nmcli命令修改eth0接口的IPv6网关为fc00:43f4:1eea:1::1。 +# +# "nmcli con mod eth0 ipv6.method manual" +# 使用nmcli命令将eth0接口的IPv6配置方法修改为手动配置。 +# +# "nmcli con mod eth0 ipv6.dns "2400:3200::1" +# 使用nmcli命令设置eth0接口的IPv6 DNS服务器为2400:3200::1。 +# +# "nmcli con up eth0" +# 使用nmcli命令启动eth0接口。 +# +# 这个命令的目的是在远程主机上配置eth0接口的IPv6地址、网关、配置方法和DNS服务器,并启动eth0接口。 + + +# 查看网卡配置 +# nmcli device show eth0 +# nmcli con show eth0 +[root@localhost ~]# cat /etc/sysconfig/network-scripts/ifcfg-eth0 +TYPE=Ethernet +PROXY_METHOD=none +BROWSER_ONLY=no +BOOTPROTO=none +DEFROUTE=yes +IPV4_FAILURE_FATAL=no +IPV6INIT=yes +IPV6_AUTOCONF=no +IPV6_DEFROUTE=yes +IPV6_FAILURE_FATAL=no +IPV6_ADDR_GEN_MODE=stable-privacy +NAME=eth0 +UUID=424fd260-c480-4899-97e6-6fc9722031e8 +DEVICE=eth0 +ONBOOT=yes +IPADDR=192.168.0.31 +PREFIX=24 +GATEWAY=192.168.8.1 +DNS1=8.8.8.8 +IPV6ADDR=fc00:43f4:1eea:1::10/128 +IPV6_DEFAULTGW=fc00:43f4:1eea:1::1 +DNS2=2400:3200::1 +[root@localhost ~]# + + + +# 参数解释 +# +# TYPE=Ethernet +# 指定连接类型为以太网。 +# +# PROXY_METHOD=none +# 指定不使用代理方法。 +# +# BROWSER_ONLY=no +# 指定不仅仅在浏览器中使用代理。 +# +# BOOTPROTO=none +# 指定自动分配地址的方式为无(即手动配置IP地址)。 +# +# DEFROUTE=yes +# 指定默认路由开启。 +# +# IPV4_FAILURE_FATAL=no +# 指定IPv4连接失败时不宣告严重错误。 +# +# IPV6INIT=yes +# 指定启用IPv6。 +# +# IPV6_AUTOCONF=no +# 指定不自动配置IPv6地址。 +# +# IPV6_DEFROUTE=yes +# 指定默认IPv6路由开启。 +# +# IPV6_FAILURE_FATAL=no +# 指定IPv6连接失败时不宣告严重错误。 +# +# IPV6_ADDR_GEN_MODE=stable-privacy +# 指定IPv6地址生成模式为稳定隐私模式。 +# +# NAME=eth0 +# 指定设备名称为eth0。 +# +# UUID=424fd260-c480-4899-97e6-6fc9722031e8 +# 指定设备的唯一标识符。 +# +# DEVICE=eth0 +# 指定设备名称为eth0。 +# +# ONBOOT=yes +# 指定开机自动启用这个连接。 +# +# IPADDR=192.168.0.31 +# 指定IPv4地址为192.168.0.31。 +# +# PREFIX=24 +# 指定IPv4地址的子网掩码为24。 +# +# GATEWAY=192.168.8.1 +# 指定IPv4的网关地址为192.168.8.1。 +# +# DNS1=8.8.8.8 +# 指定首选DNS服务器为8.8.8.8。 +# +# IPV6ADDR=fc00:43f4:1eea:1::10/128 +# 
指定IPv6地址为fc00:43f4:1eea:1::10,子网掩码为128。 +# +# IPV6_DEFAULTGW=fc00:43f4:1eea:1::1 +# 指定IPv6的默认网关地址为fc00:43f4:1eea:1::1。 +# +# DNS2=2400:3200::1 +# 指定备用DNS服务器为2400:3200::1。 +``` + +### 1.3.设置主机名 + +```shell +hostnamectl set-hostname k8s-master01 +hostnamectl set-hostname k8s-master02 +hostnamectl set-hostname k8s-master03 +hostnamectl set-hostname k8s-node01 +hostnamectl set-hostname k8s-node02 + +# 参数解释 +# +# 参数: set-hostname +# 解释: 这是hostnamectl命令的一个参数,用于设置系统的主机名。 +# +# 参数: k8s-master01 +# 解释: 这是要设置的主机名,将系统的主机名设置为"k8s-master01"。 +``` + + +### 1.4.配置yum源 + +```shell +# 其他系统的源地址 +# https://mirrors.tuna.tsinghua.edu.cn/help/ + +# 对于 Ubuntu +sed -i 's/cn.archive.ubuntu.com/mirrors.ustc.edu.cn/g' /etc/apt/sources.list + +# 对于 CentOS 7 +sudo sed -e 's|^mirrorlist=|#mirrorlist=|g' \ + -e 's|^#baseurl=http://mirror.centos.org/centos|baseurl=https://mirrors.tuna.tsinghua.edu.cn/centos|g' \ + -i.bak \ + /etc/yum.repos.d/CentOS-*.repo + +# 对于 CentOS 8 +sudo sed -e 's|^mirrorlist=|#mirrorlist=|g' \ + -e 's|^#baseurl=http://mirror.centos.org/$contentdir|baseurl=https://mirrors.tuna.tsinghua.edu.cn/centos|g' \ + -i.bak \ + /etc/yum.repos.d/CentOS-*.repo + +# 对于私有仓库 +sed -e 's|^mirrorlist=|#mirrorlist=|g' -e 's|^#baseurl=http://mirror.centos.org/\$contentdir|baseurl=http://192.168.1.123/centos|g' -i.bak /etc/yum.repos.d/CentOS-*.repo + +# 参数解释 +# +# 以上命令是用于更改系统软件源的配置,以便从国内镜像站点下载软件包和更新。 +# +# 对于 Ubuntu 系统,将 /etc/apt/sources.list 文件中的软件源地址 cn.archive.ubuntu.com 替换为 mirrors.ustc.edu.cn。 +# +# 对于 CentOS 7 系统,将 /etc/yum.repos.d/CentOS-*.repo 文件中的 mirrorlist 注释掉,并将 baseurl 的值替换为 https://mirrors.tuna.tsinghua.edu.cn/centos。 +# +# 对于 CentOS 8 系统,同样将 /etc/yum.repos.d/CentOS-*.repo 文件中的 mirrorlist 注释掉,并将 baseurl 的值替换为 https://mirrors.tuna.tsinghua.edu.cn/centos。 +# +# 对于私有仓库,将 /etc/yum.repos.d/CentOS-*.repo 文件中的 mirrorlist 注释掉,并将 baseurl 的值替换为私有仓库地址 http://192.168.1.123/centos。 +# +# 这些命令通过使用 sed 工具和正则表达式,对相应的配置文件进行批量的替换操作,从而更改系统软件源配置。 +``` + +### 1.5.安装一些必备工具 + +```shell +# 对于 Ubuntu +apt update && apt upgrade -y && apt install -y wget psmisc vim net-tools nfs-kernel-server telnet lvm2 git tar curl + +# 对于 CentOS 7 +yum update -y && yum -y install wget psmisc vim net-tools nfs-utils telnet yum-utils device-mapper-persistent-data lvm2 git tar curl + +# 对于 CentOS 8 +yum update -y && yum -y install wget psmisc vim net-tools nfs-utils telnet yum-utils device-mapper-persistent-data lvm2 git network-scripts tar curl +``` + +#### 1.5.1 下载离线所需文件(可选) + + 在互联网服务器上安装一个一模一样的系统进行下载所需包 + +##### CentOS7 +```shell +# 下载必要工具 +yum -y install createrepo yum-utils wget epel* + +# 下载全量依赖包 +repotrack createrepo wget psmisc vim net-tools nfs-utils telnet yum-utils device-mapper-persistent-data lvm2 git tar curl gcc keepalived haproxy bash-completion chrony sshpass ipvsadm ipset sysstat conntrack libseccomp + +# 删除libseccomp +rm -rf libseccomp-*.rpm + +# 下载libseccomp +wget http://rpmfind.net/linux/centos/8-stream/BaseOS/x86_64/os/Packages/libseccomp-2.5.1-1.el8.x86_64.rpm + +# 创建yum源信息 +createrepo -u -d /data/centos7/ + +# 拷贝包到内网机器上 +scp -r /data/centos7/ root@192.168.0.31: +scp -r /data/centos7/ root@192.168.0.32: +scp -r /data/centos7/ root@192.168.0.33: +scp -r /data/centos7/ root@192.168.0.34: +scp -r /data/centos7/ root@192.168.0.35: + +# 在内网机器上创建repo配置文件 +rm -rf /etc/yum.repos.d/* +cat > /etc/yum.repos.d/123.repo << EOF +[cby] +name=CentOS-$releasever - Media +baseurl=file:///root/centos7/ +gpgcheck=0 +enabled=1 +EOF + +# 安装下载好的包 +yum clean all +yum makecache +yum install /root/centos7/* --skip-broken -y + +#### 备注 ##### +# 
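+# 补充示例(示意,非原文步骤,假设本地源路径与上文一致为 /root/centos7/):
+# 若后续 yum 安装仍提示找不到源,可先核对 createrepo 生成的元数据与源配置是否生效
+# ls /root/centos7/repodata/repomd.xml
+# yum repolist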
安装完成后,可能还会出现yum无法使用那么再次执行 +rm -rf /etc/yum.repos.d/* +cat > /etc/yum.repos.d/123.repo << EOF +[cby] +name=CentOS-$releasever - Media +baseurl=file:///root/centos7/ +gpgcheck=0 +enabled=1 +EOF +yum clean all +yum makecache +yum install /root/centos7/* --skip-broken -y + +#### 备注 ##### +# 安装 chrony 和 libseccomp +# yum install /root/centos7/libseccomp-2.5.1*.rpm -y +# yum install /root/centos7/chrony-*.rpm -y +``` +##### CentOS8 +```shell +# 下载必要工具 +yum -y install createrepo yum-utils wget epel* + +# 下载全量依赖包 +repotrack wget psmisc vim net-tools nfs-utils telnet yum-utils device-mapper-persistent-data lvm2 git network-scripts tar curl gcc keepalived haproxy bash-completion chrony sshpass ipvsadm ipset sysstat conntrack libseccomp + +# 创建yum源信息 +createrepo -u -d /data/centos8/ + +# 拷贝包到内网机器上 +scp -r centos8/ root@192.168.0.31: +scp -r centos8/ root@192.168.0.32: +scp -r centos8/ root@192.168.0.33: +scp -r centos8/ root@192.168.0.34: +scp -r centos8/ root@192.168.0.35: + +# 在内网机器上创建repo配置文件 +rm -rf /etc/yum.repos.d/* +cat > /etc/yum.repos.d/123.repo << EOF +[cby] +name=CentOS-$releasever - Media +baseurl=file:///root/centos8/ +gpgcheck=0 +enabled=1 +EOF + +# 安装下载好的包 +yum clean all +yum makecache +yum install /root/centos8/* --skip-broken -y + +#### 备注 ##### +# 安装完成后,可能还会出现yum无法使用那么再次执行 +rm -rf /etc/yum.repos.d/* +cat > /etc/yum.repos.d/123.repo << EOF +[cby] +name=CentOS-$releasever - Media +baseurl=file:///root/centos8/ +gpgcheck=0 +enabled=1 +EOF +yum clean all +yum makecache +yum install /root/centos8/* --skip-broken -y +``` + +##### Ubuntu 下载包和依赖 +```shell +#!/bin/bash + +logfile=123.log +ret="" +function getDepends() +{ + echo "fileName is" $1>>$logfile + # use tr to del < > + ret=`apt-cache depends $1|grep Depends |cut -d: -f2 |tr -d "<>"` + echo $ret|tee -a $logfile +} +# 需要获取其所依赖包的包 +libs="wget psmisc vim net-tools nfs-kernel-server telnet lvm2 git tar curl gcc keepalived haproxy bash-completion chrony sshpass ipvsadm ipset sysstat conntrack libseccomp" + +# download libs dependen. deep in 3 +i=0 +while [ $i -lt 3 ] ; +do + let i++ + echo $i + # download libs + newlist=" " + for j in $libs + do + added="$(getDepends $j)" + newlist="$newlist $added" + apt install $added --reinstall -d -y + done + + libs=$newlist +done + +# 创建源信息 +apt install dpkg-dev +sudo cp /var/cache/apt/archives/*.deb /data/ubuntu/ -r +dpkg-scanpackages . 
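+# 补充示例(示意,假设 deb 包已按上文拷贝到 /data/ubuntu/):
+# 生成索引前可先核对缓存的 deb 包数量是否符合预期
+# ls /data/ubuntu/*.deb | wc -l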
/dev/null |gzip > /data/ubuntu/Packages.gz -r + +# 拷贝包到内网机器上 +scp -r ubuntu/ root@192.168.0.31: +scp -r ubuntu/ root@192.168.0.32: +scp -r ubuntu/ root@192.168.0.33: +scp -r ubuntu/ root@192.168.0.34: +scp -r ubuntu/ root@192.168.0.35: + +# 在内网机器上配置apt源 +vim /etc/apt/sources.list +cat /etc/apt/sources.list +deb file:////root/ ubuntu/ + +# 安装deb包 +apt install ./*.deb + +``` + + +### 1.6.选择性下载需要工具 + +```shell +#!/bin/bash + +# 查看版本地址: +# +# https://github.com/containernetworking/plugins/releases/ +# https://github.com/containerd/containerd/releases/ +# https://github.com/kubernetes-sigs/cri-tools/releases/ +# https://github.com/Mirantis/cri-dockerd/releases/ +# https://github.com/etcd-io/etcd/releases/ +# https://github.com/cloudflare/cfssl/releases/ +# https://github.com/kubernetes/kubernetes/tree/master/CHANGELOG +# https://download.docker.com/linux/static/stable/x86_64/ +# https://github.com/opencontainers/runc/releases/ +# https://mirrors.tuna.tsinghua.edu.cn/elrepo/kernel/el7/x86_64/RPMS/ +# https://github.com/helm/helm/tags +# http://nginx.org/download/ + +# Version numbers +cni_plugins_version='v1.3.0' +cri_containerd_cni_version='1.7.3' +crictl_version='v1.28.0' +cri_dockerd_version='0.3.4' +etcd_version='v3.5.9' +cfssl_version='1.6.4' +kubernetes_server_version='1.28.0' +docker_version='24.0.5' +runc_version='1.1.9' +kernel_version='5.4.254' +helm_version='3.12.3' +nginx_version='1.25.2' + +# URLs +base_url='https://ghproxy.com/https://github.com' +kernel_url="http://mirrors.tuna.tsinghua.edu.cn/elrepo/kernel/el7/x86_64/RPMS/kernel-lt-${kernel_version}-1.el7.elrepo.x86_64.rpm" +runc_url="${base_url}/opencontainers/runc/releases/download/v${runc_version}/runc.amd64" +docker_url="https://download.docker.com/linux/static/stable/x86_64/docker-${docker_version}.tgz" +cni_plugins_url="${base_url}/containernetworking/plugins/releases/download/${cni_plugins_version}/cni-plugins-linux-amd64-${cni_plugins_version}.tgz" +cri_containerd_cni_url="${base_url}/containerd/containerd/releases/download/v${cri_containerd_cni_version}/cri-containerd-cni-${cri_containerd_cni_version}-linux-amd64.tar.gz" +crictl_url="${base_url}/kubernetes-sigs/cri-tools/releases/download/${crictl_version}/crictl-${crictl_version}-linux-amd64.tar.gz" +cri_dockerd_url="${base_url}/Mirantis/cri-dockerd/releases/download/v${cri_dockerd_version}/cri-dockerd-${cri_dockerd_version}.amd64.tgz" +etcd_url="${base_url}/etcd-io/etcd/releases/download/${etcd_version}/etcd-${etcd_version}-linux-amd64.tar.gz" +cfssl_url="${base_url}/cloudflare/cfssl/releases/download/v${cfssl_version}/cfssl_${cfssl_version}_linux_amd64" +cfssljson_url="${base_url}/cloudflare/cfssl/releases/download/v${cfssl_version}/cfssljson_${cfssl_version}_linux_amd64" +helm_url="https://mirrors.huaweicloud.com/helm/v${helm_version}/helm-v${helm_version}-linux-amd64.tar.gz" +kubernetes_server_url="https://storage.googleapis.com/kubernetes-release/release/v${kubernetes_server_version}/kubernetes-server-linux-amd64.tar.gz" +nginx_url="http://nginx.org/download/nginx-${nginx_version}.tar.gz" + +# Download packages +packages=( + $kernel_url + $runc_url + $docker_url + $cni_plugins_url + $cri_containerd_cni_url + $crictl_url + $cri_dockerd_url + $etcd_url + $cfssl_url + $cfssljson_url + $helm_url + $kubernetes_server_url + $nginx_url +) + +for package_url in "${packages[@]}"; do + filename=$(basename "$package_url") + if curl --parallel --parallel-immediate -k -L -C - -o "$filename" "$package_url"; then + echo "Downloaded $filename" + else + echo "Failed to download 
$filename" + exit 1 + fi +done +``` + +### 1.7.关闭防火墙 + +```shell +# Ubuntu忽略,CentOS执行 +systemctl disable --now firewalld +``` + +### 1.8.关闭SELinux + +```shell +# Ubuntu忽略,CentOS执行 +setenforce 0 +sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config + +# 参数解释 +# +# setenforce 0 +# 此命令用于设置 SELinux 的执行模式。0 表示关闭 SELinux。 +# +# sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config +# 该命令使用 sed 工具来编辑 /etc/selinux/config 文件。其中 '-i' 参数表示直接修改原文件,而不是输出到终端或另一个文件。's#SELINUX=enforcing#SELINUX=disabled#g' 是 sed 的替换命令,它将文件中所有的 "SELINUX=enforcing" 替换为 "SELINUX=disabled"。这里的 '#' 是分隔符,用于替代传统的 '/' 分隔符,以避免与路径中的 '/' 冲突。 +``` + +### 1.9.关闭交换分区 + +```shell +sed -ri 's/.*swap.*/#&/' /etc/fstab +swapoff -a && sysctl -w vm.swappiness=0 + +cat /etc/fstab +# /dev/mapper/centos-swap swap swap defaults 0 0 + + +# 参数解释: +# +# -ri: 这个参数用于在原文件中替换匹配的模式。-r表示扩展正则表达式,-i允许直接修改文件。 +# 's/.*swap.*/#&/': 这是一个sed命令,用于在文件/etc/fstab中找到包含swap的行,并在行首添加#来注释掉该行。 +# /etc/fstab: 这是一个文件路径,即/etc/fstab文件,用于存储文件系统表。 +# swapoff -a: 这个命令用于关闭所有启用的交换分区。 +# sysctl -w vm.swappiness=0: 这个命令用于修改vm.swappiness参数的值为0,表示系统在物理内存充足时更倾向于使用物理内存而非交换分区。 +``` + +### 1.10.网络配置(俩种方式二选一) + +```shell +# Ubuntu忽略,CentOS执行 + +# 方式一 +# systemctl disable --now NetworkManager +# systemctl start network && systemctl enable network + +# 方式二 +cat > /etc/NetworkManager/conf.d/calico.conf << EOF +[keyfile] +unmanaged-devices=interface-name:cali*;interface-name:tunl* +EOF +systemctl restart NetworkManager + +# 参数解释 +# +# 这个参数用于指定不由 NetworkManager 管理的设备。它由以下两个部分组成 +# +# interface-name:cali* +# 表示以 "cali" 开头的接口名称被排除在 NetworkManager 管理之外。例如,"cali0", "cali1" 等接口不受 NetworkManager 管理。 +# +# interface-name:tunl* +# 表示以 "tunl" 开头的接口名称被排除在 NetworkManager 管理之外。例如,"tunl0", "tunl1" 等接口不受 NetworkManager 管理。 +# +# 通过使用这个参数,可以将特定的接口排除在 NetworkManager 的管理范围之外,以便其他工具或进程可以独立地管理和配置这些接口。 +``` + +### 1.11.进行时间同步 + +```shell +# 服务端 +# apt install chrony -y +yum install chrony -y +cat > /etc/chrony.conf << EOF +pool ntp.aliyun.com iburst +driftfile /var/lib/chrony/drift +makestep 1.0 3 +rtcsync +allow 192.168.0.0/24 +local stratum 10 +keyfile /etc/chrony.keys +leapsectz right/UTC +logdir /var/log/chrony +EOF + +systemctl restart chronyd ; systemctl enable chronyd + +# 客户端 +# apt install chrony -y +yum install chrony -y +cat > /etc/chrony.conf << EOF +pool 192.168.0.31 iburst +driftfile /var/lib/chrony/drift +makestep 1.0 3 +rtcsync +keyfile /etc/chrony.keys +leapsectz right/UTC +logdir /var/log/chrony +EOF + +systemctl restart chronyd ; systemctl enable chronyd + +#使用客户端进行验证 +chronyc sources -v + +# 参数解释 +# +# pool ntp.aliyun.com iburst +# 指定使用ntp.aliyun.com作为时间服务器池,iburst选项表示在初始同步时会发送多个请求以加快同步速度。 +# +# driftfile /var/lib/chrony/drift +# 指定用于保存时钟漂移信息的文件路径。 +# +# makestep 1.0 3 +# 设置当系统时间与服务器时间偏差大于1秒时,会以1秒的步长进行调整。如果偏差超过3秒,则立即进行时间调整。 +# +# rtcsync +# 启用硬件时钟同步功能,可以提高时钟的准确性。 +# +# allow 192.168.0.0/24 +# 允许192.168.0.0/24网段范围内的主机与chrony进行时间同步。 +# +# local stratum 10 +# 将本地时钟设为stratum 10,stratum值表示时钟的准确度,值越小表示准确度越高。 +# +# keyfile /etc/chrony.keys +# 指定使用的密钥文件路径,用于对时间同步进行身份验证。 +# +# leapsectz right/UTC +# 指定时区为UTC。 +# +# logdir /var/log/chrony +# 指定日志文件存放目录。 +``` + +### 1.12.配置ulimit + +```shell +ulimit -SHn 65535 +cat >> /etc/security/limits.conf <> /etc/modules-load.d/ipvs.conf < /etc/sysctl.d/k8s.conf +net.ipv4.ip_forward = 1 +net.bridge.bridge-nf-call-iptables = 1 +fs.may_detach_mounts = 1 +vm.overcommit_memory=1 +vm.panic_on_oom=0 +fs.inotify.max_user_watches=89100 +fs.file-max=52706963 +fs.nr_open=52706963 +net.netfilter.nf_conntrack_max=2310720 + 
+net.ipv4.tcp_keepalive_time = 600 +net.ipv4.tcp_keepalive_probes = 3 +net.ipv4.tcp_keepalive_intvl =15 +net.ipv4.tcp_max_tw_buckets = 36000 +net.ipv4.tcp_tw_reuse = 1 +net.ipv4.tcp_max_orphans = 327680 +net.ipv4.tcp_orphan_retries = 3 +net.ipv4.tcp_syncookies = 1 +net.ipv4.tcp_max_syn_backlog = 16384 +net.ipv4.ip_conntrack_max = 65536 +net.ipv4.tcp_max_syn_backlog = 16384 +net.ipv4.tcp_timestamps = 0 +net.core.somaxconn = 16384 + +net.ipv6.conf.all.disable_ipv6 = 0 +net.ipv6.conf.default.disable_ipv6 = 0 +net.ipv6.conf.lo.disable_ipv6 = 0 +net.ipv6.conf.all.forwarding = 1 +EOF + +sysctl --system + +# 这些是Linux系统的一些参数设置,用于配置和优化网络、文件系统和虚拟内存等方面的功能。以下是每个参数的详细解释: +# +# 1. net.ipv4.ip_forward = 1 +# - 这个参数启用了IPv4的IP转发功能,允许服务器作为网络路由器转发数据包。 +# +# 2. net.bridge.bridge-nf-call-iptables = 1 +# - 当使用网络桥接技术时,将数据包传递到iptables进行处理。 +# +# 3. fs.may_detach_mounts = 1 +# - 允许在挂载文件系统时,允许被其他进程使用。 +# +# 4. vm.overcommit_memory=1 +# - 该设置允许原始的内存过量分配策略,当系统的内存已经被完全使用时,系统仍然会分配额外的内存。 +# +# 5. vm.panic_on_oom=0 +# - 当系统内存不足(OOM)时,禁用系统崩溃和重启。 +# +# 6. fs.inotify.max_user_watches=89100 +# - 设置系统允许一个用户的inotify实例可以监控的文件数目的上限。 +# +# 7. fs.file-max=52706963 +# - 设置系统同时打开的文件数的上限。 +# +# 8. fs.nr_open=52706963 +# - 设置系统同时打开的文件描述符数的上限。 +# +# 9. net.netfilter.nf_conntrack_max=2310720 +# - 设置系统可以创建的网络连接跟踪表项的最大数量。 +# +# 10. net.ipv4.tcp_keepalive_time = 600 +# - 设置TCP套接字的空闲超时时间(秒),超过该时间没有活动数据时,内核会发送心跳包。 +# +# 11. net.ipv4.tcp_keepalive_probes = 3 +# - 设置未收到响应的TCP心跳探测次数。 +# +# 12. net.ipv4.tcp_keepalive_intvl = 15 +# - 设置TCP心跳探测的时间间隔(秒)。 +# +# 13. net.ipv4.tcp_max_tw_buckets = 36000 +# - 设置系统可以使用的TIME_WAIT套接字的最大数量。 +# +# 14. net.ipv4.tcp_tw_reuse = 1 +# - 启用TIME_WAIT套接字的重新利用,允许新的套接字使用旧的TIME_WAIT套接字。 +# +# 15. net.ipv4.tcp_max_orphans = 327680 +# - 设置系统可以同时存在的TCP套接字垃圾回收包裹数的最大数量。 +# +# 16. net.ipv4.tcp_orphan_retries = 3 +# - 设置系统对于孤立的TCP套接字的重试次数。 +# +# 17. net.ipv4.tcp_syncookies = 1 +# - 启用TCP SYN cookies保护,用于防止SYN洪泛攻击。 +# +# 18. net.ipv4.tcp_max_syn_backlog = 16384 +# - 设置新的TCP连接的半连接数(半连接队列)的最大长度。 +# +# 19. net.ipv4.ip_conntrack_max = 65536 +# - 设置系统可以创建的网络连接跟踪表项的最大数量。 +# +# 20. net.ipv4.tcp_timestamps = 0 +# - 关闭TCP时间戳功能,用于提供更好的安全性。 +# +# 21. net.core.somaxconn = 16384 +# - 设置系统核心层的连接队列的最大值。 +# +# 22. net.ipv6.conf.all.disable_ipv6 = 0 +# - 启用IPv6协议。 +# +# 23. net.ipv6.conf.default.disable_ipv6 = 0 +# - 启用IPv6协议。 +# +# 24. net.ipv6.conf.lo.disable_ipv6 = 0 +# - 启用IPv6协议。 +# +# 25. net.ipv6.conf.all.forwarding = 1 +# - 允许IPv6数据包转发。 +``` + +### 1.18.所有节点配置hosts本地解析 + +```shell +cat > /etc/hosts < /etc/systemd/system/containerd.service < /etc/containerd/certs.d/docker.io/hosts.toml << EOF +server = "https://docker.io" +[host."https://hub-mirror.c.163.com"] + capabilities = ["pull", "resolve"] +EOF + +# 注意! 
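+# 补充示例(示意,假设 /etc/containerd/config.toml 已按上文生成并重启了 containerd):
+# 可先核对 SystemdCgroup 是否已为 true,再继续后续步骤
+# grep -n "SystemdCgroup" /etc/containerd/config.toml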
+# SystemdCgroup参数是containerd中的一个配置参数,用于设置containerd在运行过程中使用的Cgroup(控制组)路径。Containerd使用SystemdCgroup参数来指定应该使用哪个Cgroup来跟踪和管理容器的资源使用。 +# +# Cgroup是Linux内核提供的一种资源隔离和管理机制,可以用于限制、分配和监控进程组的资源使用。使用Cgroup,可以将容器的资源限制和隔离,以防止容器之间的资源争用和不公平的竞争。 +# +# 通过设置SystemdCgroup参数,可以确保containerd能够找到正确的Cgroup路径,并正确地限制和隔离容器的资源使用,确保容器可以按照预期的方式运行。如果未正确设置SystemdCgroup参数,可能会导致容器无法正确地使用资源,或者无法保证资源的公平分配和隔离。 +# +# 总而言之,SystemdCgroup参数的作用是为了确保containerd能够正确地管理容器的资源使用,以实现资源的限制、隔离和公平分配。 +``` + +### 2.1.5启动并设置为开机启动 + +```shell +systemctl daemon-reload +# 用于重新加载systemd管理的单位文件。当你新增或修改了某个单位文件(如.service文件、.socket文件等),需要运行该命令来刷新systemd对该文件的配置。 + +systemctl enable --now containerd.service +# 启用并立即启动docker.service单元。docker.service是Docker守护进程的systemd服务单元。 + +systemctl stop containerd.service +# 停止运行中的docker.service单元,即停止Docker守护进程。 + +systemctl start containerd.service +# 启动docker.service单元,即启动Docker守护进程。 + +systemctl restart containerd.service +# 重启docker.service单元,即重新启动Docker守护进程。 + +systemctl status containerd.service +# 显示docker.service单元的当前状态,包括运行状态、是否启用等信息。 +``` + +### 2.1.6配置crictl客户端连接的运行时位置 + +```shell +# https://github.com/kubernetes-sigs/cri-tools/releases/ +# wget https://ghproxy.com/https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.28.0/crictl-v1.28.0-linux-amd64.tar.gz + +#解压 +tar xf crictl-v*-linux-amd64.tar.gz -C /usr/bin/ +#生成配置文件 +cat > /etc/crictl.yaml </etc/systemd/system/containerd.service < /etc/systemd/system/docker.service < /etc/systemd/system/docker.socket </etc/docker/daemon.json < /usr/lib/systemd/system/cri-docker.service < /usr/lib/systemd/system/cri-docker.socket < ca-config.json << EOF +{ + "signing": { + "default": { + "expiry": "876000h" + }, + "profiles": { + "kubernetes": { + "usages": [ + "signing", + "key encipherment", + "server auth", + "client auth" + ], + "expiry": "876000h" + } + } + } +} +EOF +# 这段配置文件是用于配置加密和认证签名的一些参数。 +# +# 在这里,有两个部分:`signing`和`profiles`。 +# +# `signing`包含了默认签名配置和配置文件。 +# 默认签名配置`default`指定了证书的过期时间为`876000h`。`876000h`表示证书有效期为100年。 +# +# `profiles`部分定义了不同的证书配置文件。 +# 在这里,只有一个配置文件`kubernetes`。它包含了以下`usages`和过期时间`expiry`: +# +# 1. `signing`:用于对其他证书进行签名 +# 2. `key encipherment`:用于加密和解密传输数据 +# 3. `server auth`:用于服务器身份验证 +# 4. 
`client auth`:用于客户端身份验证 +# +# 对于`kubernetes`配置文件,证书的过期时间也是`876000h`,即100年。 + +cat > etcd-ca-csr.json << EOF +{ + "CN": "etcd", + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ + { + "C": "CN", + "ST": "Beijing", + "L": "Beijing", + "O": "etcd", + "OU": "Etcd Security" + } + ], + "ca": { + "expiry": "876000h" + } +} +EOF +# 这是一个用于生成证书签名请求(Certificate Signing Request,CSR)的JSON配置文件。JSON配置文件指定了生成证书签名请求所需的数据。 +# +# - "CN": "etcd" 指定了希望生成的证书的CN字段(Common Name),即证书的主题,通常是该证书标识的实体的名称。 +# - "key": {} 指定了生成证书所使用的密钥的配置信息。"algo": "rsa" 指定了密钥的算法为RSA,"size": 2048 指定了密钥的长度为2048位。 +# - "names": [] 包含了生成证书时所需的实体信息。在这个例子中,只包含了一个实体,其相关信息如下: +# - "C": "CN" 指定了实体的国家/地区代码,这里是中国。 +# - "ST": "Beijing" 指定了实体所在的省/州。 +# - "L": "Beijing" 指定了实体所在的城市。 +# - "O": "etcd" 指定了实体的组织名称。 +# - "OU": "Etcd Security" 指定了实体所属的组织单位。 +# - "ca": {} 指定了生成证书时所需的CA(Certificate Authority)配置信息。 +# - "expiry": "876000h" 指定了证书的有效期,这里是876000小时。 +# +# 生成证书签名请求时,可以使用这个JSON配置文件作为输入,根据配置文件中的信息生成相应的CSR文件。然后,可以将CSR文件发送给CA进行签名,以获得有效的证书。 + +# 生成etcd证书和etcd证书的key(如果你觉得以后可能会扩容,可以在ip那多写几个预留出来) +# 若没有IPv6 可删除可保留 + +cfssl gencert -initca etcd-ca-csr.json | cfssljson -bare /etc/etcd/ssl/etcd-ca +# 具体的解释如下: +# +# cfssl是一个用于生成TLS/SSL证书的工具,它支持PKI、JSON格式配置文件以及与许多其他集成工具的配合使用。 +# +# gencert参数表示生成证书的操作。-initca参数表示初始化一个CA(证书颁发机构)。CA是用于签发其他证书的根证书。etcd-ca-csr.json是一个JSON格式的配置文件,其中包含了CA的详细信息,如私钥、公钥、有效期等。这个文件提供了生成CA证书所需的信息。 +# +# | 符号表示将上一个命令的输出作为下一个命令的输入。 +# +# cfssljson是cfssl工具的一个子命令,用于格式化cfssl生成的JSON数据。 -bare参数表示直接输出裸证书,即只生成证书文件,不包含其他格式的文件。/etc/etcd/ssl/etcd-ca是指定生成的证书文件的路径和名称。 +# +# 所以,这条命令的含义是使用cfssl工具根据配置文件ca-csr.json生成一个CA证书,并将证书文件保存在/etc/etcd/ssl/etcd-ca路径下。 + +cat > etcd-csr.json << EOF +{ + "CN": "etcd", + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ + { + "C": "CN", + "ST": "Beijing", + "L": "Beijing", + "O": "etcd", + "OU": "Etcd Security" + } + ] +} +EOF +# 这段代码是一个JSON格式的配置文件,用于生成一个证书签名请求(Certificate Signing Request,CSR)。 +# +# 首先,"CN"字段指定了该证书的通用名称(Common Name),这里设为"etcd"。 +# +# 接下来,"key"字段指定了密钥的算法("algo"字段)和长度("size"字段),此处使用的是RSA算法,密钥长度为2048位。 +# +# 最后,"names"字段是一个数组,其中包含了一个名字对象,用于指定证书中的一些其他信息。这个名字对象包含了以下字段: +# - "C"字段指定了国家代码(Country),这里设置为"CN"。 +# - "ST"字段指定了省份(State)或地区,这里设置为"Beijing"。 +# - "L"字段指定了城市(Locality),这里设置为"Beijing"。 +# - "O"字段指定了组织(Organization),这里设置为"etcd"。 +# - "OU"字段指定了组织单元(Organizational Unit),这里设置为"Etcd Security"。 +# +# 这些字段将作为证书的一部分,用于标识和验证证书的使用范围和颁发者等信息。 + +cfssl gencert \ + -ca=/etc/etcd/ssl/etcd-ca.pem \ + -ca-key=/etc/etcd/ssl/etcd-ca-key.pem \ + -config=ca-config.json \ + -hostname=127.0.0.1,k8s-master01,k8s-master02,k8s-master03,192.168.0.31,192.168.0.32,192.168.0.33,fc00:43f4:1eea:1::10,fc00:43f4:1eea:1::20,fc00:43f4:1eea:1::30,::1 \ + -profile=kubernetes \ + etcd-csr.json | cfssljson -bare /etc/etcd/ssl/etcd +# 这是一条使用cfssl生成etcd证书的命令,下面是各个参数的解释: +# +# -ca=/etc/etcd/ssl/etcd-ca.pem:指定用于签名etcd证书的CA文件的路径。 +# -ca-key=/etc/etcd/ssl/etcd-ca-key.pem:指定用于签名etcd证书的CA私钥文件的路径。 +# -config=ca-config.json:指定CA配置文件的路径,该文件定义了证书的有效期、加密算法等设置。 +# -hostname=xxxx:指定要为etcd生成证书的主机名和IP地址列表。 +# -profile=kubernetes:指定使用的证书配置文件,该文件定义了证书的用途和扩展属性。 +# etcd-csr.json:指定etcd证书请求的JSON文件的路径,该文件包含了证书请求的详细信息。 +# | cfssljson -bare /etc/etcd/ssl/etcd:通过管道将cfssl命令的输出传递给cfssljson命令,并使用-bare参数指定输出文件的前缀路径,这里将生成etcd证书的.pem和-key.pem文件。 +# +# 这条命令的作用是使用指定的CA证书和私钥,根据证书请求的JSON文件和配置文件生成etcd的证书文件。 +``` + +### 3.1.3将证书复制到其他节点 + +```shell +Master='k8s-master02 k8s-master03' +for NODE in $Master; do ssh $NODE "mkdir -p /etc/etcd/ssl"; for FILE in etcd-ca-key.pem etcd-ca.pem etcd-key.pem etcd.pem; do scp /etc/etcd/ssl/${FILE} $NODE:/etc/etcd/ssl/${FILE}; 
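+# 可选示例(示意,非原文步骤):拷贝后在循环内顺手核对源与目标文件的校验和是否一致
+md5sum /etc/etcd/ssl/${FILE}; ssh $NODE "md5sum /etc/etcd/ssl/${FILE}";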
done; done + +# 这个命令是一个简单的for循环,在一个由`$Master`存储的主机列表中迭代执行。对于每个主机,它使用`ssh`命令登录到主机,并在远程主机上创建一个名为`/etc/etcd/ssl`的目录(如果不存在)。接下来,它使用`scp`将本地主机上`/etc/etcd/ssl`目录中的四个文件(`etcd-ca-key.pem`,`etcd-ca.pem`,`etcd-key.pem`和`etcd.pem`)复制到远程主机的`/etc/etcd/ssl`目录中。最终的结果是,远程主机上的`/etc/etcd/ssl`目录中包含与本地主机上相同的四个文件的副本。 +``` + +## 3.2.生成k8s相关证书 + +特别说明除外,以下操作在所有master节点操作 + +### 3.2.1 所有k8s节点创建证书存放目录 + +```shell +mkdir -p /etc/kubernetes/pki +``` + +### 3.2.2 master01节点生成k8s证书 + +```shell +# 写入生成证书所需的配置文件 +cat > ca-csr.json << EOF +{ + "CN": "kubernetes", + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ + { + "C": "CN", + "ST": "Beijing", + "L": "Beijing", + "O": "Kubernetes", + "OU": "Kubernetes-manual" + } + ], + "ca": { + "expiry": "876000h" + } +} +EOF +# 这是一个用于生成 Kubernetes 相关证书的配置文件。该配置文件中包含以下信息: +# +# - CN:CommonName,即用于标识证书的通用名称。在此配置中,CN 设置为 "kubernetes",表示该证书是用于 Kubernetes。 +# - key:用于生成证书的算法和大小。在此配置中,使用的算法是 RSA,大小是 2048 位。 +# - names:用于证书中的名称字段的详细信息。在此配置中,有以下字段信息: +# - C:Country,即国家。在此配置中,设置为 "CN"。 +# - ST:State,即省/州。在此配置中,设置为 "Beijing"。 +# - L:Locality,即城市。在此配置中,设置为 "Beijing"。 +# - O:Organization,即组织。在此配置中,设置为 "Kubernetes"。 +# - OU:Organization Unit,即组织单位。在此配置中,设置为 "Kubernetes-manual"。 +# - ca:用于证书签名的证书颁发机构(CA)的配置信息。在此配置中,设置了证书的有效期为 876000 小时。 +# +# 这个配置文件可以用于生成 Kubernetes 相关的证书,以确保集群中的通信安全性。 + +cfssl gencert -initca ca-csr.json | cfssljson -bare /etc/kubernetes/pki/ca + +# 具体的解释如下: +# +# cfssl是一个用于生成TLS/SSL证书的工具,它支持PKI、JSON格式配置文件以及与许多其他集成工具的配合使用。 +# +# gencert参数表示生成证书的操作。-initca参数表示初始化一个CA(证书颁发机构)。CA是用于签发其他证书的根证书。ca-csr.json是一个JSON格式的配置文件,其中包含了CA的详细信息,如私钥、公钥、有效期等。这个文件提供了生成CA证书所需的信息。 +# +# | 符号表示将上一个命令的输出作为下一个命令的输入。 +# +# cfssljson是cfssl工具的一个子命令,用于格式化cfssl生成的JSON数据。 -bare参数表示直接输出裸证书,即只生成证书文件,不包含其他格式的文件。/etc/kubernetes/pki/ca是指定生成的证书文件的路径和名称。 +# +# 所以,这条命令的含义是使用cfssl工具根据配置文件ca-csr.json生成一个CA证书,并将证书文件保存在/etc/kubernetes/pki/ca路径下。 + +cat > apiserver-csr.json << EOF +{ + "CN": "kube-apiserver", + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ + { + "C": "CN", + "ST": "Beijing", + "L": "Beijing", + "O": "Kubernetes", + "OU": "Kubernetes-manual" + } + ] +} +EOF + +# 这是一个用于生成 Kubernetes 相关证书的配置文件。该配置文件中包含以下信息: +# +# - `CN` 字段指定了证书的通用名称 (Common Name),这里设置为 "kube-apiserver",表示该证书用于 Kubernetes API Server。 +# - `key` 字段指定了生成证书时所选用的加密算法和密钥长度。这里选用了 RSA 算法,密钥长度为 2048 位。 +# - `names` 字段包含了一组有关证书持有者信息的项。这里使用了以下信息: +# - `C` 表示国家代码 (Country),这里设置为 "CN" 表示中国。 +# - `ST` 表示州或省份 (State),这里设置为 "Beijing" 表示北京市。 +# - `L` 表示城市或地区 (Location),这里设置为 "Beijing" 表示北京市。 +# - `O` 表示组织名称 (Organization),这里设置为 "Kubernetes" 表示 Kubernetes。 +# - `OU` 表示组织单位 (Organizational Unit),这里设置为 "Kubernetes-manual" 表示手动管理的 Kubernetes 集群。 +# +# 这个配置文件可以用于生成 Kubernetes 相关的证书,以确保集群中的通信安全性。 + + +# 生成一个根证书 ,多写了一些IP作为预留IP,为将来添加node做准备 +# 10.96.0.1是service网段的第一个地址,需要计算,192.168.0.36为高可用vip地址 +# 若没有IPv6 可删除可保留 + +cfssl gencert \ +-ca=/etc/kubernetes/pki/ca.pem \ +-ca-key=/etc/kubernetes/pki/ca-key.pem \ +-config=ca-config.json \ +-hostname=10.96.0.1,192.168.0.36,127.0.0.1,kubernetes,kubernetes.default,kubernetes.default.svc,kubernetes.default.svc.cluster,kubernetes.default.svc.cluster.local,x.oiox.cn,k.oiox.cn,l.oiox.cn,o.oiox.cn,192.168.0.31,192.168.0.32,192.168.0.33,192.168.0.34,192.168.0.35,192.168.0.36,192.168.0.37,192.168.0.38,192.168.0.39,192.168.1.70,fc00:43f4:1eea:1::10,fc00:43f4:1eea:1::20,fc00:43f4:1eea:1::30,fc00:43f4:1eea:1::40,fc00:43f4:1eea:1::50,fc00:43f4:1eea:1::60,fc00:43f4:1eea:1::70,fc00:43f4:1eea:1::80,fc00:43f4:1eea:1::90,fc00:43f4:1eea:1::100,::1 \ +-profile=kubernetes apiserver-csr.json | cfssljson -bare 
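+# 补充示例(示意,非原文步骤):证书签发完成后,可用 openssl 确认 SAN 中包含上面预留的地址
+# openssl x509 -in /etc/kubernetes/pki/apiserver.pem -noout -text | grep -A 1 "Subject Alternative Name"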
/etc/kubernetes/pki/apiserver + +# 这个命令是使用cfssl工具生成Kubernetes API Server的证书。 +# +# 命令的参数解释如下: +# - `-ca=/etc/kubernetes/pki/ca.pem`:指定证书的颁发机构(CA)文件路径。 +# - `-ca-key=/etc/kubernetes/pki/ca-key.pem`:指定证书的颁发机构(CA)私钥文件路径。 +# - `-config=ca-config.json`:指定证书生成的配置文件路径,配置文件中包含了证书的有效期、加密算法等信息。 +# - `-hostname=10.96.0.1,192.168.0.36,127.0.0.1,fc00:43f4:1eea:1::10`:指定证书的主机名或IP地址列表。 +# - `-profile=kubernetes`:指定证书生成的配置文件中的配置文件名。 +# - `apiserver-csr.json`:API Server的证书签名请求配置文件路径。 +# - `| cfssljson -bare /etc/kubernetes/pki/apiserver`:通过管道将生成的证书输出到cfssljson工具,将其转换为PEM编码格式,并保存到 `/etc/kubernetes/pki/apiserver.pem` 和 `/etc/kubernetes/pki/apiserver-key.pem` 文件中。 +# +# 最终,这个命令将会生成API Server的证书和私钥,并保存到指定的文件中。 + +``` + +### 3.2.3 生成apiserver聚合证书 + +```shell +cat > front-proxy-ca-csr.json << EOF +{ + "CN": "kubernetes", + "key": { + "algo": "rsa", + "size": 2048 + }, + "ca": { + "expiry": "876000h" + } +} +EOF + +# 这个JSON文件表示了生成一个名为"kubernetes"的证书的配置信息。这个证书是用来进行Kubernetes集群的身份验证和安全通信。 +# +# 配置信息包括以下几个部分: +# +# 1. "CN": "kubernetes":这表示了证书的通用名称(Common Name),也就是证书所代表的实体的名称。在这里,证书的通用名称被设置为"kubernetes",表示这个证书是用来代表Kubernetes集群。 +# +# 2. "key":这是用来生成证书的密钥相关的配置。在这里,配置使用了RSA算法,并且设置了密钥的大小为2048位。 +# +# 3. "ca":这个字段指定了证书的颁发机构(Certificate Authority)相关的配置。在这里,配置指定了证书的有效期为876000小时,即100年。这意味着该证书在100年内将被视为有效,过期后需要重新生成。 +# +# 总之,这个JSON文件中的配置信息描述了如何生成一个用于Kubernetes集群的证书,包括证书的通用名称、密钥算法和大小以及证书的有效期。 + +cfssl gencert -initca front-proxy-ca-csr.json | cfssljson -bare /etc/kubernetes/pki/front-proxy-ca +# 具体的解释如下: +# +# cfssl是一个用于生成TLS/SSL证书的工具,它支持PKI、JSON格式配置文件以及与许多其他集成工具的配合使用。 +# +# gencert参数表示生成证书的操作。-initca参数表示初始化一个CA(证书颁发机构)。CA是用于签发其他证书的根证书。front-proxy-ca-csr.json是一个JSON格式的配置文件,其中包含了CA的详细信息,如私钥、公钥、有效期等。这个文件提供了生成CA证书所需的信息。 +# +# | 符号表示将上一个命令的输出作为下一个命令的输入。 +# +# cfssljson是cfssl工具的一个子命令,用于格式化cfssl生成的JSON数据。 -bare参数表示直接输出裸证书,即只生成证书文件,不包含其他格式的文件。/etc/kubernetes/pki/front-proxy-ca是指定生成的证书文件的路径和名称。 +# +# 所以,这条命令的含义是使用cfssl工具根据配置文件ca-csr.json生成一个CA证书,并将证书文件保存在/etc/kubernetes/pki/front-proxy-ca路径下。 + +cat > front-proxy-client-csr.json << EOF +{ + "CN": "front-proxy-client", + "key": { + "algo": "rsa", + "size": 2048 + } +} +EOF + +# 这是一个JSON格式的配置文件,用于描述一个名为"front-proxy-client"的配置。配置包括两个字段:CN和key。 +# +# - CN(Common Name)字段表示证书的通用名称,这里为"front-proxy-client"。 +# - key字段描述了密钥的算法和大小。"algo"表示使用RSA算法,"size"表示密钥大小为2048位。 +# +# 该配置文件用于生成一个SSL证书,用于在前端代理客户端进行认证和数据传输的加密。这个证书中的通用名称是"front-proxy-client",使用RSA算法生成,密钥大小为2048位。 + +cfssl gencert \ +-ca=/etc/kubernetes/pki/front-proxy-ca.pem \ +-ca-key=/etc/kubernetes/pki/front-proxy-ca-key.pem \ +-config=ca-config.json \ +-profile=kubernetes front-proxy-client-csr.json | cfssljson -bare /etc/kubernetes/pki/front-proxy-client + +# 这个命令使用cfssl工具生成一个用于Kubernetes的front-proxy-client证书。 +# +# 主要参数解释如下: +# - `-ca=/etc/kubernetes/pki/front-proxy-ca.pem`: 指定用于签署证书的根证书文件路径。 +# - `-ca-key=/etc/kubernetes/pki/front-proxy-ca-key.pem`: 指定用于签署证书的根证书的私钥文件路径。 +# - `-config=ca-config.json`: 指定用于配置证书签署的配置文件路径。该配置文件描述了证书生成的一些规则,如加密算法和有效期等。 +# - `-profile=kubernetes`: 指定生成证书时使用的配置文件中定义的profile,其中包含了一些默认的参数。 +# - `front-proxy-client-csr.json`: 指定用于生成证书的CSR文件路径,该文件包含了证书请求的相关信息。 +# - `| cfssljson -bare /etc/kubernetes/pki/front-proxy-client`: 通过管道将生成的证书输出到cfssljson工具进行解析,并通过`-bare`参数将证书和私钥分别保存到指定路径。 +# +# 这个命令的作用是根据提供的CSR文件和配置信息,使用指定的根证书和私钥生成一个前端代理客户端的证书,并将证书和私钥分别保存到`/etc/kubernetes/pki/front-proxy-client.pem`和`/etc/kubernetes/pki/front-proxy-client-key.pem`文件中。 +``` + +### 3.2.4 生成controller-manage的证书 + +在《5.高可用配置》选择使用那种高可用方案 +若使用 haproxy、keepalived 那么为 `--server=https://192.168.0.36:9443` +若使用 nginx方案,那么为 
`--server=https://127.0.0.1:8443` + +```shell +cat > manager-csr.json << EOF +{ + "CN": "system:kube-controller-manager", + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ + { + "C": "CN", + "ST": "Beijing", + "L": "Beijing", + "O": "system:kube-controller-manager", + "OU": "Kubernetes-manual" + } + ] +} +EOF +# 这是一个用于生成密钥对(公钥和私钥)的JSON配置文件。下面是针对该文件中每个字段的详细解释: +# +# - "CN": 值为"system:kube-controller-manager",代表通用名称(Common Name),是此密钥对的主题(subject)。 +# - "key": 这个字段用来定义密钥算法和大小。 +# - "algo": 值为"rsa",表示使用RSA算法。 +# - "size": 值为2048,表示生成的密钥大小为2048位。 +# - "names": 这个字段用来定义密钥对的各个名称字段。 +# - "C": 值为"CN",表示国家(Country)名称是"CN"(中国)。 +# - "ST": 值为"Beijing",表示省/州(State/Province)名称是"Beijing"(北京)。 +# - "L": 值为"Beijing",表示城市(Locality)名称是"Beijing"(北京)。 +# - "O": 值为"system:kube-controller-manager",表示组织(Organization)名称是"system:kube-controller-manager"。 +# - "OU": 值为"Kubernetes-manual",表示组织单位(Organizational Unit)名称是"Kubernetes-manual"。 +# +# 这个JSON配置文件基本上是告诉生成密钥对的工具,生成一个带有特定名称和属性的密钥对。 + + +cfssl gencert \ + -ca=/etc/kubernetes/pki/ca.pem \ + -ca-key=/etc/kubernetes/pki/ca-key.pem \ + -config=ca-config.json \ + -profile=kubernetes \ + manager-csr.json | cfssljson -bare /etc/kubernetes/pki/controller-manager +# 这是一个命令行操作,使用cfssl工具生成证书。 +# +# 1. `cfssl gencert` 是cfssl工具的命令,用于生成证书。 +# 2. `-ca` 指定根证书的路径和文件名,这里是`/etc/kubernetes/pki/ca.pem`。 +# 3. `-ca-key` 指定根证书的私钥的路径和文件名,这里是`/etc/kubernetes/pki/ca-key.pem`。 +# 4. `-config` 指定配置文件的路径和文件名,这里是`ca-config.json`。 +# 5. `-profile` 指定证书使用的配置文件中的配置模板,这里是`kubernetes`。 +# 6. `manager-csr.json` 是证书签发请求的配置文件,用于生成证书签发请求。 +# 7. `|` 管道操作符,将前一条命令的输出作为后一条命令的输入。 +# 8. `cfssljson -bare` 是 cfssl 工具的命令,作用是将证书签发请求的输出转换为PKCS#1、PKCS#8和x509 PEM文件。 +# 9. `/etc/kubernetes/pki/controller-manager` 是转换后的 PEM 文件的存储位置和文件名。 +# +# 这个命令的作用是根据根证书和私钥、配置文件以及证书签发请求的配置文件,生成经过签发的控制器管理器证书和私钥,并将转换后的 PEM 文件保存到指定的位置。 + + +# 设置一个集群项 +# 在《5.高可用配置》选择使用那种高可用方案 +# 若使用 haproxy、keepalived 那么为 `--server=https://192.168.0.36:8443` +# 若使用 nginx方案,那么为 `--server=https://127.0.0.1:8443` +kubectl config set-cluster kubernetes \ + --certificate-authority=/etc/kubernetes/pki/ca.pem \ + --embed-certs=true \ + --server=https://127.0.0.1:8443 \ + --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig +# kubectl config set-cluster命令用于配置集群信息。 +# --certificate-authority选项指定了集群的证书颁发机构(CA)的路径,这个CA会验证kube-apiserver提供的证书是否合法。 +# --embed-certs选项用于将证书嵌入到生成的kubeconfig文件中,这样就不需要在kubeconfig文件中单独指定证书文件路径。 +# --server选项指定了kube-apiserver的地址,这里使用的是127.0.0.1:8443,表示使用本地主机上的kube-apiserver,默认端口为8443。 +# --kubeconfig选项指定了生成的kubeconfig文件的路径和名称,这里指定为/etc/kubernetes/controller-manager.kubeconfig。 +# 综上所述,kubectl config set-cluster命令的作用是在kubeconfig文件中设置集群信息,包括证书颁发机构、证书、kube-apiserver地址等。 + + +# 设置一个环境项,一个上下文 +kubectl config set-context system:kube-controller-manager@kubernetes \ + --cluster=kubernetes \ + --user=system:kube-controller-manager \ + --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig +# 这个命令用于配置 Kubernetes 控制器管理器的上下文信息。下面是各个参数的详细解释: +# 1. `kubectl config set-context system:kube-controller-manager@kubernetes`: 设置上下文的名称为 `system:kube-controller-manager@kubernetes`,这是一个标识符,用于唯一标识该上下文。 +# 2. `--cluster=kubernetes`: 指定集群的名称为 `kubernetes`,这是一个现有集群的标识符,表示要管理的 Kubernetes 集群。 +# 3. `--user=system:kube-controller-manager`: 指定使用的用户身份为 `system:kube-controller-manager`。这是一个特殊的用户身份,具有控制 Kubernetes 控制器管理器的权限。 +# 4. 
`--kubeconfig=/etc/kubernetes/controller-manager.kubeconfig`: 指定 kubeconfig 文件的路径为 `/etc/kubernetes/controller-manager.kubeconfig`。kubeconfig 文件是一个用于管理 Kubernetes 配置的文件,包含了集群、用户和上下文的相关信息。 +# 通过运行这个命令,可以将这些配置信息保存到 `/etc/kubernetes/controller-manager.kubeconfig` 文件中,以便在后续的操作中使用。 + + + +# 设置一个用户项 +kubectl config set-credentials system:kube-controller-manager \ + --client-certificate=/etc/kubernetes/pki/controller-manager.pem \ + --client-key=/etc/kubernetes/pki/controller-manager-key.pem \ + --embed-certs=true \ + --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig +# 上述命令是用于设置 Kubernetes 的 controller-manager 组件的客户端凭据。下面是每个参数的详细解释: +# +# - `kubectl config`: 是使用 kubectl 命令行工具的配置子命令。 +# - `set-credentials`: 是定义一个新的用户凭据配置的子命令。 +# - `system:kube-controller-manager`: 是设置用户凭据的名称,`system:` 是 Kubernetes API Server 内置的身份验证器使用的用户标识符前缀,它表示是一个系统用户,在本例中是 kube-controller-manager 组件使用的身份。 +# - `--client-certificate=/etc/kubernetes/pki/controller-manager.pem`: 指定 controller-manager.pem 客户端证书的路径。 +# - `--client-key=/etc/kubernetes/pki/controller-manager-key.pem`: 指定 controller-manager-key.pem 客户端私钥的路径。 +# - `--embed-certs=true`: 表示将证书和私钥直接嵌入到生成的 kubeconfig 文件中,而不是通过引用外部文件。 +# - `--kubeconfig=/etc/kubernetes/controller-manager.kubeconfig`: 指定生成的 kubeconfig 文件的路径和文件名,即 controller-manager.kubeconfig。 +# +# 通过运行上述命令,将根据提供的证书和私钥信息,为 kube-controller-manager 创建一个 kubeconfig 文件,以便后续使用该文件进行身份验证和访问 Kubernetes API。 + + +# 设置默认环境 +kubectl config use-context system:kube-controller-manager@kubernetes \ + --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig +# 这个命令是用来指定kubectl使用指定的上下文环境来执行操作。上下文环境是kubectl用来确定要连接到哪个Kubernetes集群以及使用哪个身份验证信息的配置。 +# +# 在这个命令中,`kubectl config use-context`是用来设置当前上下文环境的命令。 `system:kube-controller-manager@kubernetes`是指定的上下文名称,它告诉kubectl要使用的Kubernetes集群和身份验证信息。 +# `--kubeconfig=/etc/kubernetes/controller-manager.kubeconfig`是用来指定使用的kubeconfig文件的路径。kubeconfig文件是存储集群连接和身份验证信息的配置文件。 +# 通过执行这个命令,kubectl将使用指定的上下文来执行后续的操作,包括部署和管理Kubernetes资源。 + + +``` + +### 3.2.5 生成kube-scheduler的证书 + +```shell +cat > scheduler-csr.json << EOF +{ + "CN": "system:kube-scheduler", + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ + { + "C": "CN", + "ST": "Beijing", + "L": "Beijing", + "O": "system:kube-scheduler", + "OU": "Kubernetes-manual" + } + ] +} +EOF +# 这个命令是用来创建一个叫做scheduler-csr.json的文件,并将其中的内容赋值给该文件。 +# +# 文件内容是一个JSON格式的文本,包含了一个描述证书请求的结构。 +# +# 具体内容如下: +# +# - "CN": "system:kube-scheduler":Common Name字段,表示该证书的名称为system:kube-scheduler。 +# - "key": {"algo": "rsa", "size": 2048}:key字段指定生成证书时使用的加密算法是RSA,并且密钥的长度为2048位。 +# - "names": [...]:names字段定义了证书中的另外一些标识信息。 +# - "C": "CN":Country字段,表示国家/地区为中国。 +# - "ST": "Beijing":State字段,表示省/市为北京。 +# - "L": "Beijing":Locality字段,表示所在城市为北京。 +# - "O": "system:kube-scheduler":Organization字段,表示组织为system:kube-scheduler。 +# - "OU": "Kubernetes-manual":Organizational Unit字段,表示组织单元为Kubernetes-manual。 +# +# 而EOF是一个占位符,用于标记开始和结束的位置。在开始的EOF之后到结束的EOF之间的内容将会被写入到scheduler-csr.json文件中。 +# +# 总体来说,这个命令用于生成一个描述kube-scheduler证书请求的JSON文件。 + +cfssl gencert \ + -ca=/etc/kubernetes/pki/ca.pem \ + -ca-key=/etc/kubernetes/pki/ca-key.pem \ + -config=ca-config.json \ + -profile=kubernetes \ + scheduler-csr.json | cfssljson -bare /etc/kubernetes/pki/scheduler +# 上述命令是使用cfssl工具生成Kubernetes Scheduler的证书。 +# +# 具体解释如下: +# +# 1. `cfssl gencert`:使用cfssl工具生成证书。 +# 2. `-ca=/etc/kubernetes/pki/ca.pem`:指定根证书文件的路径。在这里,是指定根证书的路径为`/etc/kubernetes/pki/ca.pem`。 +# 3. `-ca-key=/etc/kubernetes/pki/ca-key.pem`:指定根证书私钥文件的路径。在这里,是指定根证书私钥的路径为`/etc/kubernetes/pki/ca-key.pem`。 +# 4. 
`-config=ca-config.json`:指定证书配置文件的路径。在这里,是指定证书配置文件的路径为`ca-config.json`。 +# 5. `-profile=kubernetes`:指定证书的配置文件中的一个配置文件模板。在这里,是指定配置文件中的`kubernetes`配置模板。 +# 6. `scheduler-csr.json`:指定Scheduler的证书签名请求文件(CSR)的路径。在这里,是指定请求文件的路径为`scheduler-csr.json`。 +# 7. `|`(管道符号):将前一个命令的输出作为下一个命令的输入。 +# 8. `cfssljson`:将cfssl工具生成的证书签名请求(CSR)进行解析。 +# 9. `-bare /etc/kubernetes/pki/scheduler`:指定输出路径和前缀。在这里,是将解析的证书签名请求生成以下文件:`/etc/kubernetes/pki/scheduler.pem`(包含了证书)、`/etc/kubernetes/pki/scheduler-key.pem`(包含了私钥)。 +# +# 总结来说,这个命令的目的是根据根证书、根证书私钥、证书配置文件、CSR文件等生成Kubernetes Scheduler的证书和私钥文件。 + + + +# 在《5.高可用配置》选择使用那种高可用方案 +# 若使用 haproxy、keepalived 那么为 `--server=https://192.168.0.36:8443` +# 若使用 nginx方案,那么为 `--server=https://127.0.0.1:8443` + +kubectl config set-cluster kubernetes \ + --certificate-authority=/etc/kubernetes/pki/ca.pem \ + --embed-certs=true \ + --server=https://127.0.0.1:8443 \ + --kubeconfig=/etc/kubernetes/scheduler.kubeconfig +# 该命令用于配置一个名为"kubernetes"的集群,并将其应用到/etc/kubernetes/scheduler.kubeconfig文件中。 +# +# 该命令的解释如下: +# - `kubectl config set-cluster kubernetes`: 设置一个集群并命名为"kubernetes"。 +# - `--certificate-authority=/etc/kubernetes/pki/ca.pem`: 指定集群使用的证书授权机构的路径。 +# - `--embed-certs=true`: 该标志指示将证书嵌入到生成的kubeconfig文件中。 +# - `--server=https://127.0.0.1:8443`: 指定集群的 API server 位置。 +# - `--kubeconfig=/etc/kubernetes/scheduler.kubeconfig`: 指定要保存 kubeconfig 文件的路径和名称。 + +kubectl config set-credentials system:kube-scheduler \ + --client-certificate=/etc/kubernetes/pki/scheduler.pem \ + --client-key=/etc/kubernetes/pki/scheduler-key.pem \ + --embed-certs=true \ + --kubeconfig=/etc/kubernetes/scheduler.kubeconfig +# 这段命令是用于设置 kube-scheduler 组件的身份验证凭据,并生成相应的 kubeconfig 文件。 +# +# 解释每个选项的含义如下: +# - `kubectl config set-credentials system:kube-scheduler`:设置 `system:kube-scheduler` 用户的身份验证凭据。 +# - `--client-certificate=/etc/kubernetes/pki/scheduler.pem`:指定一个客户端证书文件,用于基于证书的身份验证。在这种情况下,指定了 kube-scheduler 组件的证书文件路径。 +# - `--client-key=/etc/kubernetes/pki/scheduler-key.pem`:指定与客户端证书相对应的客户端私钥文件。 +# - `--embed-certs=true`:将客户端证书和私钥嵌入到生成的 kubeconfig 文件中。 +# - `--kubeconfig=/etc/kubernetes/scheduler.kubeconfig`:指定生成的 kubeconfig 文件的路径和名称。 +# +# 该命令的目的是为 kube-scheduler 组件生成一个 kubeconfig 文件,以便进行身份验证和访问集群资源。kubeconfig 文件是一个包含了连接到 Kubernetes 集群所需的所有配置信息的文件,包括服务器地址、证书和秘钥等。 + +kubectl config set-context system:kube-scheduler@kubernetes \ + --cluster=kubernetes \ + --user=system:kube-scheduler \ + --kubeconfig=/etc/kubernetes/scheduler.kubeconfig +# 该命令用于设置一个名为"system:kube-scheduler@kubernetes"的上下文,具体配置如下: +# +# 1. --cluster=kubernetes: 指定集群的名称为"kubernetes",这个集群是在当前的kubeconfig文件中已经定义好的。 +# 2. --user=system:kube-scheduler: 指定用户的名称为"system:kube-scheduler",这个用户也是在当前的kubeconfig文件中已经定义好的。这个用户用于认证和授权kube-scheduler组件访问Kubernetes集群的权限。 +# 3. 
--kubeconfig=/etc/kubernetes/scheduler.kubeconfig: 指定kubeconfig文件的路径为"/etc/kubernetes/scheduler.kubeconfig",这个文件将被用来保存上下文的配置信息。 +# +# 这个命令的作用是将上述的配置信息保存到指定的kubeconfig文件中,以便后续使用该文件进行认证和授权访问Kubernetes集群。 + +kubectl config use-context system:kube-scheduler@kubernetes \ + --kubeconfig=/etc/kubernetes/scheduler.kubeconfig +# 上述命令是使用`kubectl`命令来配置Kubernetes集群中的调度器组件。 +# +# `kubectl config use-context`命令用于切换`kubectl`当前使用的上下文。上下文是Kubernetes集群、用户和命名空间的组合,用于确定`kubectl`的连接目标。下面解释这个命令的不同部分: +# +# - `system:kube-scheduler@kubernetes`是一个上下文名称。它指定了使用`kube-scheduler`用户和`kubernetes`命名空间的系统级别上下文。系统级别上下文用于操作Kubernetes核心组件。 +# +# - `--kubeconfig=/etc/kubernetes/scheduler.kubeconfig`用于指定Kubernetes配置文件的路径。Kubernetes配置文件包含连接到Kubernetes集群所需的身份验证和连接信息。 +# +# 通过运行以上命令,`kubectl`将使用指定的上下文和配置文件,以便在以后的命令中能正确地与Kubernetes集群中的调度器组件进行交互。 +``` + +### 3.2.6 生成admin的证书配置 + +```shell +cat > admin-csr.json << EOF +{ + "CN": "admin", + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ + { + "C": "CN", + "ST": "Beijing", + "L": "Beijing", + "O": "system:masters", + "OU": "Kubernetes-manual" + } + ] +} +EOF +# 这段代码是一个JSON格式的配置文件,用于创建和配置一个名为"admin"的Kubernetes凭证。 +# +# 这个凭证包含以下字段: +# +# - "CN": "admin": 这是凭证的通用名称,表示这是一个管理员凭证。 +# - "key": 这是一个包含证书密钥相关信息的对象。 +# - "algo": "rsa":这是使用的加密算法类型,这里是RSA加密算法。 +# - "size": 2048:这是密钥的大小,这里是2048位。 +# - "names": 这是一个包含证书名称信息的数组。 +# - "C": "CN":这是证书的国家/地区字段,这里是中国。 +# - "ST": "Beijing":这是证书的省/州字段,这里是北京。 +# - "L": "Beijing":这是证书的城市字段,这里是北京。 +# - "O": "system:masters":这是证书的组织字段,这里是system:masters,表示系统的管理员组。 +# - "OU": "Kubernetes-manual":这是证书的部门字段,这里是Kubernetes-manual。 +# +# 通过这个配置文件创建的凭证将具有管理员权限,并且可以用于管理Kubernetes集群。 + +cfssl gencert \ + -ca=/etc/kubernetes/pki/ca.pem \ + -ca-key=/etc/kubernetes/pki/ca-key.pem \ + -config=ca-config.json \ + -profile=kubernetes \ + admin-csr.json | cfssljson -bare /etc/kubernetes/pki/admin +# 上述命令是使用cfssl工具生成Kubernetes admin的证书。 +# +# 具体解释如下: +# +# 1. `cfssl gencert`:使用cfssl工具生成证书。 +# 2. `-ca=/etc/kubernetes/pki/ca.pem`:指定根证书文件的路径。在这里,是指定根证书的路径为`/etc/kubernetes/pki/ca.pem`。 +# 3. `-ca-key=/etc/kubernetes/pki/ca-key.pem`:指定根证书私钥文件的路径。在这里,是指定根证书私钥的路径为`/etc/kubernetes/pki/ca-key.pem`。 +# 4. `-config=ca-config.json`:指定证书配置文件的路径。在这里,是指定证书配置文件的路径为`ca-config.json`。 +# 5. `-profile=kubernetes`:指定证书的配置文件中的一个配置文件模板。在这里,是指定配置文件中的`kubernetes`配置模板。 +# 6. `admin-csr.json`:指定admin的证书签名请求文件(CSR)的路径。在这里,是指定请求文件的路径为`admin-csr.json`。 +# 7. `|`(管道符号):将前一个命令的输出作为下一个命令的输入。 +# 8. `cfssljson`:将cfssl工具生成的证书签名请求(CSR)进行解析。 +# 9. 
`-bare /etc/kubernetes/pki/admin`:指定输出路径和前缀。在这里,是将解析的证书签名请求生成以下文件:`/etc/kubernetes/pki/admin.pem`(包含了证书)、`/etc/kubernetes/pki/admin-key.pem`(包含了私钥)。
+#
+# 总结来说,这个命令的目的是根据根证书、根证书私钥、证书配置文件、CSR文件等生成Kubernetes admin的证书和私钥文件。

+# 在《5.高可用配置》选择使用哪种高可用方案
+# 若使用 haproxy、keepalived 那么为 `--server=https://192.168.0.36:8443`
+# 若使用 nginx方案,那么为 `--server=https://127.0.0.1:8443`
+
+kubectl config set-cluster kubernetes \
+    --certificate-authority=/etc/kubernetes/pki/ca.pem \
+    --embed-certs=true \
+    --server=https://127.0.0.1:8443 \
+    --kubeconfig=/etc/kubernetes/admin.kubeconfig
+# 该命令用于配置一个名为"kubernetes"的集群,并将其应用到/etc/kubernetes/admin.kubeconfig文件中。
+#
+# 该命令的解释如下:
+# - `kubectl config set-cluster kubernetes`: 设置一个集群并命名为"kubernetes"。
+# - `--certificate-authority=/etc/kubernetes/pki/ca.pem`: 指定集群使用的证书授权机构的路径。
+# - `--embed-certs=true`: 该标志指示将证书嵌入到生成的kubeconfig文件中。
+# - `--server=https://127.0.0.1:8443`: 指定集群的 API server 位置。
+# - `--kubeconfig=/etc/kubernetes/admin.kubeconfig`: 指定要保存 kubeconfig 文件的路径和名称。
+
+kubectl config set-credentials kubernetes-admin \
+    --client-certificate=/etc/kubernetes/pki/admin.pem \
+    --client-key=/etc/kubernetes/pki/admin-key.pem \
+    --embed-certs=true \
+    --kubeconfig=/etc/kubernetes/admin.kubeconfig
+# 这段命令是用于设置 kubernetes-admin 用户的身份验证凭据,并生成相应的 kubeconfig 文件。
+#
+# 解释每个选项的含义如下:
+# - `kubectl config set-credentials kubernetes-admin`:设置 `kubernetes-admin` 用户的身份验证凭据。
+# - `--client-certificate=/etc/kubernetes/pki/admin.pem`:指定一个客户端证书文件,用于基于证书的身份验证。在这种情况下,指定了 admin 的证书文件路径。
+# - `--client-key=/etc/kubernetes/pki/admin-key.pem`:指定与客户端证书相对应的客户端私钥文件。
+# - `--embed-certs=true`:将客户端证书和私钥嵌入到生成的 kubeconfig 文件中。
+# - `--kubeconfig=/etc/kubernetes/admin.kubeconfig`:指定生成的 kubeconfig 文件的路径和文件名。
+#
+# 该命令的目的是为 admin 用户生成一个 kubeconfig 文件,以便进行身份验证和访问集群资源。kubeconfig 文件是一个包含了连接到 Kubernetes 集群所需的所有配置信息的文件,包括服务器地址、证书和秘钥等。
+
+
+kubectl config set-context kubernetes-admin@kubernetes \
+    --cluster=kubernetes \
+    --user=kubernetes-admin \
+    --kubeconfig=/etc/kubernetes/admin.kubeconfig
+# 该命令用于设置一个名为"kubernetes-admin@kubernetes"的上下文,具体配置如下:
+#
+# 1. --cluster=kubernetes: 指定集群的名称为"kubernetes",这个集群是在当前的kubeconfig文件中已经定义好的。
+# 2. --user=kubernetes-admin: 指定用户的名称为"kubernetes-admin",这个用户也是在当前的kubeconfig文件中已经定义好的。这个用户用于认证和授权以管理员身份访问Kubernetes集群。
+# 3. --kubeconfig=/etc/kubernetes/admin.kubeconfig: 指定kubeconfig文件的路径为"/etc/kubernetes/admin.kubeconfig",这个文件将被用来保存上下文的配置信息。
+#
+# 这个命令的作用是将上述的配置信息保存到指定的kubeconfig文件中,以便后续使用该文件进行认证和授权访问Kubernetes集群。
+
+
+kubectl config use-context kubernetes-admin@kubernetes --kubeconfig=/etc/kubernetes/admin.kubeconfig
+# 上述命令是使用`kubectl`命令切换当前使用的管理员(admin)上下文。
+#
+# `kubectl config use-context`命令用于切换`kubectl`当前使用的上下文。上下文是Kubernetes集群、用户和命名空间的组合,用于确定`kubectl`的连接目标。下面解释这个命令的不同部分:
+#
+# - `kubernetes-admin@kubernetes`是一个上下文名称。它指定了使用`kubernetes-admin`用户和名为`kubernetes`的集群。该上下文具有管理员权限,可用于管理整个Kubernetes集群。
+#
+# - `--kubeconfig=/etc/kubernetes/admin.kubeconfig`用于指定Kubernetes配置文件的路径。Kubernetes配置文件包含连接到Kubernetes集群所需的身份验证和连接信息。
+#
+# 通过运行以上命令,`kubectl`将使用指定的上下文和配置文件,以便在以后的命令中能以管理员身份与Kubernetes集群进行交互。
+```
+
+
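生成完成后,可以用 openssl 快速确认 admin 证书的主体信息和有效期是否符合预期(以下为示例命令,假设系统已安装 openssl,非必需步骤):
+
+```shell
+# Subject 中应包含 O=system:masters、CN=admin
+openssl x509 -in /etc/kubernetes/pki/admin.pem -noout -subject -dates
+```
+
+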
### 3.2.7 创建kube-proxy证书
+
+在《5.高可用配置》选择使用哪种高可用方案
+若使用 haproxy、keepalived 那么为 `--server=https://192.168.0.36:8443`
+若使用 nginx方案,那么为 `--server=https://127.0.0.1:8443`
+
+```shell
+cat > kube-proxy-csr.json << EOF
+{
+  "CN": "system:kube-proxy",
+  "key": {
+    "algo": "rsa",
+    "size": 2048
+  },
+  "names": [
+    {
+      "C": "CN",
+      "ST": "Beijing",
+      "L": "Beijing",
+      "O": "system:kube-proxy",
+      "OU": "Kubernetes-manual"
+    }
+  ]
+}
+EOF
+# 这段代码是一个JSON格式的配置文件,用于创建kube-proxy的证书签名请求。
+#
+# 这个请求包含以下字段:
+#
+# - "CN": "system:kube-proxy": 这是凭证的通用名称,表示这是kube-proxy组件使用的身份。
+# - "key": 这是一个包含证书密钥相关信息的对象。
+#   - "algo": "rsa":这是使用的加密算法类型,这里是RSA加密算法。
+#   - "size": 2048:这是密钥的大小,这里是2048位。
+# - "names": 这是一个包含证书名称信息的数组。
+#   - "C": "CN":这是证书的国家/地区字段,这里是中国。
+#   - "ST": "Beijing":这是证书的省/州字段,这里是北京。
+#   - "L": "Beijing":这是证书的城市字段,这里是北京。
+#   - "O": "system:kube-proxy":这是证书的组织字段,这里是system:kube-proxy。
+#   - "OU": "Kubernetes-manual":这是证书的部门字段,这里是Kubernetes-manual。
+#
+# 通过这个配置文件签发的凭证将用于kube-proxy组件与API Server之间的身份认证。
+
+cfssl gencert \
+   -ca=/etc/kubernetes/pki/ca.pem \
+   -ca-key=/etc/kubernetes/pki/ca-key.pem \
+   -config=ca-config.json \
+   -profile=kubernetes \
+   kube-proxy-csr.json | cfssljson -bare /etc/kubernetes/pki/kube-proxy
+# 上述命令是使用cfssl工具生成Kubernetes kube-proxy的证书。
+#
+# 具体解释如下:
+#
+# 1. `cfssl gencert`:使用cfssl工具生成证书。
+# 2. `-ca=/etc/kubernetes/pki/ca.pem`:指定根证书文件的路径。在这里,是指定根证书的路径为`/etc/kubernetes/pki/ca.pem`。
+# 3. `-ca-key=/etc/kubernetes/pki/ca-key.pem`:指定根证书私钥文件的路径。在这里,是指定根证书私钥的路径为`/etc/kubernetes/pki/ca-key.pem`。
+# 4. `-config=ca-config.json`:指定证书配置文件的路径。在这里,是指定证书配置文件的路径为`ca-config.json`。
+# 5. `-profile=kubernetes`:指定证书的配置文件中的一个配置文件模板。在这里,是指定配置文件中的`kubernetes`配置模板。
+# 6. `kube-proxy-csr.json`:指定kube-proxy的证书签名请求文件(CSR)的路径。在这里,是指定请求文件的路径为`kube-proxy-csr.json`。
+# 7. `|`(管道符号):将前一个命令的输出作为下一个命令的输入。
+# 8. `cfssljson`:将cfssl工具生成的证书签名请求(CSR)进行解析。
+# 9. `-bare /etc/kubernetes/pki/kube-proxy`:指定输出路径和前缀。在这里,是将解析的证书签名请求生成以下文件:`/etc/kubernetes/pki/kube-proxy.pem`(包含了证书)、`/etc/kubernetes/pki/kube-proxy-key.pem`(包含了私钥)。
+#
+# 总结来说,这个命令的目的是根据根证书、根证书私钥、证书配置文件、CSR文件等生成kube-proxy的证书和私钥文件。
+
+
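# 可选:用 openssl 校验刚生成的 kube-proxy 证书(示例,假设系统已安装 openssl,非必需)
+# Subject 中应包含 O=system:kube-proxy、CN=system:kube-proxy
+openssl x509 -in /etc/kubernetes/pki/kube-proxy.pem -noout -subject -dates
+
+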
# 在《5.高可用配置》选择使用哪种高可用方案
+# 若使用 haproxy、keepalived 那么为 `--server=https://192.168.0.36:8443`
+# 若使用 nginx方案,那么为 `--server=https://127.0.0.1:8443`
+
+kubectl config set-cluster kubernetes \
+    --certificate-authority=/etc/kubernetes/pki/ca.pem \
+    --embed-certs=true \
+    --server=https://127.0.0.1:8443 \
+    --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
+# 该命令用于配置一个名为"kubernetes"的集群,并将其应用到/etc/kubernetes/kube-proxy.kubeconfig文件中。
+#
+# 该命令的解释如下:
+# - `kubectl config set-cluster kubernetes`: 设置一个集群并命名为"kubernetes"。
+# - `--certificate-authority=/etc/kubernetes/pki/ca.pem`: 指定集群使用的证书授权机构的路径。
+# - `--embed-certs=true`: 该标志指示将证书嵌入到生成的kubeconfig文件中。
+# - `--server=https://127.0.0.1:8443`: 指定集群的 API server 位置。
+# - `--kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig`: 指定要保存 kubeconfig 文件的路径和名称。
+
+kubectl config set-credentials kube-proxy \
+    --client-certificate=/etc/kubernetes/pki/kube-proxy.pem \
+    --client-key=/etc/kubernetes/pki/kube-proxy-key.pem \
+    --embed-certs=true \
+    --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
+# 这段命令是用于设置 kube-proxy 组件的身份验证凭据,并生成相应的 kubeconfig 文件。
+#
+# 解释每个选项的含义如下:
+# - `kubectl config set-credentials kube-proxy`:设置 `kube-proxy` 用户的身份验证凭据。
+# - `--client-certificate=/etc/kubernetes/pki/kube-proxy.pem`:指定一个客户端证书文件,用于基于证书的身份验证。在这种情况下,指定了 kube-proxy 组件的证书文件路径。
+# - `--client-key=/etc/kubernetes/pki/kube-proxy-key.pem`:指定与客户端证书相对应的客户端私钥文件。
+# - `--embed-certs=true`:将客户端证书和私钥嵌入到生成的 kubeconfig 文件中。
+# - `--kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig`:指定生成的 kubeconfig 文件的路径和名称。
+#
+# 该命令的目的是为 kube-proxy 组件生成一个 kubeconfig 文件,以便进行身份验证和访问集群资源。kubeconfig 文件是一个包含了连接到 Kubernetes 集群所需的所有配置信息的文件,包括服务器地址、证书和秘钥等。
+
+kubectl config set-context kube-proxy@kubernetes \
+    --cluster=kubernetes \
+    --user=kube-proxy \
+    --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
+# 该命令用于设置一个名为"kube-proxy@kubernetes"的上下文,具体配置如下:
+#
+# 1. --cluster=kubernetes: 指定集群的名称为"kubernetes",这个集群是在当前的kubeconfig文件中已经定义好的。
+# 2. --user=kube-proxy: 指定用户的名称为"kube-proxy",这个用户也是在当前的kubeconfig文件中已经定义好的。这个用户用于认证和授权kube-proxy组件访问Kubernetes集群的权限。
+# 3. --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig: 指定kubeconfig文件的路径为"/etc/kubernetes/kube-proxy.kubeconfig",这个文件将被用来保存上下文的配置信息。
+#
+# 这个命令的作用是将上述的配置信息保存到指定的kubeconfig文件中,以便后续使用该文件进行认证和授权访问Kubernetes集群。
+
+kubectl config use-context kube-proxy@kubernetes --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
+# 上述命令是使用`kubectl`命令切换kube-proxy.kubeconfig当前使用的上下文。
+#
+# `kubectl config use-context`命令用于切换`kubectl`当前使用的上下文。上下文是Kubernetes集群、用户和命名空间的组合,用于确定`kubectl`的连接目标。下面解释这个命令的不同部分:
+#
+# - `kube-proxy@kubernetes`是一个上下文名称。它指定了使用`kube-proxy`用户和名为`kubernetes`的集群。
+#
+# - `--kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig`用于指定Kubernetes配置文件的路径。Kubernetes配置文件包含连接到Kubernetes集群所需的身份验证和连接信息。
+#
+# 通过运行以上命令,该kubeconfig文件的默认上下文即配置完成,kube-proxy组件后续便可使用它与Kubernetes集群进行交互。
+```
+
+
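可以用 kubectl 查看生成的 kubeconfig,确认集群、用户与上下文三项信息都已写入(示例命令,非必需;嵌入的证书数据默认会被省略显示):
+
+```shell
+kubectl config view --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
+```
+
+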
### 3.2.8 创建ServiceAccount Key ——secret
+
+```shell
+openssl genrsa -out /etc/kubernetes/pki/sa.key 2048
+openssl rsa -in /etc/kubernetes/pki/sa.key -pubout -out /etc/kubernetes/pki/sa.pub
+
+# 这两个命令是使用OpenSSL工具生成RSA密钥对。
+#
+# 命令1:openssl genrsa -out /etc/kubernetes/pki/sa.key 2048
+# 该命令用于生成私钥文件。具体解释如下:
+# - openssl:openssl命令行工具。
+# - genrsa:生成RSA密钥对。
+# - -out /etc/kubernetes/pki/sa.key:指定输出私钥文件的路径和文件名。
+# - 2048:指定密钥长度为2048位。
+#
+# 命令2:openssl rsa -in /etc/kubernetes/pki/sa.key -pubout -out /etc/kubernetes/pki/sa.pub
+# 该命令用于从私钥中导出公钥。具体解释如下:
+# - openssl:openssl命令行工具。
+# - rsa:与私钥相关的RSA操作。
+# - -in /etc/kubernetes/pki/sa.key:指定输入私钥文件的路径和文件名。
+# - -pubout:指定输出公钥。
+# - -out /etc/kubernetes/pki/sa.pub:指定输出公钥文件的路径和文件名。
+#
+# 总结:通过以上两个命令,我们可以使用OpenSSL工具生成一个RSA密钥对,并将私钥保存在/etc/kubernetes/pki/sa.key文件中,将公钥保存在/etc/kubernetes/pki/sa.pub文件中。
+```
+
+### 3.2.9 将证书发送到其他master节点
+
+```shell
+#其他节点创建目录
+# mkdir /etc/kubernetes/pki/ -p
+
+for NODE in k8s-master02 k8s-master03; do for FILE in $(ls /etc/kubernetes/pki | grep -v etcd); do scp /etc/kubernetes/pki/${FILE} $NODE:/etc/kubernetes/pki/${FILE}; done; for FILE in admin.kubeconfig controller-manager.kubeconfig scheduler.kubeconfig; do scp /etc/kubernetes/${FILE} $NODE:/etc/kubernetes/${FILE}; done; done
+```
+
+### 3.2.10 查看证书
+
+```shell
+ls /etc/kubernetes/pki/
+admin.csr          controller-manager.csr      kube-proxy.csr
+admin-key.pem      controller-manager-key.pem  kube-proxy-key.pem
+admin.pem          controller-manager.pem      kube-proxy.pem
+apiserver.csr      front-proxy-ca.csr          sa.key
+apiserver-key.pem  front-proxy-ca-key.pem      sa.pub
+apiserver.pem      front-proxy-ca.pem          scheduler.csr
+ca.csr             front-proxy-client.csr      scheduler-key.pem
+ca-key.pem         front-proxy-client-key.pem  scheduler.pem
+ca.pem             front-proxy-client.pem
+
+# 一共26个就对了
+ls /etc/kubernetes/pki/ |wc -l
+26
+```
+
+# 4.k8s系统组件配置
+
+## 4.1.etcd配置
+
+```shell
+这个配置文件是用于 etcd 集群的配置,其中包含了一些重要的参数和选项:
+
+- `name`:指定了当前节点的名称,用于集群中区分不同的节点。
+- `data-dir`:指定了 etcd 数据的存储目录。
+- `wal-dir`:指定了 etcd 预写日志(WAL)的存储目录。
+- `snapshot-count`:指定了触发快照的事务数量。
+- `heartbeat-interval`:指定了 etcd 集群中节点之间的心跳间隔。
+- `election-timeout`:指定了选举超时时间。
+- `quota-backend-bytes`:指定了存储的限额,0 表示无限制。
+- `listen-peer-urls`:指定了节点之间通信的 URL,使用 HTTPS 协议。
+- `listen-client-urls`:指定了客户端访问 etcd 集群的 URL,同时提供了本地访问的 URL。
+- `max-snapshots`:指定了快照保留的数量。
+- `max-wals`:指定了日志保留的数量。
+- `initial-advertise-peer-urls`:指定了节点之间通信的初始 URL。
+- `advertise-client-urls`:指定了客户端访问 etcd 集群的初始 URL。
+- `discovery`:定义了 etcd 集群发现相关的选项。
+- `initial-cluster`:指定了 etcd 集群的初始成员。
+- `initial-cluster-token`:指定了集群的 token。
+- `initial-cluster-state`:指定了集群的初始状态。
+- `strict-reconfig-check`:指定了严格的重新配置检查选项。
+- `enable-v2`:启用了 v2 API。
+- `enable-pprof`:启用了性能分析。
+- `proxy`:设置了代理模式。
+- `client-transport-security`:客户端的传输安全配置。
+- 
`peer-transport-security`:节点之间的传输安全配置。 +- `debug`:是否启用调试模式。 +- `log-package-levels`:日志的输出级别。 +- `log-outputs`:指定了日志的输出类型。 +- `force-new-cluster`:是否强制创建一个新的集群。 + +这些参数和选项可以根据实际需求进行调整和配置。 +``` + +### 4.1.1master01配置 + +```shell +# 如果要用IPv6那么把IPv4地址修改为IPv6即可 +cat > /etc/etcd/etcd.config.yml << EOF +name: 'k8s-master01' +data-dir: /var/lib/etcd +wal-dir: /var/lib/etcd/wal +snapshot-count: 5000 +heartbeat-interval: 100 +election-timeout: 1000 +quota-backend-bytes: 0 +listen-peer-urls: 'https://192.168.0.31:2380' +listen-client-urls: 'https://192.168.0.31:2379,http://127.0.0.1:2379' +max-snapshots: 3 +max-wals: 5 +cors: +initial-advertise-peer-urls: 'https://192.168.0.31:2380' +advertise-client-urls: 'https://192.168.0.31:2379' +discovery: +discovery-fallback: 'proxy' +discovery-proxy: +discovery-srv: +initial-cluster: 'k8s-master01=https://192.168.0.31:2380,k8s-master02=https://192.168.0.32:2380,k8s-master03=https://192.168.0.33:2380' +initial-cluster-token: 'etcd-k8s-cluster' +initial-cluster-state: 'new' +strict-reconfig-check: false +enable-v2: true +enable-pprof: true +proxy: 'off' +proxy-failure-wait: 5000 +proxy-refresh-interval: 30000 +proxy-dial-timeout: 1000 +proxy-write-timeout: 5000 +proxy-read-timeout: 0 +client-transport-security: + cert-file: '/etc/kubernetes/pki/etcd/etcd.pem' + key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem' + client-cert-auth: true + trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem' + auto-tls: true +peer-transport-security: + cert-file: '/etc/kubernetes/pki/etcd/etcd.pem' + key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem' + peer-client-cert-auth: true + trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem' + auto-tls: true +debug: false +log-package-levels: +log-outputs: [default] +force-new-cluster: false +EOF +``` + +### 4.1.2master02配置 + +```shell +# 如果要用IPv6那么把IPv4地址修改为IPv6即可 +cat > /etc/etcd/etcd.config.yml << EOF +name: 'k8s-master02' +data-dir: /var/lib/etcd +wal-dir: /var/lib/etcd/wal +snapshot-count: 5000 +heartbeat-interval: 100 +election-timeout: 1000 +quota-backend-bytes: 0 +listen-peer-urls: 'https://192.168.0.32:2380' +listen-client-urls: 'https://192.168.0.32:2379,http://127.0.0.1:2379' +max-snapshots: 3 +max-wals: 5 +cors: +initial-advertise-peer-urls: 'https://192.168.0.32:2380' +advertise-client-urls: 'https://192.168.0.32:2379' +discovery: +discovery-fallback: 'proxy' +discovery-proxy: +discovery-srv: +initial-cluster: 'k8s-master01=https://192.168.0.31:2380,k8s-master02=https://192.168.0.32:2380,k8s-master03=https://192.168.0.33:2380' +initial-cluster-token: 'etcd-k8s-cluster' +initial-cluster-state: 'new' +strict-reconfig-check: false +enable-v2: true +enable-pprof: true +proxy: 'off' +proxy-failure-wait: 5000 +proxy-refresh-interval: 30000 +proxy-dial-timeout: 1000 +proxy-write-timeout: 5000 +proxy-read-timeout: 0 +client-transport-security: + cert-file: '/etc/kubernetes/pki/etcd/etcd.pem' + key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem' + client-cert-auth: true + trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem' + auto-tls: true +peer-transport-security: + cert-file: '/etc/kubernetes/pki/etcd/etcd.pem' + key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem' + peer-client-cert-auth: true + trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem' + auto-tls: true +debug: false +log-package-levels: +log-outputs: [default] +force-new-cluster: false +EOF +``` + +### 4.1.3master03配置 + +```shell +# 如果要用IPv6那么把IPv4地址修改为IPv6即可 +cat > /etc/etcd/etcd.config.yml << EOF +name: 'k8s-master03' +data-dir: /var/lib/etcd +wal-dir: 
/var/lib/etcd/wal +snapshot-count: 5000 +heartbeat-interval: 100 +election-timeout: 1000 +quota-backend-bytes: 0 +listen-peer-urls: 'https://192.168.0.33:2380' +listen-client-urls: 'https://192.168.0.33:2379,http://127.0.0.1:2379' +max-snapshots: 3 +max-wals: 5 +cors: +initial-advertise-peer-urls: 'https://192.168.0.33:2380' +advertise-client-urls: 'https://192.168.0.33:2379' +discovery: +discovery-fallback: 'proxy' +discovery-proxy: +discovery-srv: +initial-cluster: 'k8s-master01=https://192.168.0.31:2380,k8s-master02=https://192.168.0.32:2380,k8s-master03=https://192.168.0.33:2380' +initial-cluster-token: 'etcd-k8s-cluster' +initial-cluster-state: 'new' +strict-reconfig-check: false +enable-v2: true +enable-pprof: true +proxy: 'off' +proxy-failure-wait: 5000 +proxy-refresh-interval: 30000 +proxy-dial-timeout: 1000 +proxy-write-timeout: 5000 +proxy-read-timeout: 0 +client-transport-security: + cert-file: '/etc/kubernetes/pki/etcd/etcd.pem' + key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem' + client-cert-auth: true + trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem' + auto-tls: true +peer-transport-security: + cert-file: '/etc/kubernetes/pki/etcd/etcd.pem' + key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem' + peer-client-cert-auth: true + trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem' + auto-tls: true +debug: false +log-package-levels: +log-outputs: [default] +force-new-cluster: false +EOF +``` + +## 4.2.创建service(所有master节点操作) + +### 4.2.1创建etcd.service并启动 + +```shell +cat > /usr/lib/systemd/system/etcd.service << EOF + +[Unit] +Description=Etcd Service +Documentation=https://coreos.com/etcd/docs/latest/ +After=network.target + +[Service] +Type=notify +ExecStart=/usr/local/bin/etcd --config-file=/etc/etcd/etcd.config.yml +Restart=on-failure +RestartSec=10 +LimitNOFILE=65536 + +[Install] +WantedBy=multi-user.target +Alias=etcd3.service + +EOF +# 这是一个系统服务配置文件,用于启动和管理Etcd服务。 +# +# [Unit] 部分包含了服务的一些基本信息,它定义了服务的描述和文档链接,并指定了服务应在网络连接之后启动。 +# +# [Service] 部分定义了服务的具体配置。在这里,服务的类型被设置为notify,意味着当服务成功启动时,它将通知系统。ExecStart 指定了启动服务时要执行的命令,这里是运行 /usr/local/bin/etcd 命令并传递一个配置文件 /etc/etcd/etcd.config.yml。Restart 设置为 on-failure,意味着当服务失败时将自动重启,并且在10秒后进行重启。LimitNOFILE 指定了服务的最大文件打开数。 +# +# [Install] 部分定义了服务的安装配置。WantedBy 指定了服务应该被启动的目标,这里是 multi-user.target,表示在系统进入多用户模式时启动。Alias 定义了一个别名,可以通过etcd3.service来引用这个服务。 +# +# 这个配置文件描述了如何启动和管理Etcd服务,并将其安装到系统中。通过这个配置文件,可以确保Etcd服务在系统启动后自动启动,并在出现问题时进行重启。 +``` + +### 4.2.2创建etcd证书目录 + +```shell +mkdir /etc/kubernetes/pki/etcd +ln -s /etc/etcd/ssl/* /etc/kubernetes/pki/etcd/ + +systemctl daemon-reload +# 用于重新加载systemd管理的单位文件。当你新增或修改了某个单位文件(如.service文件、.socket文件等),需要运行该命令来刷新systemd对该文件的配置。 + +systemctl enable --now etcd.service +# 启用并立即启动etcd.service单元。etcd.service是etcd守护进程的systemd服务单元。 + +systemctl restart etcd.service +# 重启etcd.service单元,即重新启动etcd守护进程。 + +systemctl status etcd.service +# etcd.service单元的当前状态,包括运行状态、是否启用等信息。 +``` + +### 4.2.3查看etcd状态 + +```shell +# 如果要用IPv6那么把IPv4地址修改为IPv6即可 +export ETCDCTL_API=3 +etcdctl --endpoints="192.168.0.33:2379,192.168.0.32:2379,192.168.0.31:2379" --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem endpoint status --write-out=table ++-------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+ +| ENDPOINT | ID | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS | 
++-------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
+| 192.168.0.33:2379 | 6ae2196f75cd6d95 |   3.5.9 |   20 kB |     false |      false |         2 |          9 |                  9 |        |
+| 192.168.0.32:2379 | 46cbf93f7713a252 |   3.5.9 |   20 kB |     false |      false |         2 |          9 |                  9 |        |
+| 192.168.0.31:2379 | ec6051ffc7487dd7 |   3.5.9 |   20 kB |      true |      false |         2 |          9 |                  9 |        |
++-------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
+
+# 这个命令是使用etcdctl工具,用于查看指定etcd集群的健康状态。下面是每个参数的详细解释:
+#
+# - `--endpoints`:指定要连接的etcd集群节点的地址和端口。在这个例子中,指定了3个节点的地址和端口,分别是`192.168.0.33:2379,192.168.0.32:2379,192.168.0.31:2379`。
+# - `--cacert`:指定用于验证etcd服务器证书的CA证书的路径。在这个例子中,指定了CA证书的路径为`/etc/kubernetes/pki/etcd/etcd-ca.pem`。CA证书用于验证etcd服务器证书的有效性。
+# - `--cert`:指定用于与etcd服务器进行通信的客户端证书的路径。在这个例子中,指定了客户端证书的路径为`/etc/kubernetes/pki/etcd/etcd.pem`。客户端证书用于在与etcd服务器建立安全通信时进行身份验证。
+# - `--key`:指定与客户端证书配对的私钥的路径。在这个例子中,指定了私钥的路径为`/etc/kubernetes/pki/etcd/etcd-key.pem`。私钥用于对通信进行加密解密和签名验证。
+# - `endpoint status`:子命令,用于检查etcd集群节点的健康状态。
+# - `--write-out`:指定输出的格式。在这个例子中,指定以表格形式输出。
+#
+# 通过执行这个命令,可以获取到etcd集群节点的健康状态,并以表格形式展示。
+
+
+# 注意:在 ETCDCTL_API=3 下没有 cluster-health 子命令(它属于 v2 API),检查节点健康状态应使用 endpoint health
+etcdctl --endpoints="192.168.0.33:2379,192.168.0.32:2379,192.168.0.31:2379" --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem endpoint health --write-out=table
+```
+
+
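排查集群问题时,还可以顺带确认一下成员列表信息(示例命令,非必需):
+
+```shell
+etcdctl --endpoints="192.168.0.33:2379,192.168.0.32:2379,192.168.0.31:2379" \
+  --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem \
+  --cert=/etc/kubernetes/pki/etcd/etcd.pem \
+  --key=/etc/kubernetes/pki/etcd/etcd-key.pem \
+  member list --write-out=table
+```
+
+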
# 5.高可用配置(在Master服务器上操作)
+
+**注意:5.1 和 5.2 二选一即可**
+
+选择使用哪种高可用方案,也可以两种都选用,实现内外兼顾的效果,比如:
+5.1 的 NGINX方案实现集群内的高可用
+5.2 的 haproxy、keepalived 方案实现集群外访问
+
+在《3.2.生成k8s相关证书》
+
+若使用 nginx方案,那么为 `--server=https://127.0.0.1:8443`
+若使用 haproxy、keepalived 那么为 `--server=https://192.168.0.36:9443`
+
+## 5.1 NGINX高可用方案
+### 5.1.1 进行编译
+```shell
+# 安装编译环境
+yum install gcc -y
+
+# 下载解压nginx二进制文件
+# wget http://nginx.org/download/nginx-1.25.1.tar.gz
+tar xvf nginx-*.tar.gz
+cd nginx-*
+
+# 进行编译
+./configure --with-stream --without-http --without-http_uwsgi_module --without-http_scgi_module --without-http_fastcgi_module
+make && make install
+
+# 拷贝编译好的nginx
+node='k8s-master02 k8s-master03 k8s-node01 k8s-node02'
+for NODE in $node; do scp -r /usr/local/nginx/ $NODE:/usr/local/nginx/; done
+
+# 这是一系列命令行指令,用于编译和安装软件。
+#
+# 1. `./configure` 是用于配置软件的命令。在这个例子中,配置的软件是一个Web服务器,指定了一些选项来启用流模块,并禁用了HTTP、uwsgi、scgi和fastcgi模块。
+# 2. `--with-stream` 指定启用流模块。流模块通常用于代理TCP和UDP流量。
+# 3. `--without-http` 指定禁用HTTP模块。这意味着编译的软件将没有HTTP服务器功能。
+# 4. `--without-http_uwsgi_module` 指定禁用uwsgi模块。uwsgi是一种Web服务器和应用服务器之间的通信协议。
+# 5. `--without-http_scgi_module` 指定禁用scgi模块。scgi是一种用于将Web服务器请求传递到应用服务器的协议。
+# 6. `--without-http_fastcgi_module` 指定禁用fastcgi模块。fastcgi是一种用于在Web服务器和应用服务器之间交换数据的协议。
+# 7. `make` 是用于编译软件的命令。该命令将根据之前的配置生成可执行文件。
+# 8. `make install` 用于安装软件。该命令将生成的可执行文件和其他必要文件复制到系统的适当位置,以便可以使用该软件。
+#
+# 总之,这个命令序列用于编译一个配置了特定选项的Web服务器,并将其安装到系统中。
+```
+
+### 5.1.2 写入启动配置
+在所有主机上执行
+```shell
+# 写入nginx配置文件
+cat > /usr/local/nginx/conf/kube-nginx.conf <<EOF
+worker_processes 1;
+events {
+    worker_connections  1024;
+}
+stream {
+    upstream backend {
+        least_conn;
+        hash \$remote_addr consistent;
+        server 192.168.0.31:6443        max_fails=3 fail_timeout=30s;
+        server 192.168.0.32:6443        max_fails=3 fail_timeout=30s;
+        server 192.168.0.33:6443        max_fails=3 fail_timeout=30s;
+    }
+    server {
+        listen 127.0.0.1:8443;
+        proxy_connect_timeout 1s;
+        proxy_pass backend;
+    }
+}
+EOF
+
+# 写入启动配置文件
+cat > /etc/systemd/system/kube-nginx.service <<EOF
+[Unit]
+Description=kube-apiserver nginx proxy
+After=network.target
+After=network-online.target
+Wants=network-online.target
+
+[Service]
+Type=forking
+ExecStartPre=/usr/local/nginx/sbin/nginx -c /usr/local/nginx/conf/kube-nginx.conf -p /usr/local/nginx -t
+ExecStart=/usr/local/nginx/sbin/nginx -c /usr/local/nginx/conf/kube-nginx.conf -p /usr/local/nginx
+ExecReload=/usr/local/nginx/sbin/nginx -c /usr/local/nginx/conf/kube-nginx.conf -p /usr/local/nginx -s reload
+PrivateTmp=true
+Restart=always
+RestartSec=5
+StartLimitInterval=0
+LimitNOFILE=65536
+
+[Install]
+WantedBy=multi-user.target
+EOF
+
+# 设置开机自启并启动
+systemctl daemon-reload
+systemctl enable --now kube-nginx.service
+systemctl status kube-nginx.service
+```
+
+## 5.2 keepalived和haproxy高可用方案
+
+### 5.2.1安装keepalived和haproxy服务
+
+```shell
+yum -y install keepalived haproxy
+```
+
+### 5.2.2修改haproxy配置文件(配置文件一样)
+
+```shell
+# cp /etc/haproxy/haproxy.cfg /etc/haproxy/haproxy.cfg.bak
+
+cat >/etc/haproxy/haproxy.cfg<<"EOF"
+global
+  maxconn 2000
+  ulimit-n 16384
+  log 127.0.0.1 local0 err
+  stats timeout 30s
+
+defaults
+  log global
+  mode http
+  option httplog
+  timeout connect 5000
+  timeout client 50000
+  timeout server 50000
+  timeout http-request 15s
+  timeout http-keep-alive 15s
+
+
+frontend monitor-in
+  bind *:33305
+  mode http
+  option httplog
+  monitor-uri /monitor
+
+frontend k8s-master
+  bind 0.0.0.0:9443
+  bind 127.0.0.1:9443
+  mode tcp
+  option tcplog
+  tcp-request inspect-delay 5s
+  default_backend k8s-master
+
+
+backend k8s-master
+  mode tcp
+  option tcplog
+  option tcp-check
+  balance roundrobin
+  default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
+  server  k8s-master01  192.168.0.31:6443 check
+  server  k8s-master02  192.168.0.32:6443 check
+  server  k8s-master03  192.168.0.33:6443 check
+EOF
+```
+参数
+
+```shell
+这段配置代码是指定了一个HAProxy负载均衡器的配置。下面对各部分进行详细解释:
+1. global:
+   - maxconn 2000: 设置每个进程的最大连接数为2000。
+   - ulimit-n 16384: 设置每个进程的最大文件描述符数为16384。
+   - log 127.0.0.1 local0 err: 指定日志的输出地址为本地主机的127.0.0.1,并且只记录错误级别的日志。
+   - stats timeout 30s: 设置查看负载均衡器统计信息的超时时间为30秒。
+
+2. defaults:
+   - log global: 使默认日志与global部分相同。
+   - mode http: 设定负载均衡器的工作模式为HTTP模式。
+   - option httplog: 使负载均衡器记录HTTP协议的日志。
+   - timeout connect 5000: 设置与后端服务器建立连接的超时时间为5秒。
+   - timeout client 50000: 设置与客户端的连接超时时间为50秒。
+   - timeout server 50000: 设置与后端服务器连接的超时时间为50秒。
+   - timeout http-request 15s: 设置处理HTTP请求的超时时间为15秒。
+   - timeout http-keep-alive 15s: 设置保持HTTP连接的超时时间为15秒。
+
+3. frontend monitor-in:
+   - bind *:33305: 监听所有IP地址的33305端口。
+   - mode http: 设定frontend的工作模式为HTTP模式。
+   - option httplog: 记录HTTP协议的日志。
+   - monitor-uri /monitor: 设置监控URI为/monitor。
+
+4. frontend k8s-master:
+   - bind 0.0.0.0:9443: 监听所有IP地址的9443端口。
+   - bind 127.0.0.1:9443: 监听本地主机的9443端口。
+   - mode tcp: 设定frontend的工作模式为TCP模式。
+   - option tcplog: 记录TCP协议的日志。
+   - tcp-request inspect-delay 5s: 设置在接收到请求后延迟5秒进行检查。
+   - default_backend k8s-master: 设置默认的后端服务器组为k8s-master。
+
+5. backend k8s-master:
+   - mode tcp: 设定backend的工作模式为TCP模式。
+   - option tcplog: 记录TCP协议的日志。
+   - option tcp-check: 启用TCP检查功能。
+   - balance roundrobin: 使用轮询算法进行负载均衡。
+   - default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100: 设置默认的服务器参数。
+   - server k8s-master01 192.168.0.31:6443 check: 增加一个名为k8s-master01的服务器,IP地址为192.168.0.31,端口号为6443,并对其进行健康检查。
+   - server k8s-master02 192.168.0.32:6443 check: 增加一个名为k8s-master02的服务器,IP地址为192.168.0.32,端口号为6443,并对其进行健康检查。
+   - server k8s-master03 192.168.0.33:6443 check: 增加一个名为k8s-master03的服务器,IP地址为192.168.0.33,端口号为6443,并对其进行健康检查。
+
+以上就是这段配置代码的详细解释。它主要定义了全局配置、默认配置、前端监听和后端服务器组的相关参数和设置。通过这些配置,可以实现负载均衡和监控功能。
+```
+
+### 5.2.3Master01配置keepalived master节点
+
+```shell
+#cp /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.bak
+
+cat > /etc/keepalived/keepalived.conf << EOF
+! 
Configuration File for keepalived + +global_defs { + router_id LVS_DEVEL +} +vrrp_script chk_apiserver { + script "/etc/keepalived/check_apiserver.sh" + interval 5 + weight -5 + fall 2 + rise 1 +} +vrrp_instance VI_1 { + state MASTER + # 注意网卡名 + interface eth0 + mcast_src_ip 192.168.0.31 + virtual_router_id 51 + priority 100 + nopreempt + advert_int 2 + authentication { + auth_type PASS + auth_pass K8SHA_KA_AUTH + } + virtual_ipaddress { + 192.168.0.36 + } + track_script { + chk_apiserver +} } + +EOF +``` + +### 5.2.4Master02配置keepalived backup节点 + +```shell +# cp /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.bak + +cat > /etc/keepalived/keepalived.conf << EOF +! Configuration File for keepalived + +global_defs { + router_id LVS_DEVEL +} +vrrp_script chk_apiserver { + script "/etc/keepalived/check_apiserver.sh" + interval 5 + weight -5 + fall 2 + rise 1 + +} +vrrp_instance VI_1 { + state BACKUP + # 注意网卡名 + interface eth0 + mcast_src_ip 192.168.0.32 + virtual_router_id 51 + priority 80 + nopreempt + advert_int 2 + authentication { + auth_type PASS + auth_pass K8SHA_KA_AUTH + } + virtual_ipaddress { + 192.168.0.36 + } + track_script { + chk_apiserver +} } + +EOF +``` + +### 5.2.5Master03配置keepalived backup节点 + +```shell +# cp /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.bak + +cat > /etc/keepalived/keepalived.conf << EOF +! Configuration File for keepalived + +global_defs { + router_id LVS_DEVEL +} +vrrp_script chk_apiserver { + script "/etc/keepalived/check_apiserver.sh" + interval 5 + weight -5 + fall 2 + rise 1 + +} +vrrp_instance VI_1 { + state BACKUP + # 注意网卡名 + interface eth0 + mcast_src_ip 192.168.0.33 + virtual_router_id 51 + priority 50 + nopreempt + advert_int 2 + authentication { + auth_type PASS + auth_pass K8SHA_KA_AUTH + } + virtual_ipaddress { + 192.168.0.36 + } + track_script { + chk_apiserver +} } + +EOF +``` +参数 + +```shell +这是一个用于配置keepalived的配置文件。下面是对每个部分的详细解释: + +- `global_defs`部分定义了全局参数。 +- `router_id`参数指定了当前路由器的标识,这里设置为"LVS_DEVEL"。 + +- `vrrp_script`部分定义了一个VRRP脚本。`chk_apiserver`是脚本的名称, + - `script`参数指定了脚本的路径。该脚本每5秒执行一次,返回值为0表示服务正常,返回值为1表示服务异常。 + - `weight`参数指定了根据脚本返回的值来调整优先级,这里设置为-5。 + - `fall`参数指定了失败阈值,当连续2次脚本返回值为1时认为服务异常。 + - `rise`参数指定了恢复阈值,当连续1次脚本返回值为0时认为服务恢复正常。 + +- `vrrp_instance`部分定义了一个VRRP实例。`VI_1`是实例的名称。 + - `state`参数指定了当前实例的状态,这里设置为MASTER表示当前实例是主节点。 + - `interface`参数指定了要监听的网卡,这里设置为eth0。 + - `mcast_src_ip`参数指定了VRRP报文的源IP地址,这里设置为192.168.0.31。 + - `virtual_router_id`参数指定了虚拟路由器的ID,这里设置为51。 + - `priority`参数指定了实例的优先级,优先级越高(数值越大)越有可能被选为主节点。 + - `nopreempt`参数指定了当主节点失效后不要抢占身份,即不要自动切换为主节点。 + - `advert_int`参数指定了发送广播的间隔时间,这里设置为2秒。 + - `authentication`部分指定了认证参数 + - `auth_type`参数指定了认证类型,这里设置为PASS表示使用密码认证, + - `auth_pass`参数指定了认证密码,这里设置为K8SHA_KA_AUTH。 + - `virtual_ipaddress`部分指定了虚拟IP地址,这里设置为192.168.0.36。 + - `track_script`部分指定了要跟踪的脚本,这里跟踪了chk_apiserver脚本。 +``` + + +### 5.2.6健康检查脚本配置(lb主机) + +```shell +cat > /etc/keepalived/check_apiserver.sh << EOF +#!/bin/bash + +err=0 +for k in \$(seq 1 3) +do + check_code=\$(pgrep haproxy) + if [[ \$check_code == "" ]]; then + err=\$(expr \$err + 1) + sleep 1 + continue + else + err=0 + break + fi +done + +if [[ \$err != "0" ]]; then + echo "systemctl stop keepalived" + /usr/bin/systemctl stop keepalived + exit 1 +else + exit 0 +fi +EOF + +# 给脚本授权 + +chmod +x /etc/keepalived/check_apiserver.sh + +# 这段脚本是一个简单的bash脚本,主要用来检查是否有名为haproxy的进程正在运行。 +# +# 脚本的主要逻辑如下: +# 1. 首先设置一个变量err为0,用来记录错误次数。 +# 2. 使用一个循环,在循环内部执行以下操作: +# a. 使用pgrep命令检查是否有名为haproxy的进程在运行。如果不存在该进程,将err加1,并暂停1秒钟,然后继续下一次循环。 +# b. 
如果存在haproxy进程,将err重置为0,并跳出循环。
+# 3. 检查err的值,如果不为0,表示检查失败,输出一条错误信息并执行"systemctl stop keepalived"命令停止keepalived进程,并退出脚本返回1。
+# 4. 如果err的值为0,表示检查成功,退出脚本返回0。
+#
+# 该脚本的主要作用是检查是否存在运行中的haproxy进程,如果无法检测到haproxy进程,将停止keepalived进程并返回错误状态。如果haproxy进程存在,则返回成功状态。这个脚本可能是作为一个健康检查脚本的一部分,在确保haproxy服务可用的情况下,才继续运行其他操作。
+```
+
+### 5.2.7启动服务
+
+```shell
+systemctl daemon-reload
+# 用于重新加载systemd管理的单位文件。当你新增或修改了某个单位文件(如.service文件、.socket文件等),需要运行该命令来刷新systemd对该文件的配置。
+systemctl enable --now haproxy.service
+# 启用并立即启动haproxy.service单元。haproxy.service是haproxy守护进程的systemd服务单元。
+systemctl enable --now keepalived.service
+# 启用并立即启动keepalived.service单元。keepalived.service是keepalived守护进程的systemd服务单元。
+systemctl status haproxy.service
+# haproxy.service单元的当前状态,包括运行状态、是否启用等信息。
+systemctl status keepalived.service
+# keepalived.service单元的当前状态,包括运行状态、是否启用等信息。
+```
+
+### 5.2.8测试高可用
+
+```shell
+# 能ping通
+
+[root@k8s-node02 ~]# ping 192.168.0.36
+
+# 能telnet访问
+
+[root@k8s-node02 ~]# telnet 192.168.0.36 9443
+
+# 关闭主节点,看vip是否漂移到备节点
+```
+
+# 6.k8s组件配置
+
+所有k8s节点创建以下目录
+
+```shell
+mkdir -p /etc/kubernetes/manifests/ /etc/systemd/system/kubelet.service.d /var/lib/kubelet /var/log/kubernetes
+```
+
+## 6.1.创建apiserver(所有master节点)
+
+### 6.1.1master01节点配置
+```shell
+cat > /usr/lib/systemd/system/kube-apiserver.service << EOF
+
+[Unit]
+Description=Kubernetes API Server
+Documentation=https://github.com/kubernetes/kubernetes
+After=network.target
+
+[Service]
+ExecStart=/usr/local/bin/kube-apiserver \\
+    --v=2 \\
+    --allow-privileged=true \\
+    --bind-address=0.0.0.0 \\
+    --secure-port=6443 \\
+    --advertise-address=192.168.0.31 \\
+    --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \\
+    --service-node-port-range=30000-32767 \\
+    --etcd-servers=https://192.168.0.31:2379,https://192.168.0.32:2379,https://192.168.0.33:2379 \\
+    --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem \\
+    --etcd-certfile=/etc/etcd/ssl/etcd.pem \\
+    --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \\
+    --client-ca-file=/etc/kubernetes/pki/ca.pem \\
+    --tls-cert-file=/etc/kubernetes/pki/apiserver.pem \\
+    --tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem \\
+    --kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem \\
+    --kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem \\
+    --service-account-key-file=/etc/kubernetes/pki/sa.pub \\
+    --service-account-signing-key-file=/etc/kubernetes/pki/sa.key \\
+    --service-account-issuer=https://kubernetes.default.svc.cluster.local \\
+    --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \\
+    --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota \\
+    --authorization-mode=Node,RBAC \\
+    --enable-bootstrap-token-auth=true \\
+    --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \\
+    --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem \\
+    --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem \\
+    --requestheader-allowed-names=aggregator \\
+    --requestheader-group-headers=X-Remote-Group \\
+    --requestheader-extra-headers-prefix=X-Remote-Extra- \\
+    --requestheader-username-headers=X-Remote-User \\
+    --enable-aggregator-routing=true
+Restart=on-failure
+RestartSec=10s
+LimitNOFILE=65535
+
+[Install]
+WantedBy=multi-user.target
+
+EOF
+```
+
+### 6.1.2master02节点配置
+```shell
+cat > /usr/lib/systemd/system/kube-apiserver.service << EOF
+[Unit]
+Description=Kubernetes API Server
+Documentation=https://github.com/kubernetes/kubernetes
+After=network.target
+
+[Service] +ExecStart=/usr/local/bin/kube-apiserver \\ + --v=2 \\ + --allow-privileged=true \\ + --bind-address=0.0.0.0 \\ + --secure-port=6443 \\ + --advertise-address=192.168.0.32 \\ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \\ + --service-node-port-range=30000-32767 \\ + --etcd-servers=https://192.168.0.31:2379,https://192.168.0.32:2379,https://192.168.0.33:2379 \\ + --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem \\ + --etcd-certfile=/etc/etcd/ssl/etcd.pem \\ + --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \\ + --client-ca-file=/etc/kubernetes/pki/ca.pem \\ + --tls-cert-file=/etc/kubernetes/pki/apiserver.pem \\ + --tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem \\ + --kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem \\ + --kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem \\ + --service-account-key-file=/etc/kubernetes/pki/sa.pub \\ + --service-account-signing-key-file=/etc/kubernetes/pki/sa.key \\ + --service-account-issuer=https://kubernetes.default.svc.cluster.local \\ + --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \\ + --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota \\ + --authorization-mode=Node,RBAC \\ + --enable-bootstrap-token-auth=true \\ + --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \\ + --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem \\ + --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem \\ + --requestheader-allowed-names=aggregator \\ + --requestheader-group-headers=X-Remote-Group \\ + --requestheader-extra-headers-prefix=X-Remote-Extra- \\ + --requestheader-username-headers=X-Remote-User \\ + --enable-aggregator-routing=true + +Restart=on-failure +RestartSec=10s +LimitNOFILE=65535 + +[Install] +WantedBy=multi-user.target + +EOF +``` + +### 6.1.3master03节点配置 +```shell +cat > /usr/lib/systemd/system/kube-apiserver.service << EOF + +[Unit] +Description=Kubernetes API Server +Documentation=https://github.com/kubernetes/kubernetes +After=network.target + +[Service] +ExecStart=/usr/local/bin/kube-apiserver \\ + --v=2 \\ + --allow-privileged=true \\ + --bind-address=0.0.0.0 \\ + --secure-port=6443 \\ + --advertise-address=192.168.0.33 \\ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \\ + --service-node-port-range=30000-32767 \\ + --etcd-servers=https://192.168.0.31:2379,https://192.168.0.32:2379,https://192.168.0.33:2379 \\ + --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem \\ + --etcd-certfile=/etc/etcd/ssl/etcd.pem \\ + --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \\ + --client-ca-file=/etc/kubernetes/pki/ca.pem \\ + --tls-cert-file=/etc/kubernetes/pki/apiserver.pem \\ + --tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem \\ + --kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem \\ + --kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem \\ + --service-account-key-file=/etc/kubernetes/pki/sa.pub \\ + --service-account-signing-key-file=/etc/kubernetes/pki/sa.key \\ + --service-account-issuer=https://kubernetes.default.svc.cluster.local \\ + --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \\ + --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota \\ + --authorization-mode=Node,RBAC \\ + --enable-bootstrap-token-auth=true \\ + --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \\ + 
--proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem \\
+    --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem \\
+    --requestheader-allowed-names=aggregator \\
+    --requestheader-group-headers=X-Remote-Group \\
+    --requestheader-extra-headers-prefix=X-Remote-Extra- \\
+    --requestheader-username-headers=X-Remote-User \\
+    --enable-aggregator-routing=true
+
+Restart=on-failure
+RestartSec=10s
+LimitNOFILE=65535
+
+[Install]
+WantedBy=multi-user.target
+
+EOF
+```
+参数
+```shell
+该配置文件是用于定义Kubernetes API Server的systemd服务的配置。systemd是一个用于启动和管理Linux系统服务的守护进程。
+
+[Unit]
+- Description: 服务的描述信息,用于显示在日志和系统管理工具中。
+- Documentation: 提供关于服务的文档链接。
+- After: 规定服务依赖于哪些其他服务或单元。在这个例子中,API Server服务在网络目标启动之后启动。
+
+[Service]
+- ExecStart: 定义服务的命令行参数和命令。这里指定了API Server的启动命令,包括各种参数选项。
+- Restart: 指定当服务退出时应该如何重新启动。在这个例子中,服务在失败时将被重新启动。
+- RestartSec: 指定两次重新启动之间的等待时间。
+- LimitNOFILE: 指定进程可以打开的文件描述符的最大数量。
+
+[Install]
+- WantedBy: 指定服务应该安装到哪个系统目标。在这个例子中,服务将被安装到multi-user.target目标,以便在多用户模式下启动。
+
+上述配置文件中定义的kube-apiserver服务将以指定的参数运行,这些参数包括:
+
+- `--v=2` 指定日志级别为2,打印详细的API Server日志。
+- `--allow-privileged=true` 允许特权容器运行。
+- `--bind-address=0.0.0.0` 绑定API Server监听的IP地址。
+- `--secure-port=6443` 指定API Server监听的安全端口。
+- `--advertise-address=192.168.0.31` 对外通告的API Server地址(各master节点填写自己的IP)。
+- `--service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112` 指定服务CIDR范围。
+- `--service-node-port-range=30000-32767` 指定NodePort的范围。
+- `--etcd-servers=https://192.168.0.31:2379,https://192.168.0.32:2379,https://192.168.0.33:2379` 指定etcd服务器的地址。
+- `--etcd-cafile` 指定etcd服务器的CA证书。
+- `--etcd-certfile` 指定etcd服务器的证书。
+- `--etcd-keyfile` 指定etcd服务器的私钥。
+- `--client-ca-file` 指定客户端CA证书。
+- `--tls-cert-file` 指定服务的证书。
+- `--tls-private-key-file` 指定服务的私钥。
+- `--kubelet-client-certificate` 和 `--kubelet-client-key` 指定与kubelet通信的客户端证书和私钥。
+- `--service-account-key-file` 指定服务账户公钥文件。
+- `--service-account-signing-key-file` 指定服务账户签名密钥文件。
+- `--service-account-issuer` 指定服务账户的发布者。
+- `--kubelet-preferred-address-types` 指定kubelet通信时的首选地址类型。
+- `--enable-admission-plugins` 启用一系列准入插件。
+- `--authorization-mode` 指定授权模式。
+- `--enable-bootstrap-token-auth` 启用引导令牌认证。
+- `--requestheader-client-ca-file` 指定请求头中的客户端CA证书。
+- `--proxy-client-cert-file` 和 `--proxy-client-key-file` 指定代理客户端的证书和私钥。
+- `--requestheader-allowed-names` 指定请求头中允许的名字。
+- `--requestheader-group-headers` 指定请求头中的组头。
+- `--requestheader-extra-headers-prefix` 指定请求头中的额外头前缀。
+- `--requestheader-username-headers` 指定请求头中的用户名头。
+- `--enable-aggregator-routing` 启用聚合路由。
+
+整个配置文件为Kubernetes API Server提供了必要的参数,以便正确地启动和运行。
+```
+
+
+### 6.1.4启动apiserver(所有master节点)
+
+```shell
+systemctl daemon-reload
+# 用于重新加载systemd管理的单位文件。当你新增或修改了某个单位文件(如.service文件、.socket文件等),需要运行该命令来刷新systemd对该文件的配置。
+
+systemctl enable --now kube-apiserver.service
+# 启用并立即启动kube-apiserver.service单元。kube-apiserver.service是kube-apiserver守护进程的systemd服务单元。
+
+systemctl restart kube-apiserver.service
+# 重启kube-apiserver.service单元,即重新启动kube-apiserver守护进程。
+
+systemctl status kube-apiserver.service
+# kube-apiserver.service单元的当前状态,包括运行状态、是否启用等信息。
+```
+
+
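三台master都启动后,可以先在本机用证书访问健康检查接口,确认 apiserver 正常(示例命令,使用前面生成的 admin 证书做客户端认证,非必需):
+
+```shell
+curl --cacert /etc/kubernetes/pki/ca.pem \
+  --cert /etc/kubernetes/pki/admin.pem \
+  --key /etc/kubernetes/pki/admin-key.pem \
+  https://127.0.0.1:6443/healthz
+# 正常情况下会返回 ok
+```
+
+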
## 6.2.配置kube-controller-manager service
+```shell
+# 所有master节点配置,且配置相同
+# 172.16.0.0/12为pod网段,按需求设置你自己的网段
+
+cat > /usr/lib/systemd/system/kube-controller-manager.service << EOF
+
+[Unit]
+Description=Kubernetes Controller Manager
+Documentation=https://github.com/kubernetes/kubernetes
+After=network.target
+
+[Service]
+ExecStart=/usr/local/bin/kube-controller-manager \\
+    --v=2 \\
+    --bind-address=0.0.0.0 \\
+    --root-ca-file=/etc/kubernetes/pki/ca.pem \\
+    --cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem \\
+    --cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem \\
+    --service-account-private-key-file=/etc/kubernetes/pki/sa.key \\
+    --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig \\
+    --leader-elect=true \\
+    --use-service-account-credentials=true \\
+    --node-monitor-grace-period=40s \\
+    --node-monitor-period=5s \\
+    --controllers=*,bootstrapsigner,tokencleaner \\
+    --allocate-node-cidrs=true \\
+    --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \\
+    --cluster-cidr=172.16.0.0/12,fc00:2222::/112 \\
+    --node-cidr-mask-size-ipv4=24 \\
+    --node-cidr-mask-size-ipv6=120 \\
+    --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem
+
+Restart=always
+RestartSec=10s
+
+[Install]
+WantedBy=multi-user.target
+
+EOF
+```
+参数
+```shell
+这是一个用于启动 Kubernetes 控制器管理器的 systemd 服务单元文件。下面是对每个部分的详细解释:
+
+[Unit]:单元的基本信息部分,用于描述和标识这个服务单元。
+Description:服务单元的描述信息,说明了该服务单元的作用,这里是 Kubernetes 控制器管理器。
+Documentation:可选项,提供了关于该服务单元的文档链接。
+After:定义了该服务单元在哪些其他单元之后启动,这里是 network.target,即在网络服务启动之后启动。
+
+[Service]:定义了服务的运行参数和行为。
+ExecStart:指定服务启动时执行的命令,这里是 /usr/local/bin/kube-controller-manager,并通过后续的行继续传递了一系列的参数设置。
+Restart:定义了服务在退出后的重新启动策略,这里设置为 always,表示总是重新启动服务。
+RestartSec:定义了重新启动服务的时间间隔,这里设置为 10 秒。
+
+[Install]:定义了如何安装和启用服务单元。
+WantedBy:指定了服务单元所属的 target,这里是 multi-user.target,表示启动多用户模式下的服务。
+在 ExecStart 中传递的参数说明如下:
+
+--v=2:设置日志的详细级别为 2。
+--bind-address=0.0.0.0:绑定的 IP 地址,用于监听 Kubernetes 控制平面的请求,这里设置为 0.0.0.0,表示监听所有网络接口上的请求。
+--root-ca-file:根证书文件的路径,用于验证其他组件的证书。
+--cluster-signing-cert-file:用于签名集群证书的证书文件路径。
+--cluster-signing-key-file:用于签名集群证书的私钥文件路径。
+--service-account-private-key-file:用于签名服务账户令牌的私钥文件路径。
+--kubeconfig:kubeconfig 文件的路径,包含了与 Kubernetes API 服务器通信所需的配置信息。
+--leader-elect=true:启用 Leader 选举机制,确保只有一个控制器管理器作为 leader 在运行。
+--use-service-account-credentials=true:使用服务账户的凭据进行认证和授权。
+--node-monitor-grace-period=40s:节点监控的优雅退出时间,节点长时间不响应时会触发节点驱逐。
+--node-monitor-period=5s:节点监控的检测周期,用于检测节点是否正常运行。
+--controllers:指定要运行的控制器类型,在这里使用了通配符 *,表示运行所有的控制器,同时还包括了 bootstrapsigner 和 tokencleaner 控制器。
+--allocate-node-cidrs=true:为节点分配 CIDR 子网,用于分配 Pod 网络地址。
+--service-cluster-ip-range:定义 Service 的 IP 范围,这里设置为 10.96.0.0/12 和 fd00:1111::/112。
+--cluster-cidr:定义集群的 CIDR 范围,这里设置为 172.16.0.0/12 和 fc00:2222::/112。
+--node-cidr-mask-size-ipv4:分配给每个节点的 IPv4 子网掩码大小,这里是 24。
+--node-cidr-mask-size-ipv6:分配给每个节点的 IPv6 子网掩码大小,这里是 120。
+--requestheader-client-ca-file:设置请求头中客户端 CA 的证书文件路径,用于认证请求头中的 CA 证书。
+
+这个服务单元文件描述了 Kubernetes 控制器管理器的启动参数和行为,并且定义了服务的依赖关系和重新启动策略。通过 systemd 启动该服务单元,即可启动 Kubernetes 控制器管理器组件。
+```
+### 6.2.1启动kube-controller-manager,并查看状态
+
+```shell
+systemctl daemon-reload
+# 用于重新加载systemd管理的单位文件。当你新增或修改了某个单位文件(如.service文件、.socket文件等),需要运行该命令来刷新systemd对该文件的配置。
+
+systemctl enable --now kube-controller-manager.service
+# 启用并立即启动kube-controller-manager.service单元。kube-controller-manager.service是kube-controller-manager守护进程的systemd服务单元。
+
+systemctl restart kube-controller-manager.service
+# 重启kube-controller-manager.service单元,即重新启动kube-controller-manager守护进程。
+
+systemctl status kube-controller-manager.service
+# kube-controller-manager.service单元的当前状态,包括运行状态、是否启用等信息。
+```
+
+
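kube-controller-manager 启用了 --leader-elect,三台实例中只会有一台成为 leader。等第 7 章把 kubectl 配置好之后,可以通过 kube-system 下的 Lease 对象确认当前 leader(示例命令,非必需):
+
+```shell
+kubectl get lease -n kube-system kube-controller-manager
+# 输出中的 HOLDER 字段即当前持有 leader 的实例
+```
+
+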
## 6.3.配置kube-scheduler service
+
+### 6.3.1所有master节点配置,且配置相同
+
+```shell
+cat > /usr/lib/systemd/system/kube-scheduler.service << EOF
+
+[Unit]
+Description=Kubernetes Scheduler
+Documentation=https://github.com/kubernetes/kubernetes
+After=network.target
+
+[Service]
+ExecStart=/usr/local/bin/kube-scheduler \\
+    --v=2 \\
+    --bind-address=0.0.0.0 \\
+    --leader-elect=true \\
+    --kubeconfig=/etc/kubernetes/scheduler.kubeconfig
+
+Restart=always
+RestartSec=10s
+
+[Install]
+WantedBy=multi-user.target
+
+EOF
+```
+参数
+```shell
+这是一个用于启动 Kubernetes 调度器的 systemd 服务单元文件。下面是对每个部分的详细解释:
+
+[Unit]:单元的基本信息部分,用于描述和标识这个服务单元。
+Description:服务单元的描述信息,说明了该服务单元的作用,这里是 Kubernetes 调度器。
+Documentation:可选项,提供了关于该服务单元的文档链接。
+After:定义了该服务单元在哪些其他单元之后启动,这里是 network.target,即在网络服务启动之后启动。
+
+[Service]:定义了服务的运行参数和行为。
+ExecStart:指定服务启动时执行的命令,这里是 /usr/local/bin/kube-scheduler,并通过后续的行继续传递了一系列的参数设置。
+Restart:定义了服务在退出后的重新启动策略,这里设置为 always,表示总是重新启动服务。
+RestartSec:定义了重新启动服务的时间间隔,这里设置为 10 秒。
+
+[Install]:定义了如何安装和启用服务单元。
+WantedBy:指定了服务单元所属的 target,这里是 multi-user.target,表示启动多用户模式下的服务。
+
+在 ExecStart 中传递的参数说明如下:
+
+--v=2:设置日志的详细级别为 2。
+--bind-address=0.0.0.0:绑定的 IP 地址,用于监听 Kubernetes 控制平面的请求,这里设置为 0.0.0.0,表示监听所有网络接口上的请求。
+--leader-elect=true:启用 Leader 选举机制,确保只有一个调度器作为 leader 在运行。
+--kubeconfig=/etc/kubernetes/scheduler.kubeconfig:kubeconfig 文件的路径,包含了与 Kubernetes API 服务器通信所需的配置信息。
+
+这个服务单元文件描述了 Kubernetes 调度器的启动参数和行为,并且定义了服务的依赖关系和重新启动策略。通过 systemd 启动该服务单元,即可启动 Kubernetes 调度器组件。
+```
+### 6.3.2启动并查看服务状态
+
+```shell
+systemctl daemon-reload
+# 用于重新加载systemd管理的单位文件。当你新增或修改了某个单位文件(如.service文件、.socket文件等),需要运行该命令来刷新systemd对该文件的配置。
+
+systemctl enable --now kube-scheduler.service
+# 启用并立即启动kube-scheduler.service单元。kube-scheduler.service是kube-scheduler守护进程的systemd服务单元。
+
+systemctl restart kube-scheduler.service
+# 重启kube-scheduler.service单元,即重新启动kube-scheduler守护进程。
+
+systemctl status kube-scheduler.service
+# kube-scheduler.service单元的当前状态,包括运行状态、是否启用等信息。
+```
+
+# 7.TLS Bootstrapping配置
+
+## 7.1在master01上配置
+
+```shell
+# 在《5.高可用配置》选择使用哪种高可用方案
+# 若使用 haproxy、keepalived 那么为 `--server=https://192.168.0.36:8443`
+# 若使用 nginx方案,那么为 `--server=https://127.0.0.1:8443`
+
+cd bootstrap
+
+kubectl config set-cluster kubernetes \
+--certificate-authority=/etc/kubernetes/pki/ca.pem \
+--embed-certs=true --server=https://127.0.0.1:8443 \
+--kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
+# 这是一个使用 kubectl 命令设置 Kubernetes 集群配置的命令示例。下面是对每个选项的详细解释:
+#
+# config set-cluster kubernetes:指定要设置的集群名称为 "kubernetes",表示要修改名为 "kubernetes" 的集群配置。
+# --certificate-authority=/etc/kubernetes/pki/ca.pem:指定证书颁发机构(CA)的证书文件路径,用于验证服务器证书的有效性。
+# --embed-certs=true:将证书文件嵌入到生成的 kubeconfig 文件中。这样可以避免在 kubeconfig 文件中引用外部证书文件。
+# --server=https://127.0.0.1:8443:指定 Kubernetes API 服务器的地址和端口,这里使用的是 https 协议和本地地址(127.0.0.1),端口号为 8443。你可以根据实际环境修改该参数。
+# --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig:指定 kubeconfig 文件的路径和名称,这里是 /etc/kubernetes/bootstrap-kubelet.kubeconfig。
+# 通过执行此命令,你可以设置名为 "kubernetes" 的集群配置,并提供 CA 证书、API 服务器地址和端口,并将这些配置信息嵌入到 bootstrap-kubelet.kubeconfig 文件中。这个 kubeconfig 文件可以用于认证和授权 kubelet 组件与 Kubernetes API 服务器之间的通信。请确保路径和文件名与实际环境中的配置相匹配。
+
+kubectl config set-credentials tls-bootstrap-token-user \
+--token=c8ad9c.2e4d610cf3e7426e \
+--kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
+# 这是一个使用 kubectl 命令设置凭证信息的命令示例。下面是对每个选项的详细解释:
+#
+# config set-credentials tls-bootstrap-token-user:指定要设置的凭证名称为 "tls-bootstrap-token-user",表示要修改名为 "tls-bootstrap-token-user" 的用户凭证配置。
+# --token=c8ad9c.2e4d610cf3e7426e:指定用户的身份验证令牌(token)。在这个示例中,令牌是 c8ad9c.2e4d610cf3e7426e。你可以根据实际情况修改该令牌。
+# --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig:指定 kubeconfig 文件的路径和名称,这里是 /etc/kubernetes/bootstrap-kubelet.kubeconfig。
+# 通过执行此命令,你可以设置名为 "tls-bootstrap-token-user" 的用户凭证,并将令牌信息加入到 bootstrap-kubelet.kubeconfig 文件中。这个 kubeconfig 文件可以用于认证和授权 kubelet 组件与 Kubernetes API 服务器之间的通信。请确保路径和文件名与实际环境中的配置相匹配。
+
+kubectl config set-context 
tls-bootstrap-token-user@kubernetes \ +--cluster=kubernetes \ +--user=tls-bootstrap-token-user \ +--kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig +# 这是一个使用 kubectl 命令设置上下文信息的命令示例。下面是对每个选项的详细解释: +# +# config set-context tls-bootstrap-token-user@kubernetes:指定要设置的上下文名称为 "tls-bootstrap-token-user@kubernetes",表示要修改名为 "tls-bootstrap-token-user@kubernetes" 的上下文配置。 +# --cluster=kubernetes:指定上下文关联的集群名称为 "kubernetes",表示使用名为 "kubernetes" 的集群配置。 +# --user=tls-bootstrap-token-user:指定上下文关联的用户凭证名称为 "tls-bootstrap-token-user",表示使用名为 "tls-bootstrap-token-user" 的用户凭证配置。 +# --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig:指定 kubeconfig 文件的路径和名称,这里是 /etc/kubernetes/bootstrap-kubelet.kubeconfig。 +# 通过执行此命令,你可以设置名为 "tls-bootstrap-token-user@kubernetes" 的上下文,并将其关联到名为 "kubernetes" 的集群配置和名为 "tls-bootstrap-token-user" 的用户凭证配置。这样,bootstrap-kubelet.kubeconfig 文件就包含了完整的上下文信息,可以用于指定与 Kubernetes 集群建立连接时要使用的集群和凭证。请确保路径和文件名与实际环境中的配置相匹配。 + +kubectl config use-context tls-bootstrap-token-user@kubernetes \ +--kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig +# 这是一个使用 kubectl 命令设置当前上下文的命令示例。下面是对每个选项的详细解释: +# +# config use-context tls-bootstrap-token-user@kubernetes:指定要使用的上下文名称为 "tls-bootstrap-token-user@kubernetes",表示要将当前上下文切换为名为 "tls-bootstrap-token-user@kubernetes" 的上下文。 +# --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig:指定 kubeconfig 文件的路径和名称,这里是 /etc/kubernetes/bootstrap-kubelet.kubeconfig。 +# 通过执行此命令,你可以将当前上下文设置为名为 "tls-bootstrap-token-user@kubernetes" 的上下文。这样,当你执行其他 kubectl 命令时,它们将使用该上下文与 Kubernetes 集群进行交互。请确保路径和文件名与实际环境中的配置相匹配。 + + +# token的位置在bootstrap.secret.yaml,如果修改的话到这个文件修改 +mkdir -p /root/.kube ; cp /etc/kubernetes/admin.kubeconfig /root/.kube/config +``` + +## 7.2查看集群状态,没问题的话继续后续操作 + +```shell +kubectl get cs +Warning: v1 ComponentStatus is deprecated in v1.19+ +NAME STATUS MESSAGE ERROR +scheduler Healthy ok +controller-manager Healthy ok +etcd-0 Healthy {"health":"true","reason":""} +etcd-2 Healthy {"health":"true","reason":""} +etcd-1 Healthy {"health":"true","reason":""} + +# 切记执行,别忘记!!! 
+kubectl create -f bootstrap.secret.yaml +``` + +# 8.node节点配置 + +## 8.1.在master01上将证书复制到node节点 + +```shell +cd /etc/kubernetes/ + +for NODE in k8s-master02 k8s-master03 k8s-node01 k8s-node02; do ssh $NODE mkdir -p /etc/kubernetes/pki; for FILE in pki/ca.pem pki/ca-key.pem pki/front-proxy-ca.pem bootstrap-kubelet.kubeconfig kube-proxy.kubeconfig; do scp /etc/kubernetes/$FILE $NODE:/etc/kubernetes/${FILE}; done; done +``` + +## 8.2.kubelet配置 + +**注意 : 8.2.1 和 8.2.2 需要和 上方 2.1 和 2.2 对应起来** + +### 8.2.1当使用docker作为Runtime + +```shell +cat > /usr/lib/systemd/system/kubelet.service << EOF + +[Unit] +Description=Kubernetes Kubelet +Documentation=https://github.com/kubernetes/kubernetes + +[Service] +ExecStart=/usr/local/bin/kubelet \\ + --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig \\ + --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \\ + --config=/etc/kubernetes/kubelet-conf.yml \\ + --container-runtime-endpoint=unix:///run/cri-dockerd.sock \\ + --node-labels=node.kubernetes.io/node= + +[Install] +WantedBy=multi-user.target +EOF + +# 这是一个表示 Kubernetes Kubelet 服务的 systemd 单位文件示例。下面是对每个节([Unit]、[Service]、[Install])的详细解释: +# +# [Unit] +# +# Description=Kubernetes Kubelet:指定了此单位文件对应的服务描述信息为 "Kubernetes Kubelet"。 +# Documentation=...:指定了对该服务的文档链接。 +# [Service] +# +# ExecStart=/usr/local/bin/kubelet ...:指定了启动 Kubelet 服务的命令和参数。这里使用的是 /usr/local/bin/kubelet 命令,并传递了一系列参数来配置 Kubelet 的运行。这些参数包括: +# --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig:指定了用于引导 kubelet 的 kubeconfig 文件的路径和名称。 +# --kubeconfig=/etc/kubernetes/kubelet.kubeconfig:指定了 kubelet 的 kubeconfig 文件的路径和名称。 +# --config=/etc/kubernetes/kubelet-conf.yml:指定了 kubelet 的配置文件的路径和名称。 +# --container-runtime-endpoint=unix:///run/cri-dockerd.sock:指定了容器运行时接口的端点地址,这里使用的是 Docker 运行时(cri-dockerd)的 UNIX 套接字。 +# --node-labels=node.kubernetes.io/node=:指定了节点的标签。这里的示例只给节点添加了一个简单的标签 node.kubernetes.io/node=。 +# [Install] +# +# WantedBy=multi-user.target:指定了在 multi-user.target 被启动时,该服务应该被启用。 +# 通过这个单位文件,你可以配置 Kubelet 服务的启动参数,指定相关的配置文件和凭证文件,以及定义节点的标签。请确认路径和文件名与你的实际环境中的配置相匹配。 +``` + +### 8.2.2当使用Containerd作为Runtime (推荐) + +```shell +mkdir -p /var/lib/kubelet /var/log/kubernetes /etc/systemd/system/kubelet.service.d /etc/kubernetes/manifests/ + +# 所有k8s节点配置kubelet service +cat > /usr/lib/systemd/system/kubelet.service << EOF + +[Unit] +Description=Kubernetes Kubelet +Documentation=https://github.com/kubernetes/kubernetes +After=containerd.service +Requires=containerd.service + +[Service] +ExecStart=/usr/local/bin/kubelet \\ + --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig \\ + --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \\ + --config=/etc/kubernetes/kubelet-conf.yml \\ + --container-runtime-endpoint=unix:///run/containerd/containerd.sock \\ + --node-labels=node.kubernetes.io/node= + +[Install] +WantedBy=multi-user.target +EOF + +# 这是一个表示 Kubernetes Kubelet 服务的 systemd 单位文件示例。与之前相比,添加了 After 和 Requires 字段来指定依赖关系。 +# +# [Unit] +# +# Description=Kubernetes Kubelet:指定了此单位文件对应的服务描述信息为 "Kubernetes Kubelet"。 +# Documentation=...:指定了对该服务的文档链接。 +# After=containerd.service:指定了该服务在 containerd.service 服务之后启动。这表示 Kubelet 服务依赖于 containerd 服务的启动。 +# Requires=containerd.service:指定了该服务需要 containerd.service 服务存在。这表示 Kubelet 服务依赖于 containerd 服务的存在。 +# [Service] +# +# ExecStart=/usr/local/bin/kubelet ...:指定了启动 Kubelet 服务的命令和参数,与之前的示例相同。 +# --container-runtime-endpoint=unix:///run/containerd/containerd.sock:修改了容器运行时接口的端点地址,将其更改为使用 containerd 运行时(通过 UNIX 套接字)。 +# [Install] +# +# WantedBy=multi-user.target:指定了在 
multi-user.target 被启动时,该服务应该被启用。
+# 通过这个单位文件,你可以配置 Kubelet 服务的启动参数,并指定了它依赖的 containerd 服务。确保路径和文件名与你实际环境中的配置相匹配。
+```
+
+
+### 8.2.3所有k8s节点创建kubelet的配置文件
+
+```shell
+cat > /etc/kubernetes/kubelet-conf.yml << EOF
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+address: 0.0.0.0
+port: 10250
+readOnlyPort: 10255
+authentication:
+  anonymous:
+    enabled: false
+  webhook:
+    cacheTTL: 2m0s
+    enabled: true
+  x509:
+    clientCAFile: /etc/kubernetes/pki/ca.pem
+authorization:
+  mode: Webhook
+  webhook:
+    cacheAuthorizedTTL: 5m0s
+    cacheUnauthorizedTTL: 30s
+cgroupDriver: systemd
+cgroupsPerQOS: true
+clusterDNS:
+- 10.96.0.10
+clusterDomain: cluster.local
+containerLogMaxFiles: 5
+containerLogMaxSize: 10Mi
+contentType: application/vnd.kubernetes.protobuf
+cpuCFSQuota: true
+cpuManagerPolicy: none
+cpuManagerReconcilePeriod: 10s
+enableControllerAttachDetach: true
+enableDebuggingHandlers: true
+enforceNodeAllocatable:
+- pods
+eventBurst: 10
+eventRecordQPS: 5
+evictionHard:
+  imagefs.available: 15%
+  memory.available: 100Mi
+  nodefs.available: 10%
+  nodefs.inodesFree: 5%
+evictionPressureTransitionPeriod: 5m0s
+failSwapOn: true
+fileCheckFrequency: 20s
+hairpinMode: promiscuous-bridge
+healthzBindAddress: 127.0.0.1
+healthzPort: 10248
+httpCheckFrequency: 20s
+imageGCHighThresholdPercent: 85
+imageGCLowThresholdPercent: 80
+imageMinimumGCAge: 2m0s
+iptablesDropBit: 15
+iptablesMasqueradeBit: 14
+kubeAPIBurst: 10
+kubeAPIQPS: 5
+makeIPTablesUtilChains: true
+maxOpenFiles: 1000000
+maxPods: 110
+nodeStatusUpdateFrequency: 10s
+oomScoreAdj: -999
+podPidsLimit: -1
+registryBurst: 10
+registryPullQPS: 5
+resolvConf: /etc/resolv.conf
+rotateCertificates: true
+runtimeRequestTimeout: 2m0s
+serializeImagePulls: true
+staticPodPath: /etc/kubernetes/manifests
+streamingConnectionIdleTimeout: 4h0m0s
+syncFrequency: 1m0s
+volumeStatsAggPeriod: 1m0s
+EOF
+```
+
+### 8.2.4启动kubelet
+
+```shell
+systemctl daemon-reload
+# 用于重新加载systemd管理的单位文件。当你新增或修改了某个单位文件(如.service文件、.socket文件等),需要运行该命令来刷新systemd对该文件的配置。
+
+systemctl enable --now kubelet.service
+# 启用并立即启动kubelet.service单元。kubelet.service是kubelet守护进程的systemd服务单元。
+
+systemctl restart kubelet.service
+# 重启kubelet.service单元,即重新启动kubelet守护进程。
+
+systemctl status kubelet.service
+# kubelet.service单元的当前状态,包括运行状态、是否启用等信息。
+```
+
+### 8.2.5查看集群
+
+```shell
+[root@k8s-master01 ~]# kubectl get node
+NAME           STATUS   ROLES    AGE   VERSION
+k8s-master01   Ready    <none>   18s   v1.28.0
+k8s-master02   Ready    <none>   16s   v1.28.0
+k8s-master03   Ready    <none>   16s   v1.28.0
+k8s-node01     Ready    <none>   14s   v1.28.0
+k8s-node02     Ready    <none>   14s   v1.28.0
+[root@k8s-master01 ~]#
+```
+
+### 8.2.6查看容器运行时
+
+```shell
+# 使用containerd作为Runtime时:
+[root@k8s-master01 ~]# kubectl describe node | grep Runtime
+  Container Runtime Version:  containerd://1.7.3
+  Container Runtime Version:  containerd://1.7.3
+  Container Runtime Version:  containerd://1.7.3
+  Container Runtime Version:  containerd://1.7.3
+  Container Runtime Version:  containerd://1.7.3
+
+# 使用docker作为Runtime时:
+[root@k8s-master01 ~]# kubectl describe node | grep Runtime
+  Container Runtime Version:  docker://24.0.5
+  Container Runtime Version:  docker://24.0.5
+  Container Runtime Version:  docker://24.0.5
+  Container Runtime Version:  docker://24.0.5
+  Container Runtime Version:  docker://24.0.5
+
+```
+
+
+## 8.3.kube-proxy配置
+
+### 8.3.1将kubeconfig发送至其他节点
+
+```shell
+# master-1执行
+for NODE in k8s-master02 k8s-master03 k8s-node01 k8s-node02; do scp /etc/kubernetes/kube-proxy.kubeconfig $NODE:/etc/kubernetes/kube-proxy.kubeconfig; done
+```
+
+### 8.3.2所有k8s节点添加kube-proxy的service文件
+
+```shell
+cat > /usr/lib/systemd/system/kube-proxy.service << EOF
+[Unit]
+Description=Kubernetes Kube Proxy
+Documentation=https://github.com/kubernetes/kubernetes
+After=network.target
+
+[Service]
+ExecStart=/usr/local/bin/kube-proxy \\
+    --config=/etc/kubernetes/kube-proxy.yaml \\
+    --v=2
+Restart=always
+RestartSec=10s
+
+[Install]
+WantedBy=multi-user.target
+
+EOF
+
+# 这是一个 systemd 服务单元文件的示例,用于配置 Kubernetes Kube Proxy 服务。下面是对其中一些字段的详细解释:
+#
+# [Unit]
+#
+# Description: 描述了该服务单元的用途,这里是 Kubernetes Kube Proxy。
+# Documentation: 指定了该服务单元的文档地址,即 https://github.com/kubernetes/kubernetes。
+# After: 指定该服务单元应在 network.target(网络目标)之后启动。
+# [Service]
+#
+# ExecStart: 指定了启动 Kube Proxy 服务的命令。通过 /usr/local/bin/kube-proxy 命令启动,并指定了配置文件的路径为 /etc/kubernetes/kube-proxy.yaml,同时指定了日志级别为 2。
+# Restart: 配置了服务在失败或退出后自动重启。
+# RestartSec: 配置了重启间隔,这里是每次重启之间的等待时间为 10 秒。
+# [Install]
+#
+# WantedBy: 指定了该服务单元的安装目标为 multi-user.target(多用户目标),表示该服务将在多用户模式下启动。
+# 通过配置这些字段,你可以启动和管理 Kubernetes Kube Proxy 服务。请注意,你需要根据实际情况修改 ExecStart 中的路径和文件名,确保与你的环境一致。另外,可以根据需求修改其他字段的值,以满足你的特定要求。
+```
+
+### 8.3.3所有k8s节点添加kube-proxy的配置
+```shell
+cat > /etc/kubernetes/kube-proxy.yaml << EOF
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+bindAddress: 0.0.0.0
+clientConnection:
+  acceptContentTypes: ""
+  burst: 10
+  contentType: application/vnd.kubernetes.protobuf
+  kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
+  qps: 5
+clusterCIDR: 172.16.0.0/12,fc00:2222::/112
+configSyncPeriod: 15m0s
+conntrack:
+  max: null
+  maxPerCore: 32768
+  min: 131072
+  tcpCloseWaitTimeout: 1h0m0s
+  tcpEstablishedTimeout: 24h0m0s
+enableProfiling: false
+healthzBindAddress: 0.0.0.0:10256
+hostnameOverride: ""
+iptables:
+  masqueradeAll: false
+  masqueradeBit: 14
+  minSyncPeriod: 0s
+  syncPeriod: 30s
+ipvs:
+  masqueradeAll: true
+  minSyncPeriod: 5s
+  scheduler: "rr"
+  syncPeriod: 30s
+kind: KubeProxyConfiguration
+metricsBindAddress: 127.0.0.1:10249
+mode: "ipvs"
+nodePortAddresses: null
+oomScoreAdj: -999
+portRange: ""
+udpIdleTimeout: 250ms
+EOF
+
+# 这是一个Kubernetes的kube-proxy组件配置文件示例。以下是每个配置项的详细解释:
+#
+# 1. apiVersion: kubeproxy.config.k8s.io/v1alpha1
+# - 指定该配置文件的API版本。
+#
+# 2. bindAddress: 0.0.0.0
+# - 指定kube-proxy使用的监听地址。0.0.0.0表示监听所有网络接口。
+#
+# 3. 
+### 8.3.3 Add the kube-proxy configuration on all k8s nodes
+
+```shell
+cat > /etc/kubernetes/kube-proxy.yaml << EOF
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+bindAddress: 0.0.0.0
+clientConnection:
+  acceptContentTypes: ""
+  burst: 10
+  contentType: application/vnd.kubernetes.protobuf
+  kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
+  qps: 5
+clusterCIDR: 172.16.0.0/12,fc00:2222::/112
+configSyncPeriod: 15m0s
+conntrack:
+  max: null
+  maxPerCore: 32768
+  min: 131072
+  tcpCloseWaitTimeout: 1h0m0s
+  tcpEstablishedTimeout: 24h0m0s
+enableProfiling: false
+healthzBindAddress: 0.0.0.0:10256
+hostnameOverride: ""
+iptables:
+  masqueradeAll: false
+  masqueradeBit: 14
+  minSyncPeriod: 0s
+  syncPeriod: 30s
+ipvs:
+  masqueradeAll: true
+  minSyncPeriod: 5s
+  scheduler: "rr"
+  syncPeriod: 30s
+kind: KubeProxyConfiguration
+metricsBindAddress: 127.0.0.1:10249
+mode: "ipvs"
+nodePortAddresses: null
+oomScoreAdj: -999
+portRange: ""
+udpIdleTimeout: 250ms
+EOF
+
+# Field-by-field:
+#  1. apiVersion: API version of this configuration file.
+#  2. bindAddress: address kube-proxy listens on; 0.0.0.0 means all interfaces.
+#  3. clientConnection: how kube-proxy talks to the API server -
+#     acceptContentTypes (accepted content types), burst (max burst above qps),
+#     contentType (request content type), kubeconfig (credentials file),
+#     qps (requests per second to the API server).
+#  4. clusterCIDR: the cluster's pod CIDRs (IPv4 and IPv6), used to tell
+#     cluster-internal traffic apart from external traffic.
+#  5. configSyncPeriod: how often the configuration is re-synced.
+#  6. conntrack: connection-tracking limits - max, maxPerCore, min, plus the
+#     timeouts for CLOSE_WAIT and for established TCP connections.
+#  7. enableProfiling: whether to expose profiling endpoints.
+#  8. healthzBindAddress: address:port of the health-check endpoint.
+#  9. hostnameOverride: override the node's hostname (empty = no override).
+# 10. iptables: iptables-mode settings - masqueradeAll, masqueradeBit, and the
+#     minimum/regular rule sync periods.
+# 11. ipvs: IPVS-mode settings - masqueradeAll, sync periods, and the
+#     scheduler ("rr" = round robin).
+# 12. kind: type of this configuration object.
+# 13. metricsBindAddress: address:port of the metrics endpoint.
+# 14. mode: proxy mode; "ipvs" selects the IPVS proxier.
+# 15. nodePortAddresses: node addresses usable for NodePorts (null = all).
+# 16. oomScoreAdj: OOM score adjustment for the kube-proxy process.
+# 17. portRange: host port range for service proxying (empty = default).
+# 18. udpIdleTimeout: idle timeout for UDP connections.
+```
+
+### 8.3.4 Start kube-proxy
+
+```shell
+systemctl daemon-reload
+# Reload systemd's unit files; required after adding or editing a unit file.
+
+systemctl enable --now kube-proxy.service
+# Enable kube-proxy.service at boot and start it immediately.
+
+systemctl restart kube-proxy.service
+# Restart the kube-proxy daemon.
+
+systemctl status kube-proxy.service
+# Show the current state of kube-proxy.service (running, enabled, recent logs).
+```
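+kube-proxy exposes health and metrics endpoints at the addresses set in kube-proxy.yaml above, which gives a quick post-start check. A sketch (the `/proxyMode` path and the `ipvsadm` tool are assumptions; adjust if your build differs):
+
+```shell
+# Health endpoint (healthzBindAddress 0.0.0.0:10256): expect HTTP 200.
+curl -s -o /dev/null -w '%{http_code}\n' http://127.0.0.1:10256/healthz
+
+# Metrics listener (metricsBindAddress 127.0.0.1:10249): reports the active proxy mode,
+# which should print "ipvs" with the configuration above.
+curl -s http://127.0.0.1:10249/proxyMode
+
+# In ipvs mode, virtual servers for the service network should already exist:
+ipvsadm -Ln | head
+```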
"assign_ipv4": "true", + "assign_ipv6": "true" + }, + - name: IP + value: "autodetect" + + - name: IP6 + value: "autodetect" + + - name: CALICO_IPV4POOL_CIDR + value: "172.16.0.0/12" + + - name: CALICO_IPV6POOL_CIDR + value: "fc00:2222::/112" + + - name: FELIX_IPV6SUPPORT + value: "true" + + +# 若docker镜像拉不下来,可以使用国内的仓库 +sed -i "s#docker.io/calico/#m.daocloud.io/docker.io/calico/#g" calico.yaml +sed -i "s#docker.io/calico/#m.daocloud.io/docker.io/calico/#g" calico-ipv6.yaml + +sed -i "s#m.daocloud.io/docker.io/calico/#docker.io/calico/#g" calico.yaml +sed -i "s#m.daocloud.io/docker.io/calico/#docker.io/calico/#g" calico-ipv6.yaml + +# 本地没有公网 IPv6 使用 calico.yaml +kubectl apply -f calico.yaml + +# 本地有公网 IPv6 使用 calico-ipv6.yaml +# kubectl apply -f calico-ipv6.yaml + +``` + +### 9.1.2查看容器状态 + +```shell +# calico 初始化会很慢 需要耐心等待一下,大约十分钟左右 +[root@k8s-master01 ~]# kubectl get pod -A +NAMESPACE NAME READY STATUS RESTARTS AGE +kube-system calico-kube-controllers-6747f75cdc-fbvvc 1/1 Running 0 61s +kube-system calico-node-fs7hl 1/1 Running 0 61s +kube-system calico-node-jqz58 1/1 Running 0 61s +kube-system calico-node-khjlg 1/1 Running 0 61s +kube-system calico-node-wmf8q 1/1 Running 0 61s +kube-system calico-node-xc6gn 1/1 Running 0 61s +kube-system calico-typha-6cdc4b4fbc-57snb 1/1 Running 0 61s +``` + +## 9.2 安装cilium + +### 9.2.1 安装helm + +```shell +# [root@k8s-master01 ~]# curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 +# [root@k8s-master01 ~]# chmod 700 get_helm.sh +# [root@k8s-master01 ~]# ./get_helm.sh + +wget https://mirrors.huaweicloud.com/helm/v3.12.3/helm-v3.12.3-linux-amd64.tar.gz +tar xvf helm-*-linux-amd64.tar.gz +cp linux-amd64/helm /usr/local/bin/ +``` + +### 9.2.2 安装cilium + +```shell +# 添加源 +helm repo add cilium https://helm.cilium.io + +# 修改为国内源 +helm pull cilium/cilium +tar xvf cilium-*.tgz +cd cilium/ +sed -i "s#quay.io/#m.daocloud.io/quay.io/#g" values.yaml + +# 默认参数安装 +helm install cilium ./cilium/ -n kube-system + +# 启用ipv6 +# helm install cilium cilium/cilium --namespace kube-system --set ipv6.enabled=true + +# 启用路由信息和监控插件 +# helm install cilium cilium/cilium --namespace kube-system --set hubble.relay.enabled=true --set hubble.ui.enabled=true --set prometheus.enabled=true --set operator.prometheus.enabled=true --set hubble.enabled=true --set hubble.metrics.enabled="{dns,drop,tcp,flow,port-distribution,icmp,http}" + +``` + +### 9.2.3 查看 + +```shell +[root@k8s-master01 ~]# kubectl get pod -A | grep cil +kube-system cilium-gmr6c 1/1 Running 0 5m3s +kube-system cilium-kzgdj 1/1 Running 0 5m3s +kube-system cilium-operator-69b677f97c-6pw4k 1/1 Running 0 5m3s +kube-system cilium-operator-69b677f97c-xzzdk 1/1 Running 0 5m3s +kube-system cilium-q2rnr 1/1 Running 0 5m3s +kube-system cilium-smx5v 1/1 Running 0 5m3s +kube-system cilium-tdjq4 1/1 Running 0 5m3s +[root@k8s-master01 ~]# +``` + +### 9.2.4 下载专属监控面板 + +安装时候没有创建 监控可以忽略 + +```shell +[root@k8s-master01 yaml]# wget https://mirrors.chenby.cn/https://raw.githubusercontent.com/cilium/cilium/1.12.1/examples/kubernetes/addons/prometheus/monitoring-example.yaml + +[root@k8s-master01 yaml]# sed -i "s#docker.io/#m.daocloud.io/docker.io/#g" monitoring-example.yaml + +[root@k8s-master01 yaml]# kubectl apply -f monitoring-example.yaml +namespace/cilium-monitoring created +serviceaccount/prometheus-k8s created +configmap/grafana-config created +configmap/grafana-cilium-dashboard created +configmap/grafana-cilium-operator-dashboard created +configmap/grafana-hubble-dashboard created 
+## 9.2 Install Cilium
+
+### 9.2.1 Install helm
+
+```shell
+# [root@k8s-master01 ~]# curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3
+# [root@k8s-master01 ~]# chmod 700 get_helm.sh
+# [root@k8s-master01 ~]# ./get_helm.sh
+
+wget https://mirrors.huaweicloud.com/helm/v3.12.3/helm-v3.12.3-linux-amd64.tar.gz
+tar xvf helm-*-linux-amd64.tar.gz
+cp linux-amd64/helm /usr/local/bin/
+```
+
+### 9.2.2 Install Cilium
+
+```shell
+# Add the repo
+helm repo add cilium https://helm.cilium.io
+
+# Switch the chart to a domestic image mirror
+helm pull cilium/cilium
+tar xvf cilium-*.tgz
+cd cilium/
+sed -i "s#quay.io/#m.daocloud.io/quay.io/#g" values.yaml
+cd ..
+
+# Install with default values
+helm install cilium ./cilium/ -n kube-system
+
+# Enable IPv6
+# helm install cilium cilium/cilium --namespace kube-system --set ipv6.enabled=true
+
+# Enable Hubble flow visibility and the monitoring integrations
+# helm install cilium cilium/cilium --namespace kube-system --set hubble.relay.enabled=true --set hubble.ui.enabled=true --set prometheus.enabled=true --set operator.prometheus.enabled=true --set hubble.enabled=true --set hubble.metrics.enabled="{dns,drop,tcp,flow,port-distribution,icmp,http}"
+```
+
+### 9.2.3 Check
+
+```shell
+[root@k8s-master01 ~]# kubectl get pod -A | grep cil
+kube-system   cilium-gmr6c                       1/1   Running   0   5m3s
+kube-system   cilium-kzgdj                       1/1   Running   0   5m3s
+kube-system   cilium-operator-69b677f97c-6pw4k   1/1   Running   0   5m3s
+kube-system   cilium-operator-69b677f97c-xzzdk   1/1   Running   0   5m3s
+kube-system   cilium-q2rnr                       1/1   Running   0   5m3s
+kube-system   cilium-smx5v                       1/1   Running   0   5m3s
+kube-system   cilium-tdjq4                       1/1   Running   0   5m3s
+[root@k8s-master01 ~]#
+```
+
+### 9.2.4 Download the dedicated monitoring dashboards
+
+Skip this if you did not enable the monitoring stack at install time.
+
+```shell
+[root@k8s-master01 yaml]# wget https://mirrors.chenby.cn/https://raw.githubusercontent.com/cilium/cilium/1.12.1/examples/kubernetes/addons/prometheus/monitoring-example.yaml
+
+[root@k8s-master01 yaml]# sed -i "s#docker.io/#m.daocloud.io/docker.io/#g" monitoring-example.yaml
+
+[root@k8s-master01 yaml]# kubectl apply -f monitoring-example.yaml
+namespace/cilium-monitoring created
+serviceaccount/prometheus-k8s created
+configmap/grafana-config created
+configmap/grafana-cilium-dashboard created
+configmap/grafana-cilium-operator-dashboard created
+configmap/grafana-hubble-dashboard created
+configmap/prometheus created
+clusterrole.rbac.authorization.k8s.io/prometheus created
+clusterrolebinding.rbac.authorization.k8s.io/prometheus created
+service/grafana created
+service/prometheus created
+deployment.apps/grafana created
+deployment.apps/prometheus created
+[root@k8s-master01 yaml]#
+```
+
+### 9.2.5 Deploy the connectivity test cases
+
+Note: the test cases need in-cluster DNS; run this step after CoreDNS (section 10) is installed.
+
+```shell
+wget https://mirrors.chenby.cn/https://raw.githubusercontent.com/cilium/cilium/master/examples/kubernetes/connectivity-check/connectivity-check.yaml
+
+sed -i "s#google.com#baidu.cn#g" connectivity-check.yaml
+sed -i "s#quay.io/#m.daocloud.io/quay.io/#g" connectivity-check.yaml
+
+kubectl apply -f connectivity-check.yaml
+```
+
+### 9.2.6 Check the pods
+
+```shell
+[root@k8s-master01 yaml]# kubectl get pod -A
+NAMESPACE           NAME                                                     READY   STATUS    RESTARTS      AGE
+cilium-monitoring   grafana-59957b9549-6zzqh                                 1/1     Running   0             10m
+cilium-monitoring   prometheus-7c8c9684bb-4v9cl                              1/1     Running   0             10m
+default             chenby-75b5d7fbfb-7zjsr                                  1/1     Running   0             27h
+default             chenby-75b5d7fbfb-hbvr8                                  1/1     Running   0             27h
+default             chenby-75b5d7fbfb-ppbzg                                  1/1     Running   0             27h
+default             echo-a-6799dff547-pnx6w                                  1/1     Running   0             10m
+default             echo-b-fc47b659c-4bdg9                                   1/1     Running   0             10m
+default             echo-b-host-67fcfd59b7-28r9s                             1/1     Running   0             10m
+default             host-to-b-multi-node-clusterip-69c57975d6-z4j2z          1/1     Running   0             10m
+default             host-to-b-multi-node-headless-865899f7bb-frrmc           1/1     Running   0             10m
+default             pod-to-a-allowed-cnp-5f9d7d4b9d-hcd8x                    1/1     Running   0             10m
+default             pod-to-a-denied-cnp-65cc5ff97b-2rzb8                     1/1     Running   0             10m
+default             pod-to-a-dfc64f564-p7xcn                                 1/1     Running   0             10m
+default             pod-to-b-intra-node-nodeport-677868746b-trk2l            1/1     Running   0             10m
+default             pod-to-b-multi-node-clusterip-76bbbc677b-knfq2           1/1     Running   0             10m
+default             pod-to-b-multi-node-headless-698c6579fd-mmvd7            1/1     Running   0             10m
+default             pod-to-b-multi-node-nodeport-5dc4b8cfd6-8dxmz            1/1     Running   0             10m
+default             pod-to-external-1111-8459965778-pjt9b                    1/1     Running   0             10m
+default             pod-to-external-fqdn-allow-google-cnp-64df9fb89b-l9l4q   1/1     Running   0             10m
+kube-system         cilium-7rfj6                                             1/1     Running   0             56s
+kube-system         cilium-d4cch                                             1/1     Running   0             56s
+kube-system         cilium-h5x8r                                             1/1     Running   0             56s
+kube-system         cilium-operator-5dbddb6dbf-flpl5                         1/1     Running   0             56s
+kube-system         cilium-operator-5dbddb6dbf-gcznc                         1/1     Running   0             56s
+kube-system         cilium-t2xlz                                             1/1     Running   0             56s
+kube-system         cilium-z65z7                                             1/1     Running   0             56s
+kube-system         coredns-665475b9f8-jkqn8                                 1/1     Running   1 (36h ago)   36h
+kube-system         hubble-relay-59d8575-9pl9z                               1/1     Running   0             56s
+kube-system         hubble-ui-64d4995d57-nsv9j                               2/2     Running   0             56s
+kube-system         metrics-server-776f58c94b-c6zgs                          1/1     Running   1 (36h ago)   37h
+[root@k8s-master01 yaml]#
+```
+
+### 9.2.7 Change the Services to NodePort
+
+Skip this if you did not enable the monitoring stack at install time.
+
+```shell
+[root@k8s-master01 yaml]# kubectl edit svc -n kube-system hubble-ui
+service/hubble-ui edited
+[root@k8s-master01 yaml]#
+[root@k8s-master01 yaml]# kubectl edit svc -n cilium-monitoring grafana
+service/grafana edited
+[root@k8s-master01 yaml]#
+[root@k8s-master01 yaml]# kubectl edit svc -n cilium-monitoring prometheus
+service/prometheus edited
+[root@k8s-master01 yaml]#
+
+# In each Service, set the type field to:
+type: NodePort
+```
+
+### 9.2.8 Check the ports
+
+Skip this if you did not enable the monitoring stack at install time.
+
+```shell
+[root@k8s-master01 yaml]# kubectl get svc -A | grep monit
+cilium-monitoring   grafana      NodePort   10.100.250.17    <none>   3000:30707/TCP   15m
+cilium-monitoring   prometheus   NodePort   10.100.131.243   <none>   9090:31155/TCP   15m
+[root@k8s-master01 yaml]#
+[root@k8s-master01 yaml]# kubectl get svc -A | grep hubble
+kube-system   hubble-metrics   ClusterIP   None            <none>   9965/TCP       5m12s
+kube-system   hubble-peer      ClusterIP   10.100.150.29   <none>   443/TCP        5m12s
+kube-system   hubble-relay     ClusterIP   10.109.251.34   <none>   80/TCP         5m12s
+kube-system   hubble-ui        NodePort    10.102.253.59   <none>   80:31219/TCP   5m12s
+[root@k8s-master01 yaml]#
+```
+
+### 9.2.9 Access
+
+Skip this if you did not enable the monitoring stack at install time.
+
+```shell
+http://192.168.0.31:30707
+http://192.168.0.31:31155
+http://192.168.0.31:31219
+```
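+If you would rather not expose the UIs via NodePort (9.2.7), `kubectl port-forward` gives temporary access from the machine running kubectl. A sketch using the service ports shown in 9.2.8:
+
+```shell
+# Forward local ports to the in-cluster Services; Ctrl-C (or killing the jobs) stops them.
+kubectl -n kube-system port-forward svc/hubble-ui 12000:80 &
+kubectl -n cilium-monitoring port-forward svc/grafana 3000:3000 &
+kubectl -n cilium-monitoring port-forward svc/prometheus 9090:9090 &
+
+# Then browse http://127.0.0.1:12000 , http://127.0.0.1:3000 and http://127.0.0.1:9090
+```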
+# 10. Install CoreDNS
+
+## 10.1 The following steps are performed on master01 only
+
+### 10.1.1 Edit the chart values
+
+```shell
+# Fetch the chart tarball
+helm repo add coredns https://coredns.github.io/helm
+helm pull coredns/coredns
+tar xvf coredns-*.tgz
+cd coredns/
+
+# Set the service IP
+vim values.yaml
+cat values.yaml | grep clusterIP:
+clusterIP: "10.96.0.10"
+
+# Example of the edited section:
+---
+service:
+# clusterIP: ""
+# clusterIPs: []
+# loadBalancerIP: ""
+# externalIPs: []
+# externalTrafficPolicy: ""
+# ipFamilyPolicy: ""
+  # The name of the Service
+  # If not set, a name is generated using the fullname template
+  clusterIP: "10.96.0.10"
+  name: ""
+  annotations: {}
+---
+
+# Optional: switch to domestic image mirrors
+sed -i "s#coredns/#m.daocloud.io/docker.io/coredns/#g" values.yaml
+sed -i "s#registry.k8s.io/#m.daocloud.io/registry.k8s.io/#g" values.yaml
+
+# Install with default values (run from the directory containing coredns/)
+cd ..
+helm install coredns ./coredns/ -n kube-system
+```
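+A quick sanity check that CoreDNS is serving on the clusterIP configured above. The label selector is the one the coredns Helm chart normally applies, and `dig` must be available on the host; adjust both if your setup differs:
+
+```shell
+# The coredns pods should be Running and the Service should own 10.96.0.10:
+kubectl -n kube-system get pods -l app.kubernetes.io/name=coredns -o wide
+kubectl -n kube-system get svc coredns-coredns
+
+# Resolve a well-known in-cluster name directly against the service IP:
+dig +short kubernetes.default.svc.cluster.local @10.96.0.10
+```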
+# 11. Install Metrics Server
+
+## 11.1 The following steps are performed on master01 only
+
+### 11.1.1 Install metrics-server
+
+Recent Kubernetes versions collect resource usage through metrics-server, which reports CPU, memory, disk, and network usage for nodes and Pods.
+
+```shell
+# Single-instance version
+wget https://mirrors.chenby.cn/https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
+# High-availability version
+wget https://mirrors.chenby.cn/https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/high-availability.yaml
+
+# Edit the manifest you downloaded
+vim components.yaml
+vim high-availability.yaml
+
+---
+# 1
+defaultArgs:
+  - --cert-dir=/tmp
+  - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
+  - --kubelet-use-node-status-port
+  - --metric-resolution=15s
+  - --kubelet-insecure-tls
+  - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem
+  - --requestheader-username-headers=X-Remote-User
+  - --requestheader-group-headers=X-Remote-Group
+  - --requestheader-extra-headers-prefix=X-Remote-Extra-
+
+# 2
+        volumeMounts:
+        - mountPath: /tmp
+          name: tmp-dir
+        - name: ca-ssl
+          mountPath: /etc/kubernetes/pki
+
+# 3
+      volumes:
+      - emptyDir: {}
+        name: tmp-dir
+      - name: ca-ssl
+        hostPath:
+          path: /etc/kubernetes/pki
+---
+
+# Optional: switch to domestic image mirrors
+sed -i "s#registry.k8s.io/#m.daocloud.io/registry.k8s.io/#g" *.yaml
+
+# Apply one of the two
+kubectl apply -f components.yaml
+# kubectl apply -f high-availability.yaml
+```
+
+### 11.1.2 Wait a moment, then check the status
+
+```shell
+kubectl top node
+NAME           CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%
+k8s-master01   197m         4%     1497Mi          39%
+k8s-master02   152m         3%     1315Mi          34%
+k8s-master03   112m         2%     1274Mi          33%
+k8s-node01     142m         3%     777Mi           20%
+k8s-node02     71m          1%     682Mi           17%
+```
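+If `kubectl top` returns errors, the usual first check is the aggregated API registration that metrics-server creates. A sketch:
+
+```shell
+# The v1beta1.metrics.k8s.io APIService must report Available=True before kubectl top works.
+kubectl get apiservice v1beta1.metrics.k8s.io
+
+# If it is not Available, the pod logs normally say why (certificates, kubelet address types, ...):
+kubectl -n kube-system logs deploy/metrics-server
+```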
+# 12. Cluster verification
+
+## 12.1 Deploy a test pod
+
+```shell
+# busybox:1.28 is used because nslookup is broken in newer busybox images.
+cat<<EOF | kubectl apply -f -
+apiVersion: v1
+kind: Pod
+metadata:
+  name: busybox
+  namespace: default
+spec:
+  containers:
+  - name: busybox
+    image: docker.io/library/busybox:1.28
+    command:
+      - sleep
+      - "3600"
+    imagePullPolicy: IfNotPresent
+  restartPolicy: Always
+EOF
+```
+
+## 12.2 Resolve kubernetes in the default namespace from the pod
+
+```shell
+# Check the service
+kubectl get svc
+NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
+kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   17h
+
+# Resolve it
+kubectl exec busybox -n default -- nslookup kubernetes
+Server:    10.96.0.10
+Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local
+
+Name:      kubernetes
+Address 1: 10.96.0.1 kubernetes.default.svc.cluster.local
+```
+
+## 12.3 Test cross-namespace resolution
+
+```shell
+# List the Services
+kubectl get svc -A
+NAMESPACE     NAME              TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)         AGE
+default       kubernetes        ClusterIP   10.96.0.1       <none>        443/TCP         76m
+kube-system   calico-typha      ClusterIP   10.105.100.82   <none>        5473/TCP        35m
+kube-system   coredns-coredns   ClusterIP   10.96.0.10      <none>        53/UDP,53/TCP   8m14s
+kube-system   metrics-server    ClusterIP   10.105.60.31    <none>        443/TCP         109s
+
+# Resolve a Service in another namespace
+kubectl exec busybox -n default -- nslookup coredns-coredns.kube-system
+Server:    10.96.0.10
+Address 1: 10.96.0.10 coredns-coredns.kube-system.svc.cluster.local
+
+Name:      coredns-coredns.kube-system
+Address 1: 10.96.0.10 coredns-coredns.kube-system.svc.cluster.local
+[root@k8s-master01 metrics-server]#
+```
+
+## 12.4 Every node must reach the kubernetes Service on 443 and the kube-dns Service on 53
+
+```shell
+telnet 10.96.0.1 443
+Trying 10.96.0.1...
+Connected to 10.96.0.1.
+Escape character is '^]'.
+
+telnet 10.96.0.10 53
+Trying 10.96.0.10...
+Connected to 10.96.0.10.
+Escape character is '^]'.
+
+curl 10.96.0.10:53
+curl: (52) Empty reply from server
+```
+
+## 12.5 Pod-to-pod connectivity must work
+
+```shell
+kubectl get po -owide
+NAME      READY   STATUS    RESTARTS   AGE   IP              NODE         NOMINATED NODE   READINESS GATES
+busybox   1/1     Running   0          17m   172.27.14.193   k8s-node02   <none>           <none>
+
+kubectl get po -n kube-system -owide
+NAME                                       READY   STATUS    RESTARTS   AGE     IP               NODE           NOMINATED NODE   READINESS GATES
+calico-kube-controllers-76754ff848-pw4xg   1/1     Running   0          38m     172.25.244.193   k8s-master01   <none>           <none>
+calico-node-97m55                          1/1     Running   0          38m     192.168.0.34     k8s-node01     <none>           <none>
+calico-node-hlz7j                          1/1     Running   0          38m     192.168.0.32     k8s-master02   <none>           <none>
+calico-node-jtlck                          1/1     Running   0          38m     192.168.0.33     k8s-master03   <none>           <none>
+calico-node-lxfkf                          1/1     Running   0          38m     192.168.0.35     k8s-node02     <none>           <none>
+calico-node-t667x                          1/1     Running   0          38m     192.168.0.31     k8s-master01   <none>           <none>
+calico-typha-59d75c5dd4-gbhfp              1/1     Running   0          38m     192.168.0.35     k8s-node02     <none>           <none>
+coredns-coredns-c5c6d4d9b-bd829            1/1     Running   0          10m     172.25.92.65     k8s-master02   <none>           <none>
+metrics-server-7c8b55c754-w7q8v            1/1     Running   0          3m56s   172.17.125.3     k8s-node01     <none>           <none>
+
+# Enter busybox and ping a pod on another node
+kubectl exec -ti busybox -- sh
+/ # ping 192.168.0.34
+PING 192.168.0.34 (192.168.0.34): 56 data bytes
+64 bytes from 192.168.0.34: seq=0 ttl=63 time=0.358 ms
+64 bytes from 192.168.0.34: seq=1 ttl=63 time=0.668 ms
+64 bytes from 192.168.0.34: seq=2 ttl=63 time=0.637 ms
+64 bytes from 192.168.0.34: seq=3 ttl=63 time=0.624 ms
+64 bytes from 192.168.0.34: seq=4 ttl=63 time=0.907 ms
+
+# Success here shows pods can communicate across namespaces and across hosts.
+```
+
+## 12.6 Create three replicas and confirm they land on different nodes (delete them afterwards)
+
+```shell
+cat > deployments.yaml << EOF
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: nginx-deployment
+  labels:
+    app: nginx
+spec:
+  replicas: 3
+  selector:
+    matchLabels:
+      app: nginx
+  template:
+    metadata:
+      labels:
+        app: nginx
+    spec:
+      containers:
+      - name: nginx
+        image: nginx
+        ports:
+        - containerPort: 80
+
+EOF
+
+kubectl apply -f deployments.yaml
+deployment.apps/nginx-deployment created
+
+kubectl get pod
+NAME                               READY   STATUS    RESTARTS   AGE
+busybox                            1/1     Running   0          6m25s
+nginx-deployment-9456bbbf9-4bmvk   1/1     Running   0          8s
+nginx-deployment-9456bbbf9-9rcdk   1/1     Running   0          8s
+nginx-deployment-9456bbbf9-dqv8s   1/1     Running   0          8s
+
+# Delete the nginx deployment
+[root@k8s-master01 ~]# kubectl delete -f deployments.yaml
+```
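+Since this cluster is built dual-stack, it is also worth confirming pods actually receive both address families. A quick check using the busybox pod from 12.1, assuming you applied calico-ipv6.yaml (or enabled IPv6 in Cilium):
+
+```shell
+# podIPs should list one address from 172.16.0.0/12 and one from fc00:2222::/112.
+kubectl get pod busybox -o jsonpath='{.status.podIPs}{"\n"}'
+```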
+# 13. Install the dashboard
+
+```shell
+helm repo add kubernetes-dashboard https://kubernetes.github.io/dashboard/
+helm install kubernetes-dashboard kubernetes-dashboard/kubernetes-dashboard --namespace kube-system
+```
+
+## 13.1 Change the dashboard Service to NodePort (skip if it already is)
+
+```shell
+kubectl edit svc kubernetes-dashboard -n kube-system
+
+  type: NodePort
+```
+
+## 13.2 Check the port number
+
+```shell
+kubectl get svc kubernetes-dashboard -n kube-system
+NAME                   TYPE       CLUSTER-IP       EXTERNAL-IP   PORT(S)         AGE
+kubernetes-dashboard   NodePort   10.108.120.110   <none>        443:30034/TCP   34s
+```
+
+## 13.3 Create a token
+
+```shell
+cat > dashboard-user.yaml << EOF
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: admin-user
+  namespace: kube-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: admin-user
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: cluster-admin
+subjects:
+- kind: ServiceAccount
+  name: admin-user
+  namespace: kube-system
+EOF
+
+kubectl apply -f dashboard-user.yaml
+
+# Create the token
+kubectl -n kube-system create token admin-user
+eyJhbGciOiJSUzI1NiIsImtpZCI6IksxY2U2U19KUWlRMzJSVXdtU2wzak1PdXpJYXVxQTBlbGJHUWlQZWN0ZU0ifQ.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiXSwiZXhwIjoxNjkyNDQ3NzA3LCJpYXQiOjE2OTI0NDQxMDcsImlzcyI6Imh0dHBzOi8va3ViZXJuZXRlcy5kZWZhdWx0LnN2Yy5jbHVzdGVyLmxvY2FsIiwia3ViZXJuZXRlcy5pbyI6eyJuYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJhZG1pbi11c2VyIiwidWlkIjoiNmE4MWEwY2ItM2U0Yi00ZTNhLTk0N2EtY2ViNDNkOTNjZmUzIn19LCJuYmYiOjE2OTI0NDQxMDcsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlLXN5c3RlbTphZG1pbi11c2VyIn0.Ww8zpmguHtxAuUn1EWtNCP2A-d25PGOYO3_FkHyOtj6f0iLm_HTvwM0InlTgIAWnfWMDOHzBLc9m1gYzoaC5efgBVtZkpy900NIhW_-yiQK3cMpiNasKOH7jiPnNMXNXczw3ElZWMqFYXkYRmQRVgVd6t0DmYK_TCXjDiZIU9jCzIDdSWDDI9nIieRGQwY8CzfEM9CKeYYC4a5wOG6t4ZuTcnRAYdZ1KZ7PZ1R73JLauessAtiDUArTIB2xWcWxy_b_J4-wXtsQyW5YOYOQ3Ie9NbERQj9wlprNSLhFqSxq-RUwizGBZ7z7t1RmW134DStU25uA4GkSJBQWK4b1cWA
+```
+
+## 13.4 Log in to the dashboard
+
+https://192.168.0.31:30034/
+
+# 14. Install ingress
+
+## 14.1 Deploy
+
+```shell
+wget https://mirrors.chenby.cn/https://raw.githubusercontent.com/kubernetes/ingress-nginx/main/deploy/static/provider/cloud/deploy.yaml
+
+# Optional: switch to domestic image mirrors
+sed -i "s#registry.k8s.io/#m.daocloud.io/registry.k8s.io/#g" *.yaml
+
+cat > backend.yaml << EOF
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: default-http-backend
+  labels:
+    app.kubernetes.io/name: default-http-backend
+  namespace: kube-system
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app.kubernetes.io/name: default-http-backend
+  template:
+    metadata:
+      labels:
+        app.kubernetes.io/name: default-http-backend
+    spec:
+      terminationGracePeriodSeconds: 60
+      containers:
+      - name: default-http-backend
+        image: registry.cn-hangzhou.aliyuncs.com/chenby/defaultbackend-amd64:1.5
+        livenessProbe:
+          httpGet:
+            path: /healthz
+            port: 8080
+            scheme: HTTP
+          initialDelaySeconds: 30
+          timeoutSeconds: 5
+        ports:
+        - containerPort: 8080
+        resources:
+          limits:
+            cpu: 10m
+            memory: 20Mi
+          requests:
+            cpu: 10m
+            memory: 20Mi
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: default-http-backend
+  namespace: kube-system
+  labels:
+    app.kubernetes.io/name: default-http-backend
+spec:
+  ports:
+  - port: 80
+    targetPort: 8080
+  selector:
+    app.kubernetes.io/name: default-http-backend
+EOF
+
+kubectl apply -f deploy.yaml
+kubectl apply -f backend.yaml
+
+cat > ingress-demo-app.yaml << EOF
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: hello-server
+spec:
+  replicas: 2
+  selector:
+    matchLabels:
+      app: hello-server
+  template:
+    metadata:
+      labels:
+        app: hello-server
+    spec:
+      containers:
+      - name: hello-server
+        image: registry.cn-hangzhou.aliyuncs.com/lfy_k8s_images/hello-server
+        ports:
+        - containerPort: 9000
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    app: nginx-demo
+  name: nginx-demo
+spec:
+  replicas: 2
+  selector:
+    matchLabels:
+      app: nginx-demo
+  template:
+    metadata:
+      labels:
+        app: nginx-demo
+    spec:
+      containers:
+      - image: nginx
+        name: nginx
+---
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    app: nginx-demo
+  name: nginx-demo
+spec:
+  selector:
+    app: nginx-demo
+  ports:
+  - port: 8000
+    protocol: TCP
+    targetPort: 80
+---
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    app: hello-server
+  name: hello-server
+spec:
+  selector:
+    app: hello-server
+  ports:
+  - port: 8000
+    protocol: TCP
+    targetPort: 9000
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: ingress-host-bar
+spec:
+  ingressClassName: nginx
+  rules:
+  - host: "hello.chenby.cn"
+    http:
+      paths:
+      - pathType: Prefix
+        path: "/"
+        backend:
+          service:
+            name: hello-server
+            port:
+              number: 8000
+  - host: "demo.chenby.cn"
+    http:
+      paths:
+      - pathType: Prefix
+        path: "/nginx"
+        backend:
+          service:
+            name: nginx-demo
+            port:
+              number: 8000
+EOF
+
+# Wait until the controller is up, then apply the demo:
+kubectl apply -f ingress-demo-app.yaml
+
+kubectl get ingress
+NAME               CLASS   HOSTS                            ADDRESS        PORTS   AGE
+ingress-host-bar   nginx   hello.chenby.cn,demo.chenby.cn   192.168.0.32   80      7s
+```
+
+## 14.2 Check the ingress ports
+
+```shell
+# Change the Service to NodePort
+kubectl edit svc -n ingress-nginx ingress-nginx-controller
+type: NodePort
+
+[root@hello ~/yaml]# kubectl get svc -A | grep ingress
+ingress-nginx   ingress-nginx-controller             NodePort    10.104.231.36   <none>   80:32636/TCP,443:30579/TCP   104s
+ingress-nginx   ingress-nginx-controller-admission   ClusterIP   10.101.85.88    <none>   443/TCP                      105s
+[root@hello ~/yaml]#
+```
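+Without DNS records for the demo hosts you can still exercise the Ingress by supplying the Host header yourself. A sketch using the HTTP NodePort from 14.2 (32636 here; substitute your own values):
+
+```shell
+# hello.chenby.cn is routed to hello-server; demo.chenby.cn/nginx to nginx-demo.
+curl -H 'Host: hello.chenby.cn' http://192.168.0.31:32636/
+curl -H 'Host: demo.chenby.cn'  http://192.168.0.31:32636/nginx
+```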
+# 15. IPv6 test
+
+```shell
+# Deploy a test application
+cat<<EOF | kubectl apply -f -
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: chenby
+spec:
+  replicas: 3
+  selector:
+    matchLabels:
+      app: chenby
+  template:
+    metadata:
+      labels:
+        app: chenby
+    spec:
+      hostNetwork: true
+      containers:
+      - name: chenby
+        image: nginx
+        resources:
+          limits:
+            memory: "128Mi"
+            cpu: "500m"
+        ports:
+        - containerPort: 80
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: chenby
+spec:
+  ipFamilyPolicy: PreferDualStack
+  ipFamilies:
+  - IPv6
+  - IPv4
+  type: NodePort
+  selector:
+    app: chenby
+  ports:
+  - port: 80
+    targetPort: 80
+EOF
+
+# Check the port
+[root@k8s-master01 ~]# kubectl get svc
+NAME     TYPE       CLUSTER-IP   EXTERNAL-IP   PORT(S)        AGE
+chenby   NodePort   fd00::a29c   <none>        80:30779/TCP   5s
+[root@k8s-master01 ~]#
+
+# Access via the internal network
+[root@localhost yaml]# curl -I http://[fd00::a29c]
+HTTP/1.1 200 OK
+Server: nginx/1.21.6
+Date: Thu, 05 May 2022 10:20:35 GMT
+Content-Type: text/html
+Content-Length: 615
+Last-Modified: Tue, 25 Jan 2022 15:03:52 GMT
+Connection: keep-alive
+ETag: "61f01158-267"
+Accept-Ranges: bytes
+
+[root@localhost yaml]# curl -I http://192.168.0.31:30779
+HTTP/1.1 200 OK
+Server: nginx/1.21.6
+Date: Thu, 05 May 2022 10:20:59 GMT
+Content-Type: text/html
+Content-Length: 615
+Last-Modified: Tue, 25 Jan 2022 15:03:52 GMT
+Connection: keep-alive
+ETag: "61f01158-267"
+Accept-Ranges: bytes
+
+[root@localhost yaml]#
+
+# Access via the public network
+[root@localhost yaml]# curl -I http://[2409:8a10:9e18:9020::10]:30779
+HTTP/1.1 200 OK
+Server: nginx/1.21.6
+Date: Thu, 05 May 2022 10:20:54 GMT
+Content-Type: text/html
+Content-Length: 615
+Last-Modified: Tue, 25 Jan 2022 15:03:52 GMT
+Connection: keep-alive
+ETag: "61f01158-267"
+Accept-Ranges: bytes
+```
+
+# 16. Install command-line auto-completion
+
+```shell
+yum install bash-completion -y
+source /usr/share/bash-completion/bash_completion
+source <(kubectl completion bash)
+echo "source <(kubectl completion bash)" >> ~/.bashrc
+```
+
+# Appendix
+
+```shell
+# The DaoCloud registry can act as an image accelerator; rewrite registries as follows:
+cr.l5d.io/                      ===> m.daocloud.io/cr.l5d.io/
+docker.elastic.co/              ===> m.daocloud.io/docker.elastic.co/
+docker.io/                      ===> m.daocloud.io/docker.io/
+gcr.io/                         ===> m.daocloud.io/gcr.io/
+ghcr.io/                        ===> m.daocloud.io/ghcr.io/
+k8s.gcr.io/                     ===> m.daocloud.io/k8s.gcr.io/
+mcr.microsoft.com/              ===> m.daocloud.io/mcr.microsoft.com/
+nvcr.io/                        ===> m.daocloud.io/nvcr.io/
+quay.io/                        ===> m.daocloud.io/quay.io/
+registry.jujucharms.com/        ===> m.daocloud.io/registry.jujucharms.com/
+registry.k8s.io/                ===> m.daocloud.io/registry.k8s.io/
+registry.opensource.zalan.do/   ===> m.daocloud.io/registry.opensource.zalan.do/
+rocks.canonical.com/            ===> m.daocloud.io/rocks.canonical.com/
+
+# Check the image tags yourself before pulling - they are updated continuously
+# and this document cannot track them in real time.
+
+# docker pull the images
+docker pull registry.cn-hangzhou.aliyuncs.com/chenby/cni:master
+docker pull registry.cn-hangzhou.aliyuncs.com/chenby/node:master
+docker pull registry.cn-hangzhou.aliyuncs.com/chenby/kube-controllers:master
+docker pull registry.cn-hangzhou.aliyuncs.com/chenby/typha:master
+docker pull registry.cn-hangzhou.aliyuncs.com/chenby/coredns:v1.10.0
+docker pull registry.cn-hangzhou.aliyuncs.com/chenby/pause:3.6
+docker pull registry.cn-hangzhou.aliyuncs.com/chenby/metrics-server:v0.5.2
+docker pull kubernetesui/dashboard:v2.7.0
+docker pull kubernetesui/metrics-scraper:v1.0.8
+docker pull quay.io/cilium/cilium:v1.12.6
+docker pull quay.io/cilium/certgen:v0.1.8
+docker pull quay.io/cilium/hubble-relay:v1.12.6
+docker pull quay.io/cilium/hubble-ui-backend:v0.9.2
+docker pull quay.io/cilium/hubble-ui:v0.9.2
+docker pull quay.io/cilium/cilium-etcd-operator:v2.0.7
+docker pull quay.io/cilium/operator:v1.12.6
+docker pull quay.io/cilium/clustermesh-apiserver:v1.12.6
+docker pull quay.io/coreos/etcd:v3.5.4
+docker pull quay.io/cilium/startup-script:d69851597ea019af980891a4628fb36b7880ec26
+
+# docker save the images
+docker save registry.cn-hangzhou.aliyuncs.com/chenby/cni:master -o cni.tar
+docker save registry.cn-hangzhou.aliyuncs.com/chenby/node:master -o node.tar
+docker save registry.cn-hangzhou.aliyuncs.com/chenby/typha:master -o typha.tar
+docker save registry.cn-hangzhou.aliyuncs.com/chenby/kube-controllers:master -o kube-controllers.tar
+docker save registry.cn-hangzhou.aliyuncs.com/chenby/coredns:v1.10.0 -o coredns.tar
+docker save registry.cn-hangzhou.aliyuncs.com/chenby/pause:3.6 -o pause.tar
+docker save registry.cn-hangzhou.aliyuncs.com/chenby/metrics-server:v0.5.2 -o metrics-server.tar
+docker save kubernetesui/dashboard:v2.7.0 -o dashboard.tar
+docker save kubernetesui/metrics-scraper:v1.0.8 -o metrics-scraper.tar
+docker save quay.io/cilium/cilium:v1.12.6 -o cilium.tar
+docker save quay.io/cilium/certgen:v0.1.8 -o certgen.tar
+docker save quay.io/cilium/hubble-relay:v1.12.6 -o hubble-relay.tar
+docker save quay.io/cilium/hubble-ui-backend:v0.9.2 -o hubble-ui-backend.tar
+docker save quay.io/cilium/hubble-ui:v0.9.2 -o hubble-ui.tar
+docker save quay.io/cilium/cilium-etcd-operator:v2.0.7 -o cilium-etcd-operator.tar
+docker save quay.io/cilium/operator:v1.12.6 -o operator.tar
+docker save quay.io/cilium/clustermesh-apiserver:v1.12.6 -o clustermesh-apiserver.tar
+docker save quay.io/coreos/etcd:v3.5.4 -o etcd.tar
+docker save quay.io/cilium/startup-script:d69851597ea019af980891a4628fb36b7880ec26 -o startup-script.tar
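+
+# Optional: the explicit pull/save lists above can be driven from a single image
+# list instead of being spelled out. A sketch - images.txt is a hypothetical
+# file holding one image reference per line:
+#
+#   while read -r IMG; do
+#     docker pull "$IMG"
+#     docker save "$IMG" -o "$(basename "${IMG%%:*}").tar"
+#   done < images.txt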
+
+# Copy the image tarballs to every node
+for NODE in k8s-master01 k8s-master02 k8s-master03 k8s-node01 k8s-node02; do scp -r images/ $NODE:/root/ ; done
+
+# Create the containerd namespace
+ctr ns create k8s.io
+
+# Import the images
+ctr --namespace k8s.io image import images/cni.tar
+ctr --namespace k8s.io image import images/node.tar
+ctr --namespace k8s.io image import images/typha.tar
+ctr --namespace k8s.io image import images/kube-controllers.tar
+ctr --namespace k8s.io image import images/coredns.tar
+ctr --namespace k8s.io image import images/pause.tar
+ctr --namespace k8s.io image import images/metrics-server.tar
+ctr --namespace k8s.io image import images/dashboard.tar
+ctr --namespace k8s.io image import images/metrics-scraper.tar
+ctr --namespace k8s.io image import images/cilium.tar
+ctr --namespace k8s.io image import images/certgen.tar
+ctr --namespace k8s.io image import images/hubble-relay.tar
+ctr --namespace k8s.io image import images/hubble-ui-backend.tar
+ctr --namespace k8s.io image import images/hubble-ui.tar
+ctr --namespace k8s.io image import images/cilium-etcd-operator.tar
+ctr --namespace k8s.io image import images/operator.tar
+ctr --namespace k8s.io image import images/clustermesh-apiserver.tar
+ctr --namespace k8s.io image import images/etcd.tar
+ctr --namespace k8s.io image import images/startup-script.tar
+
+# Pull the chart tarball and unpack it
+helm pull cilium/cilium
+
+# Check the image versions it references
+root@hello:~/cilium# cat values.yaml | grep tag: -C1
+    repository: "quay.io/cilium/cilium"
+    tag: "v1.12.6"
+    pullPolicy: "IfNotPresent"
+--
+    repository: "quay.io/cilium/certgen"
+    tag: "v0.1.8@sha256:4a456552a5f192992a6edcec2febb1c54870d665173a33dc7d876129b199ddbd"
+    pullPolicy: "IfNotPresent"
+--
+    repository: "quay.io/cilium/hubble-relay"
+    tag: "v1.12.6"
+    # hubble-relay-digest
+--
+    repository: "quay.io/cilium/hubble-ui-backend"
+    tag: "v0.9.2@sha256:a3ac4d5b87889c9f7cc6323e86d3126b0d382933bd64f44382a92778b0cde5d7"
+    pullPolicy: "IfNotPresent"
+--
+    repository: "quay.io/cilium/hubble-ui"
+    tag: "v0.9.2@sha256:d3596efc94a41c6b772b9afe6fe47c17417658956e04c3e2a28d293f2670663e"
+    pullPolicy: "IfNotPresent"
+--
+    repository: "quay.io/cilium/cilium-etcd-operator"
+    tag: "v2.0.7@sha256:04b8327f7f992693c2cb483b999041ed8f92efc8e14f2a5f3ab95574a65ea2dc"
+    pullPolicy: "IfNotPresent"
+--
+    repository: "quay.io/cilium/operator"
+    tag: "v1.12.6"
+    # operator-generic-digest
+--
+    repository: "quay.io/cilium/startup-script"
+    tag: "d69851597ea019af980891a4628fb36b7880ec26"
+    pullPolicy: "IfNotPresent"
+--
+    repository: "quay.io/cilium/cilium"
+    tag: "v1.12.6"
+    # cilium-digest
+--
+    repository: "quay.io/cilium/clustermesh-apiserver"
+    tag: "v1.12.6"
+    # clustermesh-apiserver-digest
+--
+    repository: "quay.io/coreos/etcd"
+    tag: "v3.5.4@sha256:795d8660c48c439a7c3764c2330ed9222ab5db5bb524d8d0607cac76f7ba82a3"
+    pullPolicy: "IfNotPresent"
+```
+
+> **About**
+>
+> https://www.oiox.cn/
+>
+> https://www.oiox.cn/index.php/start-page.html
+>
+> **CSDN, GitHub, Zhihu, OSCHINA, SegmentFault, Juejin, Jianshu, Huawei Cloud, Alibaba Cloud, Tencent Cloud, Bilibili, Toutiao, Weibo, personal blog**
+>
+> **Search for 《小陈运维》 anywhere on the web**
+>
+> **Articles are published mainly on the WeChat official account 《Linux运维交流社区》**
diff --git a/images/1.jpg b/images/1.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..e7b7a49e4a321e9e93550fe62a9a5864d84f989f
zMsEnbH8mV`T)8*>pPp76!+Z?NAaA;M?8rWrYQb9XC2p;r8|P=3{`AWEs!#DMqYxZd^)@K%nXA=tsWa(TusE~K>x6>+O{zD09AFwk&24Wb(h3fWui%6imql#&5s81h+-@pTdN zuiHHZkbm-r!_!x&TaXt6p_9%yV})H60NPbah~mL2b#4ERx`P?QnH-K=a>f_c?IB1( zx1Z!5%aeim1dd=K7o7IQ^E&Cn_hFadRvJ=Q>J+XJGrAL5FTFKWTkM(BrFwJqj*aQ= zR@FL*^=mY?^fy^zqV31eyck|yZeKXwVq~G4pZYdf-2RH`nSq8L`NYm!w>+E2<|>O1 zwPY%u4Z}1qm)Y`Ca9E)!<_iARK1plk8mrRV!J#{D4qjlYbq5{MtjMrWIG&Iwepfc3 ziLEJ8TKqF*`=?Peh$577JH9Z)!%UWsoa(u3Q=5p}V#FwwTb5wPH)8A3*+4Sb|BWUS zIMm}{?r<^b+~XvJE=;G0aY^G6;@VRfS9Gnj0oCeLUOGE?y{FVB!JCt>>wDWC;*ibe-It%5+B$i-MQI}Y{@ zWq6h|KaMaz#jG2Yc^dEfNrEIRmi4j{=~f&rT1>&wemWefeqs?a84)uSBDuQt)d{S& z!Tkbd^$+f=tq00lie%fes9=U2~OJhf8;8dm}+M@Dsx$~C3W{i?U0tJ}%< z*37TD%7C-@%*Gq`rLMMweO<3Vj2tu^X(;17_GW>baz=&{IuY~XCO78|456)n0Pj2q zW`q6#gW6z=;L{Xg2Go6-Yu>$0|10}D3Ec}~$+rAZKK(DTT9G&hETHPl*LQWs<++ewPWQ&+HuFhsnxz>&5Chs#i)k-Ko( z!%p^>{YI%aLu>a4!1@~WS_za2A>5*Vu1hWU3>B(E!jCXKpvUut|I)iK`|~fQd=tME z>5p==;TDjXB%8l8y#M2%egCpUFvS9J1P?$9C2_lY0geFj!92dRd5!%Cj=&?j1i70B zmF=0OtyzJAy4`CxoYUSnkc+SQ{kB$8IF%0d$h9*y5Y-; zq0e9wxZm(oX1t*TOW;XC-N938iuS=ZLQ^dZQm(G$=xGL&JY8r|8|_0flS=e0dg`Lm zS7q*5tN1fu_|oCw{dJzZV8UFG-9-lh-Hw4^gFeeVhI>JV3TF~4K3G1mGAw`=&oZP#lU~i+craRm^!G*$0wYWPQ_9kY+dP6lT@6gb4?^`;} z>q%SihX<#}+_%BrdL&X6#s`my-CbiXpg#MdH&nQ!hhx9Oc4Fmosc_j>)J;xVe)+kb z>G4-9n)=PJIq&&Uo1Qfx@|~VK)a+=}8G9JUu@Wx_Y*TvaXL%>jbgx*~a-DBW?F>4f z;0yU*gjhEEnOa}#7EHYs_j-%TBb9ZQJ9`H{+fNORwSD2AR=dPV$gy|4@L~VrpRGGy z#pB2)9*bnf5sy}CV1lt(x>lvW5igsre;q8`LGCGQt&X>>-FlT@FkzjAicK+V=T#w> z+f#h(?OK$8{M9C&b^qJK7AfBYq%=?2BLj1}u@t`?Geo_|&s(T!+fxAsa( zWZETa#BcvD$TTQso@>rJ)OY9m{s=lr#5Fx6z_umOC|q~(B`vww;9qAm2v;Kc+{LyX z5?poA;BI@d6f0+I==KLHy+^E_vqNYS4<&j(3kB7Dmdmi#eex;dM)x#M&rTy)4!UCn z`FZXS6}R|Trh^!JZI=JGnL z%1kSR-HSx}ucDaiVtkthpEL;M{<}i>-vftewl2N?wVS|sOL>!=yr?@tC!c9+3wb>7 z(qmnM0+*pbw*WHgz%J%rjGi9i9b(XA$pxE#qtt=ogNhRYA9LJ*ZZ9u@)V~GqpQrSM zw7Mr5Gti$JK+m`bQXi>y z0xu1P)ZX%xkKy>vtO%d{^GCI2_;RrwQg1cCzMIYL!!X1Z|4>0H2DbYuvxR#+>o?KE zJG7f)4^7y7YkSh8LwBStks?2U^A14K2|WFSSrB4Bppm#t&0%q~+M`D7ryJEL~{$yu( z@W~AaH1CUW(~P&Ul2?n`09kVvBp)rQ{%o!t;AsD)=l(A*6V&MOlp0gIv*J^aT3S2kQn12ul4Fl=tD?LzQlj?RlmUV{zozX?7TsQo04%ME9}oHAl8l zbZ4NkT`UxQ@r5m2R3w`aC!ALx&pPggEA`-ngxNTFB=Xl<&Qa$83_ct!R7nK$By`)hQg70 z01GyZ&;to09=dWjgc5?7o){ijRDCw2r56KXf*^)Rj$1YFr&vwMzM!D*Dd72S@juK3 zPec`6j96PV9YKou1$Xs2|4vA|*~WKUFVY3+FR+|=Rd>hKt`ea#|LFDK0$*Hwxpa#w zbm$Hf9uRvHRS~f98_@KXvJF5tXh(Gl;%+k^X1272UXeav=;IL6jB2ohLw0A-USuYgxEUN_Vw5fbqUU^f)(C99Hoig6@3acY9;HsuITRN z;~MC7Mgcs#nEC}hJ(-N~YO1`OEZ~Eq$@J%fiH{S0xRdZ1V*7tS`CmSP8U(C7r+-@a z*(sZ{&E`YhlJRy4Y<1ZsZ{tNjzq;=K^5i|gyB;^s8?Y7drNRtmGjvWtu+}NEfb`vo z%mZr<6Ddu|YRu4(0R-)5ta@I12~k`FW&-s+v3IGnLfZRp9@+lYM6N`JU&2tJ^i81* z4bDC^`PM^|uk460{c1f5sZWP3g3C{L5@fHSI6YKksac=M&9|t*l0`l5mS9d93cNd` zwG=!aia|TB*XfZDx^tN7=3#N$BKdpOU>WgspJZe&KP{Y>(Z?vE>4H4!kP*T+;UHwn zm<`E8oTDX?tq={jLFe7ZCM4Khj`*(AA+N40$)}p+diwH{1%nk9ao9|aHe>>!4o+Y{hSdg zpW8$hQawW9s%7g;n?|& za`#gdTl2F?{?@`(b$J2Q`qgd(Q7T@Dpz}O={y}K8txns)1-^BDp&%#_3XQ8 z7|Lc$aWqQ+RxwT=vNSSuV?DXMj`&v|4^utg;Yt(-X;%*S2-A%V^zFYj)UGwi<74Ri zJpPTkU<;4{%X!N!iYboF(%=42{Nx9zu1-vG`iWwUa@GswPsGPcx<_A;Z@0|_%88l9 zYnko42Y5GP!yZQ^GNLafwOGV)pDG0=d|+gCto0smX76%#xbe$+I;!%}CRrKIlzs1j z#~%D#6``T=^{>rY`NC|Nf{PpWFvLu)`~czzk|x;P{(R~?ech>(j_I2Ug&jt_G9@l^u}{zoK}G4>G`lcw3XuAVqnx?7`Q| za~|p%uGSs3ba_a;sA{deZI|>+jHgJ$1~iwD{WRHY9`483yOo+0%319$ zFT5|{C`n6u%V#kM3Z5IVKSrY`VwM!Zxi*pKOgYTd+L~iHi(q45T|>wAqPc#iX!+x!%_+4+ZpA8{N*`7y#n59&= zcibD z|D$I0K!Ild^>>``E8-OlV~-MMwiwqhJGr%~p#CdvNx}B+PLI9T#~+T%TbpVf&@Z?! 
z+grF#a6T&Mc?axrDuEr`{-^3AR;HNU=fn_)Y7HA=r=yC&8oVYa@V+iJ0+vB_5t~pO zDnN%~R&56eu9!iu0Gne6Q%Zue!v;T6dzXIh zI$?-;GMc&VKzTVE~ z5K0qJ)c(=s!0R$+A~L01EFThkkg8LvCa%*ex2Nt^OTP5->+8cpDimkyPy&fw&smO; zGS(?UX1_sSju}aqy0ItJzi3O1oy?<4uew@9g39E+2?Ivbtw!8!q(qU<&-I=aW-rI| zz`El~pin&rr6fp~JV1HL>l5;bf9!VAYbc1xuy=yDGjJ1!jIvkaE%Z+xo5 zRt7!wzOjN7TCUBzUkz4)`PgYG`5U?dKQ9$t`d=CFSwM|~uyl7}*=%~I zFzLrQ=}F*`a0nbOjvBWdazK6Me;AV=6JQZ}NP#>qA}L%VsXg(Y+4&`f+0x}DoLfw9 zhiTXI&GLAQ7STNuCtf^Wwr$5(@z8A4#W1U{EE%5Ue1wnM5uXtDa;sY~L)D{Pb}k*8 z#;s{fyjUd zx-ba@e8?fy(n!3!w%Fdrjc zQHO;EpB&FJV6lmPIw>W4?xA&OXb1SZ)?SC+$CnthUo#kmw8;H<^pI!J%A=SaX(e@W{+yO z03zu;YyxCT&R^!`f(OM=#{kI*g>|7ovWE1)pQBEgQYSNl#@zgrx>;t5}w!^%ny%HHXYI<qf_~Tx{9*bv<7b-Kj!-D`oCU%$=m0n+tSgQC>0wRvq2- z?MF(PA%zLQoPT_07@v5b*&XgZI#~UyNL21O>XG>(W+xChbGx?Wbu-R1Ta;pv={jV0 zCT&d!K5^>@Nqi!|EK;lm0|Tmk>=aDTwnE6T-{hV>U*hQS@N>+edpA_e1%-#sJO*>i z;ATOKTn$&@OEdsEkQoE$0V`1|Kka^-s$y^cxSY7GsjfIGQay6K>na|0HD`WTi=^mG zYdHT&>5S7Mu@_3VRzeG5BBa`k96HnXKq z#UC2oUW6@Mq#gn)=?w`PDu#q8!IADnU1vze-XEVeOYhN%e0C8Ap!^QvY<<3k+TsOg z$SM<^(g$>vRQ+^^em3#95EmBk4A*4IZ&ZMj<84+7JGm-g$4y?<{=^-YFoOjWPE6%0 z9xGj1m9_Wy-quXzABkVq?vfc$TWS}ljv(!UmTQ$1SO%3%!tJ3T8*$!A{SHIFvN|Re zsn*VOq3jR#PP8i!3(?Pm;Ji%`AR)_r4mn6Fkp`OwGjlb#?4b7d!Uvv;GGmxsvab$#y`eR8x#=jaC22ve|Wlv|{TD~ngw!^`4% zYC?x*3d&s_i{Xjzv-K&vbZ1BPG$eceRpgAMkpBtLcFT}Y@1)64L3XZp=FhL|JXHMv zO=`nIZTo0i22Ye06h!E_C5DTPVmAqS zwmypW`zZ(M%GiJ;P7+TkFWf{t%K%g&eiR6piO`=E;{!)KEZ+=t>t|#}>RjuGzN6x0 zd0&l|Dc^dcgFI_fLqYFhoB);t1O)QzfY>eYPB;P$`d)sx^W3tzJ8eZ9si;q%A8xze zyhvvSJ{l%2p1()(bT#pq!*mJFJ8T=huqQXHb^ZC3nG$>z z={|m&O-r~)I$#kkEJrf@@OR|zd;RB&R_tN+NHi_oAM~I(h|K^fLQM8G%n*~ba8*ml z-$K8oApf_|TfTXxlX9+VTY_uU#CW5ri8MF+e`mb*FYBwmlS9auxJZ93R-*cW@I$KS zydM(_*%DhaQa@17;W$wV*EkNiRE4VS)p81G5@e$e%X&a?QXrn5}aFD1XE zA4UF22hA;_lp4cNS>vBz2VMNxK3^ea&ExJG@&{wYpM=bFcf~D7GSgrin>x50!6{68 z__B2MwN@t`#58w)dL;1p=)V3rk+Io6Vm87KgW+0+KO+W3w`OV2aS=Jp{PK)I{cTLMcGNj6$I#P`h^7q6m99iT!Rxt4D>!Kkn<8-t`q7?D5JS zJAQ;$j0N%bogd7VXK7D!eI}5}U-5tc_3o+R;^v=qH#B2mT;{^YYcJ0ph1##nxs$9? 
zjT9G^r;s=|BY5CL&m4YD&X4(uLCX!W6q`&7%^$Nx`FKC_ zlqq{J1oWxw;2W(!*{k3T9^mxm`5QrVgF+^rMBX zy2O;U+w?6)d#O!7AtIPF#O8ofL4>?5=Urt1aFu6exPCZk<3;rylbE3qghjW{&(|$` z6}W%mv>nVszY@>7E~>Zq*0Q&a8XJ%-d1Pp!kIPl2H*#LPgRV1%^I85b3pU2`{HfPh zW6V%v-}JR&;Dc$2v!yrhhq%XUHlC^Er@U)9a{&yfZ5LACIlnRK@w7H;9}y zdv`y21g!OKWVZ{-i^H?__H&4@9OrOvm(@bS}>W-`3YB9g1h4L12?4) z=VFxV(EARR%SnXA#YAJ~W=I{Fk5BbHDQ~(glAGMsoA;4-KfHF?Rgu!^-OPekv8pXO zp#M^c&k_U(zfm?IrS-3JIV=$!vu60Z4+LVPBe6yAH)>DCoc=YjM8IT;fKi;~n3;7N z=w~s>{Kf?+)Z%X_^rTn{R?&$e2>mZ48_>#rW=#S_MU&zn$wW@A(-7sRheBhDfYl$GPiFp9*LA>|E9?TNUL&wF*4EAT(+H|g> zQ2@&HQmxtJJs$I4z*jjg{!kk*N><;{GUGPngJsOevzYdY>-n5mM<@SM6-WOl4Nw(J z2;(d28LUsduhXGD0LlM}klWtk}G~Jm6DRxEA&A!XAtwVVTd_6LnOHc2K z^i27o-+p)H{d->tjVkPZ`c_aDxDxb2Z))eiYRfz*(f>dGiPW++sc)mZ^;fVgET)TU z2KOL&`V}%sBF%polo1|-vIZ!khcOiNTsE4$6xw|C-Go`~pRCfRt0OEdoEOOgI4{qT=Y&f@ z2$u>6I@Agw)nb4U?rRpWAyb^C!nUH2d{uY3CONzvGuT`4tmv%P!~I=_7F;ohFSssf zB;7Pe?a^vBs@Bqk#qs1vj{2;RZ{ngwjs_UVt^q z@DlGic+16e%pGSNT60=uAoK*i_s?){Sv+qzcPW{DP&pKnpM-SlLRRHP1-`SHNRh_= zJZmUWLZs9dsd6V?LDatu600~##Pu|9QE)fmCt-O@4$!Ard9K{W0n|e4(=YT|Ca%fl zFlbYo$aa;Lhq_54iaxepy5O$O!9gc{1`bPSm+$Ot)FsgminiK`eM`Q);m*g&h#Jef zpgSTJ@SV5N?KWR7cylzMzVJB#_76$|8N53lERq1YR(nnYSK1a?J$c)%K>fO)d3lY@ zE6I{@=Q6kK_3aD&7N6;xK|q~~$+Jvoo))sK|1!yt_1OSF-^$hY#c1;;WO~)5*(V!{ zBvL-4Y>1$;t z#C9$F=Zhx#7PQ;cqUUdalFz66M>vUw(S9zvvD9IrdWa3_oNcE;omkyMh84M+ctqb-7ocaBTS?wCYXW+@x6-?!E~Hmj$<{bbB0n6sx`UD6dr1S#@pJ_UQwT?a zcbSUOW0QUQk~Fe${s#2bk73uZ$R3j3wz-0T7`h{CsLJd>72pSjGZrU?n%d4uELb7- zZhwpGPVnP&6o?d9s@ZwjFWrb9?f;E(Yu?2-gNzGN@0F;lQ(TH(ynaE3}ygI^r}a+vCBCr)h)NfC0w>XN>IE^BdpWF?x0u&)_F77$vND zZd=MIW=fz4yqdKz{^18!Dwq909zWqqt6DQwh%iN3iW-+94)w>gT1 z)8!--2stL0SyZ?CzgWLBIPjLX2C9`vx5M-OM*TV5!p64z6PSNKrNdUJ2j8#|0*CJX z1b;}wf@<%1>m|S$*MXFGusgZbX;l~7yLBsxy<*^nxGuL&roE%uvC~Z;+v_U?!R6?w zEhEV691vwyVtSr?d2;HGK z&Lv&NTgoJ_d2*lJ-h}-{&0%4~_Q8oB6pn5c!zlTh}Y9RR=tM9cYD@D<>nB?)E9cVM$JGyl z9WbIQb9{(18FIcy@ORNuChYwnle|&UU|wBUnDHN)6La>coyph9H91k5t?OgxBx5+vSL{+OJ1T_ybl=`S zU-pep3Sx_Pk$**66od}3HUt#)MIduNsV}II3PP`QBbR&}&dV}&u#-yt5TdH21J&ee z+xb)hFI`YnOZkl&>*yZktSX+ca}hq^w$`*mZs`e#Swu;xKDD^1b2CnK^c3MW((_DJ z1Uh(U(JYpk(V65h>Y6m*INC zh&K%!3G`xEYeyX}w?UxvHnsy#sa0PN8B9Rk0Mc9upKo2O8`#_5jhNoTWtU|MZCe^G zwar&uV?7w8`nL%5AwyWfEwl!GkvbXxQ!!Vl;^*kDE}rU--XH9Y3o%f1{&IK|K@;xz z71VgB&5*8a-KXC1rbzlhTdL-Z> zfHurL!T)9*p(ee+9)|6Lxh4`jtWK7j2OWpq0W31j9Bj_vM1-UV{*FiI6?zg^D;TJ+ zcfj=u*`M7=OYyXJc93U&?z4R{Tq3||TElR<$t^NSd5i7vr(L3)8C8PqLhSr$7bym8 z8gIw=)pxup0viN-$a{u3IS$%qVacL7oH*gfsS}|;#JPG)ayj1m>@vve{pXz?_>xHh zyRIda5!-gnr|oIMTMpz{>}~T^v5$w4tx}T3{R9D9TulC%^ToLvri#gTj$g(c-LETL z)A>)^H##96`UC&fxlTG=E>86O2hLS1wVSrl)TuWzgVsw967&Qt{u-|Y)C&}G(PIzrpXgL3BSqxXqwVXU%0j?|YEZ>as zIOuO_Pcx?cG{M)Qt-y&&DpTJ_8UGfXQgJmWv+rhzsEn)0|pSPAmEU6wq59Nw^mJ_EdAhAG%s+xftiR<%%d6z2*wi57JwPV5je7e>6o<+8`ZavoLnr%iFWxM8Kn=B4LEayuY__A=S&%-~aRfWJ{21@{H{m8e`)$;ntsN#RbF0 zBckCo%T0bCGe#3fQ`sp zLuk&YzK{(<>Xo=Aj0l27wM(bY8eOF6%J6*{r-)nmq3KBsevE{kE;nG&>sbQB69H4?cB^xP^kZq#r#~L79^GN-2 ze>SR-b~2RF1HOf?7>YSblY!9dSSfmz#-rhbVMEOKjKflt&_|0ORr`E@xhAw*apo#- z&`|g>8t}}?vvdCsdv6{OW#9LWj}&D|ibQ29N+ncC38S2noU#;C#8fI_JLck7K#x^mE_o=NN#wKg_Kkw&hMd z;=-CsV6yqM)RfnF6BNWH1cFHpR&kHnFb5{_gt5WIim-w(`KVDJ>Gl}p0KHIsckDye z5KM71EA&1MA24Wlo*E^*Bc45^SJI`#cu@2>_QsYVoXBZy{2dpw^Y*91m=n<`Qw5t; z?b8JXgTgo;0`xahhU8dj^t#O9?0&;VrD~Toid-l64XzU(1JDH%W7SUrMJj0*G!F>i z?&T;2voUPfZ@2_C)U1Y+)xzCsWX>6~*w-VGZS+ef$3t&^q|SOOm+%nh`&W0u9#fA2 z)1{WM$4VTAz2Uey~{&U)Behs`pY5kCN4{Lb3C~s_#>El1DqlM0 zkA1_C#>Q&2e1dJ(#NGe#eV@m$K|eok4j;!>YwDBw<8`vwS(8$5alUlznp*ZgCw?W! 
znMMhi*+UxZi5hSEUeP)otD6z?7VD2V!GUWW$Iejc<0(S-723qB*tu(ysBf{?S7Wcs zVXt50zV7(DI&d7HISivGaV>TEU)kw?HuuMxZ zJUnQFY1;T(%^(rOX`rkrW!yYcZpV@}$l|^YY1?_vDb6)TF4Yr0rowMRHB;AiuNJTRIi=NLWr!)BLXP%+ZP7k42MyViQ$f`{!CJEgL)YhucSv|Mkan#~pQKPde`Rt`1qQSQK$aKYK5-rzXx%8F@NNo|^LkX;nu8&Pfay zJ?tH)&7~aAO3Ms1IlVx5+4_W}uXDV7Jh}(KfRuHcK&h*sF_Pi>y&=W!QBLyu04vkU z{X#g!WjKO*MVy}sEel2-%>!GnRR`3lr*nUYmun*1qDH}0@K$Qd9j%^(Yp;W@wssP) z5tGq(1k42i8*V+0MPZ3xtZfBVdI``=859qKQ4z#BPc2)1S1|5EO+`bw zqM+uj`-cIifeD|%^&4+ZwLht5n)TqaoZX9=NsJ19v(`S z1-qX)2_X}tm@qbuT#KEr!#ZmQdlof_&L%MFCXB-Uv|MID><(8i*9{A;R^>S>Z+<$u z;to;J%Dv#oH=KkVq^_8N3865v8t(3{MT=@^J(3_({CZLG`@x&E@ru3;Tggob^oS3u;#ON{*Nw^B-H`X$IlMMHPWD}_ zFRo5%Ew2H~8C%}n32b?_q=WC9B=M&C$B$fi$5r`7|!=!eV`r@gHhY~#uy3= zK|uytkRJWzvF*@~Vw_iHe`HdNl{ayBs#)HPE0#lQ26(V)j-4@|`?z!@;4QpWVlik?>F~=F3zfgUOOo-W zpDBy?Xxtm3AKq`{XjQBbc70yuAw~KA$IMa;yES~`PY~XOK5c19PEImXW%>;V;UqlU zo{%->JS9K^wJ-r;4G-4Rw=)t!#JLzH*Mr+wJ0U+jn+=_QQ{Jp)ex z%2J81NZj(QUJ3Mq{3$=>Z@8y0(unUM$S@2K?Sq98hj#;wu^cJLP~K(7yPYM7$&};K zC@)~Tm65WnQu4{cF0z@~fpZ-7WObKl`yQc-kE?nkEGOi3UEAQ)D8dmk<}Xj+w)XZq z|E_`R-my1>x#iOa4Im?clx(OiS}Ei%szMZvPuo~1lX%!Swoa*E0kWfRP*M*yt^1An zj#OpbL0)qOStMH?rqS*ysjq>20Qp$f1)-wJ;Rk~EyxOTRREH7FCd8v)DW-y3bRVEu zv9yQpC2xH7=tt@jrc>9`40lJ>)iJYW?|I%K9+{Sq+}d4`koTCXAfM6;31gqh^||YD z59fzH(pg_NpKQA<_)O~4wJTTvtFQ@%N&YiBAO8Luzd089Z~pfqW|(JNN^rza%4D~U z4M~hqY$(e(Pms>55S=a})%$79|Me$RLHW2}8ikTG?_IU7K1Z6juT|qeZdGL4@LT|( z!dJ3%y|pUY7Zs7gR|!br^t_LNN|ZN-sR&xM7L~%Ll+0E{7|Uvn2E-XG!teqt!^lS! z25m765rbf+4@eC`GK=BZPF_kXOk$>+L9OE}&BE6guPmJR`iVNY3jYnwUmU7j6mmKj=awAt{cYHwAw^1}-3S3&L5GILy5UuGOZ5JN8i; zy-;nfrNH3PAUotczZWS@d#xr0h7ZL`6g<@~Y6&xG#Vqz9%Gdd<}eb& zY8(d(GlK9Cc2;ggjAsOjnz^@@IelxkhR}zVFK?3h-g$%5jfNVt=hvUKUc2PJOvqD! z6_>67JhBQC-qw*w51*e#fl>p$jeYvD=2+F*jYxGRsp2juXK_Glx*$Pf&PPh9qGXOY zZBXi}pH@CIrCoD-hnv4d5SvG)>5K6m#pP>AYiBPXb}S>b-KDMo%Qqkaot<*Zw}duq zGDzsFP9E5UV{S(!>FpvMUN{n2BH4pp5JUENeFX4(5!;J(uRY$<=3g7b*-Qx}IEGEi zPvFOTMTS*)+B0QERYvs0Y!v%N^^)XfJxG2b#}3>;=xZl%<{I-S?qF#!Q4iDoPcuKX zZvNcrLI?nfCDaR0dEQP0#tJ0BrxGlo*NW34l^py4^oZ0ZY}rb=92D&BF6P#teK}>K z0JU*G;ze4+_X2Gf4rl z4ZAe&?4r9`^trFXrnLJVzqTvyUN<)(?$zqlP@CB==N-ME^4I*ocKSKMGyr=M6YVAU zHVtXrZWEqwXlWVV{w(xD`ueZiG`C_CgaSh_oI=6`_l&|Cav?=_A5vu8Q4_Hud643m zxgk2rOCF-!CfFM-?V)ZPIvzC<^eDp8BZZx%7;vuKgCmA}1Vsb9Uqyu2FyA34XtdGb zcHhGoL645V6v`w8c)C>+{zeeO+(#5u~mpLA84)> z(z*?_)nv=~-J$xRj(m5$7mWU4L*e{fy_GJF7Y>M8E9i17eMBUI0PLB^f)Tm z4}k|56wVw~d`%ehc$ff4nSCalwX-IE4MOGM1Fh$zF@4&`Yh22@?(wdRcA0hYU=Y-bR`daKJyrGu~>6gW2%t zK4fA_#E2iE3+TryCck_T=Y8Z~|4r|(_JJ*$_@ah;WL}BjhPHN7&ub!LJmJ<+EOnS+ z$>`>IBRbZOzdhB)-{*!%h?jkygCt`9v@lD_C>W4zsi<7CwLQ-rmAYMQl~Jw+ zGXm3l02-$)i4C^AHoWivl_D2|NmdJP4-101&@%u1c@MfAt+rk@q!gOrBRyi6wp&R~ zRJrs{;)>z5ykEGEH|Bi*?!`c@%n)ITZw0!1$mnvWu)~Bg^{Q8yQ0gToLFXI47Id$%~bk zBAi)NT}mqLcNJ#$n=SNE>^iuU)CtZFTIbbXWUR*MvwUmtxJVFy3%L%?W;(R!*7k4s zoP9CP{Mwz7KW&>={Y?i^b%9_XfrgzmI#?eMaPfKiDWCLUI9dHX;SoXG;}tt|8g{dV z0o8~AD{59%799yft>-}i%+oaDCGNK1hUm+&dEFc!NKtv^q6*e^*G%3Y;L|x2DC3I3gA@q2{ z>slSXkh-QtsLo>i*bpAQxIW6jUfJ@K?q;8!rb9kE%5lE;dkMpW5JOE}#}2Tdgrqpg{zWW$>}^1D@+~C{{|PZFO4-`Ol^$`ySKG37 z$RMuz?4-F7!ORhJP6FHL_+zWaHMbI;>ByJFcgxPw8+}Y>)Jd-ccC@|=m^)c$9TYC8 zicAekjHoh>d+`^M!lVYTdT-itIq|^9!r7^*)*HSqq2OWSntTft=PgxDUBz@5F{x8h zjJ+Yrj!m;!QewyZ(vFfrXuIL903uaWXB4V=Yzt_ki{2AJ%Tw=?Y>~nZOd@*0GpJXO zy}2>~0fO_r&NCD$KJNbW`Ogn&|}4P^msPew&@11Kb6BnDCmGG>}f>mk2&F;`N0GzH^k&qq=91W~g z9(zsi`%1;$Be2qfU={&e`2+N*XIQWGI+l*NQVqKFQ}4R?rbBy`#BUXS=-|)9c{Fix zAi023BmkT$Bc!;Qm2k05hyXY>Y}98Jg;V$*)cL3Xf8t6N7w$P26Ad$e;9jqd<(+j! 
z;J^BprsBIA3;ke8_UYJ{T|6-0-Rkx2~vGEf#{qK}4nhkk!%|dJs zqlOfIwfgS+YqsCdhx{%WJ$<(knSJ}!$V1%(&ROS9`?>6bAw2tv6q(}#bb3Fga?nD0 z75Mx9`+&KVkWuA>oE{!5fhFG$_^vec4~3b8kA;blYID^9WKaMHpv0Wicm>D|OOlA~TJk5z| zFmYefvM6e6Wg^9X&{`6+>(@e5>NV{y&igE&g6^e(cu0XX1O!*?04Ohk1oE;TVmjwV zP}LA!oD)*85BQ|Y5v-S&pwXlVZR_X|yi)}|%8k<=t=3*jVq6E4b5sgy$yR>DLE+>c zSBP^610v@F(6$FMjQQy7)nKx7_@GR5E&{mX3-2PRzeN}p-K$p+u4TmIftC>4=Tfo(WS%YTi`9rwPUA-Qo|=6bUEPQ!io&LVz*gy#um(_OOw} z>UKYY(&$1@*3Qq*yXoQ#CtrWxaoW8UPi_>Z@DR1Z=Y_NH6g-#Giu`p>*;$%`tJ#2EuV16Vc@58=C($jMsC9s(n{ z2S3UOIs@xwPxJ^Xv{)v^_i%%S?gb6E8ue$5u`4dhE;wyvc*0}4vLz2>QI#;x_f%zj zs8PQiK(}Ro*esxV)cn>&T<}lav9{YK05S>%W_gbUI&f~^_u25wI3X}3uN>( z?=NLL_8WbSHp-x*5|+(<=2b0Cx09}ypShs7LiBuOxwj5h_6fpQZvZN;oT040mewX- zMd%R(8f*yw(oITcl6D%d8H?Oi`)WZ>f97sP;*mS^OE8D- zB2gO-bqjnHg3t0DVIKfGV-Ji8#E`$+VueUdT^ErlUf{WjHl%>Mi<2~XDvvO_lBJ)o^K_CJQI6B13k$UyIJSqne!XvA@48ShQD#8o zb!Gii<$WI!KoT7c1JNIEArd&cEyG~m>Kc`a@PUpKZrMBa*F&t?`o{_(c;r`xirW+- z!?;?cuYtgsZ61a5upJ=8+HWDvbMAgtd$RTJi7=66>*Iy)wGxX|C&C7ZU=_QAIiFgM zD3kc1%J#%Bx=y}hJxgsIXgXXFQrIJPLY3tkI7TE(A^R%orLqxdS=FR*rSPv6=2EKGR; zBEU+e;yAZx7#xr}h1LUg2s9_!q!fEQ@E#N_AEmzf`zwB0Cm%vCZOf`5>1-j#Wfs)f z@Wg`06)$=Em5nfE0^1VXe1ArnL_(`$E<=!1@9e9=epkGt1N1Vi?HGZSTn-JJvDO1-LY;q+`g_q0#t!tMB7cF) z+j0C)VP@vSyBM)YbH$OS7*8*mFHbM!khclrlkyjOP0@sdq_o=#hUXP}kE-u!HrOx` zX>DOsY*MG==$Rj;gH~*< z1QH}eW%<@m7K59O!4wk;zPA-{r<{m2V;l#VMR1F;WKF${Pgj-hi0DRznUg)1UE zpp6#Y!r;H;Ej|wI*-xl%6qUQ~zU&r%L$4!ebx%JVxfhhvzb@`Fsa5|J!JKk0irZKXB)+2xnzL*keqZAQ@+1nT=$ZcVZ zNj_l*)2z}9v)yXPDBhGxa)Tafzz43975^}If}Iar3l>|6>Zpcwh=ihZ-g_%N{@CEa z>W=wXEpenX+z%QDxGoEB%XL#|zOtC=Xq79b@iQ&k@&lnz)K|CvR9EDQzvi`5_2O<; z(vY_4E7IYL#Qn_1Rx6`D`A^g9*l%wqTF;GqOHh6mWilB~;xi`)Af2`~0Ct7PvT^qb zFOf_0`}^V|p6K(xTfUCWxbF)w`YLg#P9gd3K;sA#dcz4-{4eppyrIWlOM*cIbDUZI z#FkND6cXdU){k!yeJiL->NHj}nuV1QWwSKfKkeyKzBg|P#R#0xq+}U`M^;7tO0s){ zHT6yBSjJv~CM2n#mt?#Fa#q$;d9%iowhMj544sX&y zb9G^W#ZuBa-#YTJo~y{z02U;1+7$BN(?pDfU@bo8@I;Zt-MXx67LuvQT((bLC<=Z9 zl!=Yh@e4v(lqyJ}bqtwraIQePl`*hoVYV`O%6Laa!rudw&>A#xCP zb&uzx=Ok)9Y;OKNn-P~?2_C;N3BrU`xtQGuUW5Q{3Jsp*MWtxihupazLZJ(W*oTZN_JT1t38svn z7A=kn)?Xl{9vn|AkoSWlSjwJJH`T%se1ULK1kiCPJ(O#Nw&XF|l!Oa5&Kw+g(m^|z zp7-EGvwDEzC7J7qJ%Veua_nXeoHH5#Mvz4;z*Ixj$2^C_8va6x@69NjSzLufz!Z)M zD4cdo;asBJreThq2IkmZ2GK^WS>?Cznv})lzG5V!J}r(JX;K?kwL$i3{fWy5KLZF) zgP+!uI4VfNKnk7Ea|nQOzpuHQBN*d8z2>KOU{JxDE2kt>2DwWH)h$d)MXGY36M(xv zxiHe7nO)YN8l8uZwPl1Z6tqTZ$shk)=%!uS8D%; zE4~ebCPEHpPq}7Z%h*6HJJXmx26Ac^(A6BVjD|+WscSJ4!7k;B2ZcRuRcq$D^(>Oq`I7$GvBG%f>Wu5f8W-$%lz`qURHk)AQi ziHXoO(s8d}T}P|FtN2p;yzk*%&11m?2aXpSFhD@je4I}JiA=}{W8V;VM_-6^S%-4> z7^pBGqe41mKJ$G0<-FtVxlL!jG%0kwa4J#RCaArvOU#@cm@!)C+9)WwEbi0Z@Wson z&4UQAdhiqrl{EMRb}9&%hNcgbQwrDFkw-UvAx!jQ2z)!@@=H5tuY9(*QT>l39{7^2 z=5%`7<%nYf{F1MWJ&xnlgPqns+T>_dKpR%>#lx6mh}CU=VxN7Y)HpNOY<|_Ahycxb zrt=}B%+bvTkZn9Gnf-VVbBy3n?kcLy7{6P}+2RS)6@MPgJwL6Naa3?+15>{rPBq-6 zRVtU)fRtiIp01^Y0Z$}nwg7_wv5k%5Uy+t#$ak+c^|@!IS-cz3II;DzOxIvJ&OO5f z;ONUUql&(4t}dzIPvx_lRSe}?g99e1mQ98Wd!){Z=B`+?tD9e(pRMZlgFY;-h5hYk zpem-2=tu)*ZkZVC{otncl5!cr(8@#+dW<>!*DqUVXOQ?T(W?4{;uq!bU+5O7eas6n zUcTEpXpNtianci7$8Zb!0{RKyrrNH<#PKDrc-H83I_HMUc2{MQ1rD=IN6vR9Z2jz~ zb)GTE4w-W1cy3C(%degeNc zWh6=B6Yn+5x|;X0V_QLF-l@W}?t6?Zy6`)N#J^1i0>H+`9E(+21ol_FqA z#31W?dvI|%&X=2IZx?9IeLt4#=<@tVjb`7{2;Q@`tEhJPOfChC&qxT~egXLo0A+LF zoCOZt4#p`AxR?=Z`QODFYgRB&9ta$Wq}=zK5jdFKCyy$@xan!J|1$1ro?wc8J7J!E zOvccL%}p<#WjLJS(Td8|2LE>&IpClDKZH<^{0O0X-37lMd@9)oXv-?Fw-{p(oWeW% zmLkIjPlkNERpdw5K_Q87no4X>$6J!^Wg6~4{Co@ zeAghaIX_Ee^{^7zhKcDC*<0Uj3oHci51r&b>LREm&}R&Cd{>`QrF+YPoe>FeB5aUs zYq;2Aw+Ilz{G~Xk5L8*^WfF4SI`q+ZbK0iq7 
zy9je{lk)@i)zdmfd?0`*3v$*F=X=zFJiHL3AU>rVKA;6D1w4~ksqm{!LI)Pir>;1R^#s#c?BJ+R4MqZN@~UPN`JTsZ^$@X z+y6_)&ycZU24O63Z2Sy8(Dwsrcn^UKQGpl1GC$)J{csqvFN~&Go)ZoX@`piG--SFj zKKAuJkM)1fdNd=Yf6TCUyjmbQF{6h2_a9i0wlVp4K3&UeotHd*4_H7g1$jxh>zM*L zM4GT9=V?0>$HXoq@^l)e_*YAldWv(I)@eM%&+*0Jb9RFW82N=0`$k`22~8;Vn&DKx zZ84)tes1OsLB*l7>Zj&*XWek3EWHxgO20?v&K@|xh6o1-e>5yp9seO&#) z4konW7-Wv%6s%GF{ZYUiUo>9CKqKRH+6MbW)f+0`*{j_&Ugwxc{70$qfr>G|((kV&ylQn8Yiz+}Rm#P z48d?+^}=B-?=845!2-~_04?8%-IfFt$&g@h^Xt@@VBBY+(C;WdOdMF6{6%^p zGfFGE?RHK^{x*d;*X*pLmzMZPo+mlcXNpP4++q^kNF~!q!whbRqXQ;@RQe;2j-n9< z^KgUIFTzXT1iv}4b;Z+Gx(rdpc~a;Ah+L0p5gs??e%@O_k?bTJBkZ4B^rWb0o9r0} zgsF`wkDVyk7{0ove$<^{=3R!mG5;o5xXAQ*&<0;WKo{WWfkdW(wGnOw@!xkCWfNdi z24Pcdch{U$@p4Y@b-UKEBiB88;eg~=@_XZ1wFmfSi(d06-fFU2Z*wyrBR%azl}k-g zXXM;6(B-g6v{4**J+D)5&x#Jl!E_|Y0?+wTj7Q^rhBYS#QWtx?jM!Rm=NP@%bs}g0 zl!&Y+7&tH&w6Ar_MqsLTd>5l6dj?rr!CF-kORQ@ zC_ZzDc6lJBM$qRBy4CoI$?+9D1ExrUS37M=1RNtKUCAH@H`d}Wy}I68XGlq>W~_YA z7nV`Z?PFOZImb@>zgphcKM2Px55W~-0?LNG=D1sThg=|#Isp&x*vD~aAY0}K)ir)n z$c(gt7EgO%(M}@hyT8B712uU+-_<$pI43H@Z9H7oc-O{D{ych_(5Lt7T(y7dW8yFR z$-4VDn-b@(35u4#u_bLf<}%RNlw9yV8q-M6HJ;-&$YkUI>pMYt!x4yp>oi1jyx#&> zOK~))7$hxu>T}C$`e*zlLZ{|TV{NR^)UV|d98Cto4)#>zoGSzt5?~ilYFqe0fpGg~ z2~)UOVNYn0^+IA=D}o7`eXE7*p6z(F>79g9DjH zm!t;9F2*@phTMqQ#=na%$G`rD-%_hezz30+mVjlz+=u7E1L$0&>;Sh$2P(0r)UYF| zBxNEO@^-Y3R+R?1e572NlUJ$4JljXoKnfzT%*xKD?q1UU>oA-H)P{foL<|Mp$xsp+ zPlZTt_PHry57g{VcK`C83o@U&fZ1&@RB_qMr;U}EayW+=v8zPPlis9aj2XR9#Y-!( zknnH2;4JDOdQfk>cxXJjif?zSEh^y-mBo@9zTuQn@h-?GutTs05UePm%CYkZ+&Y<& zOT})z%yyE@NNw3se^QmBxLi;Fxxwvqfi_XnTROhV_LYNT(7HY!S53Ry~T^etg{*C=JG7kZk{Cl8W8 z+5*W$fz68n74_}Mtk9W1@nOP%45PM~D#wi0dT_sIr0HX3-fr2w7yZu2_XR6!~r=$PG?WIHD&}#n(LhuT7O}l>eRgbnY77SEub!dTB{5<-9zk$ zZof3ox@K9~F0XrIZQb&{zUI8MU6O)IDuX_NcMwrI2K&wA1r2j8rJLi1e0G2)<7-S( zyIz3CKDb5O9`)k7qOHALR-#-N#`wBC^P1-D<;>=Sg!hUuoU)f^{|%+GSeT zGlg5&o=9gjEHrrY9e|6|+hfaCYzaz0Pgok!+48N7hWNZU36Yf#y?>An5qWsh7n>Oa z4I9%lW1?`rpi*XNt*)ildXOPOOk*0gCFjeu&nd8bab;w(TIa8G#{e~|SA$+c$Z`i~ z?F70S?xRvy4e<1rQvGQgz}bwpn~- zf?fT>bI}r}9VZ>HKBC@%g)~C{N1&w=sKo%e0f>Xho-Jw_`-GxfzqC=0)w~szY<~ta-DCEr2k@hw9Bz_L?Il^5np$Hj+h$VmpT)>3cA~R-AYp9&b zy;5U(%HbF1v-FqRu5Z4(o`Mk$ZFKWTl-5QP^FCh6R#1YQr9&)bPdhl5SL_Lk*`mo` z%*UnNXpWjqg(t77%58AWrBoPQApHt7(cRC^ldhJv$F z>M5g3wj}jkk+W8o=x>b`3Adv>p|wS(AU(&(DCRKyc!-@6sS@q7o!R+oepY~?45ckw7KB%qDwl-rKA!SRs z*@S%a-&{VNH&(C8{{cx;V+Tuvu%i?K<;(YqlGJ>$bhgL$nbq-7jyd^YNEyH{XCRNP zNNOHI z2EIdh`ys5q=e$M8^32!FA;*5=AMTp*)NoJM~ z3amdPA2qG0meBi#(-&EMs4TauRMi$pp%M)=2m?!-xAO~!mvfuYdDRX-23)T`+`>H8 zXEY@V5XG+_yW=GuF;!PGRH?V6A4*3LE4PkNC|)w=NnTRZRVv*vx?9dfQTv`7dV z^~W@S)N&Iqun#jw2w)~IF}Pp+pxFJHZ1lU$;;fGykm2(zH-(hk^xRJfFYQnrhKRlo zB;~FlEgimN(bgSoQ?sfXayAR!epb2Y zI-o7xZEOXF4B)DLRU1|vrm$5*VIhAZEuSA5vWQk?f(dLHw{N(OGMuyfBuB`XVBLq` zLyYkYi8HqzaV|-Cf6B0HthjdVK9!V}CXJ^n2p0a?9oo_~);fq`+CW$)6{PV>k_Y{B zcR`CJ!H;dlm$N*sDPwn8m$G<;Pv0j8yx4^K(0N&(&HZ+5x)DTfuLAcY64z`o??peh%k!9uz6Rg)vW zA1Ih9kl)>|H!zP7>b>&Zef>CHhwLTqLRQKiru&2KcG$Ow2AF_BGX*qZef=UIqETI= z1)y@E$VQB3^jh5cY5VOQZ9tH#0coHC@A`TMb2bi51V$D&i@wiTrzrJNNoMVFlL%UX z2aXdtKy!kEWcz)+`vp(y*AN{m!|ps~ZI!X$m4 z@e=dZRf?ZP9B`x9&`N?Dx|z&;VU2~2O3|niseW`boPp^O#7zmKsT-^Kof!szfCM)8 zGz8Kcz$b$buO`jJkLW#JKMc?B2IxQIfAO~e??K3a=zmHLlQA#sGtd#{g_Yzz7$<}I zr1=C3AmY6F?AN^y^wTQhTn0Lui4R_I?3tP@oJJ)g&1e`yAN+4aXXnir2eaD0hcDrO ztk8-__Y7$s9=j%NQa&RF+e}?q9!hjs{$q|-5Qg##|`eISDDPb%H8x3Q%y|5IlwzLg*1>41r z`e|L}1k*ZQ>uG(}fDl7EMlWeeBeAbG&^ex1TBIL>ovmRF(Wt@VBnY&3Kx4uLdt~ow~-oJP=h(}V>;|mpE{BNTHi8!K#|s0gYWv&HUz_G-M!PaS{LVq^q&U` zb0YLrkh(~xp{+9w11SYvOW8(?@+kzcH3Ke0wowp3WK$ckG$&zlT@|D`_er*LlGc^n 
z-n}!}`hA1zrT{UM-8y#QR&e1^u!-_~nA=7CK;n*4SP7Se%T~#EOtE;!fvm$VivYBP zJO$88vY0pW>)#%XUAAid${vcj&Y}3>{oZgqq%e;B<=FWht2a`xAv6hAF#ETr_HO2et$NaQ0YQny~QKjrP>O+_Chv1VLB zgA#}qDg)3V0|_{-V$ARK>uV#>Y6ds|^F!DRhs?xy8o(8)8KQB700WT?vcu$m;D^jK zNu|x}>QBl(t8;z(HdkMZZK`FKd}BJnrUi4iArU~wl6| zJIToh<2ggtIc@nmI}Ank=cFAxdhK~Y+H?PADwrq%4?2)3Nywoy=FtLLH)v1LR=rS6&V9#?BPRJ0C%r{sOx6uu;WND?GF?O zRiY50@f7)N$6K{tuyMCi%IeI#%O|p<_Gp~kyu?!~O&52X%t+1?1%s>}Oh(^cLYp~L z1`S972vWMeF7AkjSAm5{(CQdN5rfRMbNl6Q66g4J^JUw?ezPYz@lXp5h;3EKdJZ2} za)#i0Cp^ux*HBGRR@tM!X>4mU zfsyPDKK9NEu%&(b9@ik961E`h_hHEpz%B6M+|STdeyN3XvUzpGxMzyoAQLjMB{_3^ zQ|Aflk?#uwN}ePU(GU1xeM^+K3Uy_4;oNA93~y&JoE0B{`7KYTXEqrXNgVAvm>)CT zDIBN2cdw@W`Wt>?wb+4!DAhC?KUt%~ac;jGb%SCr-t?ew<)%HG;%gYwjQaEAAFH*O*|OKS%!U{Q#NJ;R z!MprFF4h2U*iGLJ!CN&99~TN-yt+NdWWNh;6VBZ5BonLLcSLDxfMv>UjCt)awc+gH zRXhcq?<*n{M9gy1u9|=96Fj_WOtrFFdpQN720!@gjSK4NZ#jM+wFVq4{Wy|CHcs| zv?PC6q#;DRPD7W`*zdXedUiC)Av7;Ba4%1vH4Uo2PXkvc47G3srouQ30`$D?N4LXH zlVGQ*{7kqDi1n5NMUYF8QF+)B;8f_H+s7P41AJf>J-&nV0Jy&!&t8u#hr^{{e8-Em z-~||U(5A%4@T^uO)b}co#vO!_#0G)A6GIsG7J=?Lal%y)Aw<~Q^Qh|7w*M1X0}kjj zVqhj&*u%!c-@3Lt?S^H%%fUcFTtc|@(C?WU0C*WV+qN7jdU4fniZA-C@6)1oqt$1# zoUV9(2beF+Km>0o5N>E6rqiT&8JlX0lfAzAj+#{DC&QXl^` zq$VrJa~St>+mBTtPgUNAfU85h)p=#0GR3=vdw?YF^|1rwEW^yL$^v9LQk6D+Vvb)) z7aClfBN?>9di5(J&hB%b9$a<$LV}#XRsQb@CSnm;n!22sZNYq)LbA6I9;myMDqtXW zA*J+`m%5_r}{!`HNjTcD|3a1}R7!qH_eLf-qvG=|RA^g1|JBRtJ)` zG3JtNHFx9SUccbE#5#SiUR>c$J{lI_yEVk@S%i_W1JA-^C${tBHdcCa zMC1JAdlPMHQ%uearLA2tzE%7vF!pQ2L0E9#kVxT{XVn`X<>#{Y8+QG%`7e<*)Ez+P zCjXJlA8Y(h=6e5#%oBfQIy32i$aEM@{A9ZQ9=|hP2`$~(Ps1#hz3&;d-o+qy`yYY2 zo8LUgUE4%D{!94D*PEH%<{p~rZS=U3KX9;lo#I2+!~WL}uWT3T>nLNeE;n5 z_`iSeUsJF7?|uLGzWDlaUG z>Wq9}HJiW)wKzMl9lKd|p#%A&POG29{bAm)yOQq-Q3?8}b}YJwo&!hS z`~@UIXZe`66U22AQY+J69P2!LH-3yQ@B5sy^N*0y8zYGPH#?hDxpipv?QASwyx9EdONCQ)GTO1%m$mxX%_?QDmO;-)w@ehDv?mrC zXO0wY-l-66*5T^9Whj3P~LyRu%893m1%VLXijVEq;H> zb?d1U&*i%=Zz-A8+QAM#_@dK2x_RZ6#q@}mp>t2*?&h;jBscZYr?y0^R6CrR+p#oj z4RPBF&nkY)q(dRwCBIt6+gX0`5=dwZ2}w>_wQlf$##McOyJarR<$29>Z+ycU)VO>g z`;^zn>@FI);z|f31Xu*^sJnkzChWj*T^ufUGu;4Ze>>V`ggRLv#0CXi^Th zT3vM1PWJL;p7%*IpNwZsUgnIkdq(!L`X=ehP)%b+X0s0=9e}dWUg?I644wXlgFsmT z&ZXG_@j=v0R>3qIj__GLBj6(dIv1Cs+fi}nJ&Zc)ZO`b9`+>de;l$V*a!*tD?Rjxw#O!(9;OF4A58k=;$>6lV zo@PRcP~Cl}2OhXqTj|WOwx(SjrV-CzvmpO>D! zXZarG9ZMY5#G9%gGqrR$F}F+luy#%fB86+J&fKP?=r8j=9w887fRZ{dU+gk(Fr^@JF?2$`C>-$@#-Tt+MCy{ zj#i4+-p!xTTiKz#SXE@v#{*?j`pXL^!!L`)T{%4ZMb`{EFe)L*b58=9fZ~!NGn*o{ z^aF1ny|Pc6U32cZ+l4KRjhgE9`4y`&Z#~VEQR~$fAN4U`Zr=1cTi;AP3%?O}*3WBE zpSjX0^O_pRAk()}H&(d5d_tP_s7Xj|qU!{zGi8322B;hm#++*ZhI`|uEHiY-bj7i! z`70*o5=x@cg%_dx+Gm2p5_@4bM!nnSXb=)qaEZb1woOCeU3I3R`4_L-uTPfOymkwa z;4$C0-AR+v!)q}8HKz&AkYQwT_%7wu0qr8@#_OV~D`{!_)jUlfjbv&+7dpVgDeHxF z+Iy=mdv-sxDLi=5-T155d1__&jo-eOS(RkvP`@R8N$M$^t2^opE&YxzDUA#3I&?8w zYCH3SWn*&Uv7NVt40*TA#n~og;+zrZUeX9lqVGV1+U3%QulL`l%4`bK+W#hZf7nPl zKx$bjn2XhGTdpmBzYM8N`-W?GFybq-7k_fYZNl;NnvQi1SDr7M-|nZb z{zKS4ei758di}zy@KYWiNC_t7S`{90l7n)iV~kJyaqpawT$z{#3V~+m$nq_gyCLI# zrKoTT@161SWHG1`8s{Glf&O?G~~=_%>B9Fjz1N^^32W6t4ci%p4? 
z?GDtmHplG>ie5Lnp4*0=;oTh0PUKBrHu_jiJwG7%*&(Z4SM!P6WQHGkAgHz*VXO>^ zGQZ`rCg+@Oq>=sQLdoU&uRC&L=h_aPieBud5sCj}C*j`OI@g@IsyJF!$lDW<_WIG+ zwgH;=L2I$+&$8=O-69-49&F|jwH28A)~DrE)Y%fXEO)7;rOvL-GC`)|vO>s8ZK;W= zqQ!jgt@zEz%$JkItDiH>{MQ~|k<=-A;L)HywZ`Vc@Yh<$T{-5rcI*qI%r^Bn+1T)h zlZ07z?&`4X$*UqHY9(WZ-xNQkYc!d*7mVZ6_olG&?gZgGno}L6f_rb2DHja~`rO=` zKM#$1N9v1lWsi>-9XWF>AX{E&mQPqF&dE_KzTP7BuH)RDin2TtLanEG!|Jp8DqWX* zE=}>Q80;UuH1KlHe6>Xp9w`*20jQJiH-p1s0AYG)fE;}IMSkE%r&FR2onomt@oHl} z>4bJmP4%$>+pBKj+On~x+a$j_EYYw!qB2yJb|}zIV4hit5nCWxSH+mwJd`RM%9D+g zILt#(T2|d2a{Jupnv08cWdt9--o?Kx826z*^R~~5!tv`-c}vCnj=&H43Zow zwXlj%I%k;Hv3JN|m7~%3eO%L*4w%ehR(O)ec+i z&*DxmU5JC;C7CI+$3+Zhc1V2-5wvQ|?Z+NIu7V|ZnvE8>@)h+K&ty%4!Adrisv_OZ2YL}s4~H~bGOOM z*<&f821PsHj2j?NKk29A)vt<3K4MDZs*D40(I>B|k66vYfkL|>GR4>AYi3YLN1TR2 zn@Vp~4_;)aytlDKqZ)|v3D6qAN<`@Rp6fhmsZGrX8%w5qvKwTT0YpE{G3h(vjZU9F7SXKJ`R?rX zS4+B^Q)|~;F%T@-%bR4XmybK^yDitBwLyEe&Zh9aFJImfaY&2nEA!c4&ag1*+JLCM z+sMP4Ac;@0VMc1F*Qy{}L}ki9RA(hlN!M>16=*xk7(shLbGKnW23w=85 z6u$Co%ClLq%tZ)!=oVGhjFf4oLzx1?k~f|+$oylJGT9}^VXLZ=gH%}C6Thy|Te$M| z>ar97BT99IBcj(7>xx zw7Y6Wo-Z8aZKWDM?R>lOHF{VuIKsw9?$zlKIX`AB**VK|JQY1zntfJsQ@GshXg8WT2n)4H!0J7C{&U9<&XJ0 zesLCqn|Q0rgp-ULES2)tUhW2~O=#Dr{dte9ZKB1r z5cDYo@{GM*vvydBV1XL>3RWUrq!l@lu^M+=P}p}tqr75=e1T1N0s>&XGOLcpGsRtNwrF zQ3Lbk8HCB@;l_9aMy$5P*sgrr>bQ2w#0+`24(;^|biF!7%$|x_+QLoQOquBxW09rs+Cn1$T`j|jC zPf@pX;bbk&&N`tV&jJ<=a6uH1NmmbKLNWC;ne-z#zxJ0vw?90xU2vu;ipJ#;NAMBp zXsoELv}w{1CQPbiu7P{^=*RN`{rXwnF(2S*^?_q%6 zh4y2G(Y19?Kd-LsZIK5YeoSU)YoaS|qj1_Y|?T-wF4TnrkVhLKL z+AXb|mIQVk9&>1wdgop?bE4qh73y;7j^)IL!{UbsJ~^QeY6N@1Jx1y>PHN-viic>L zyQEn{YNw)EXmFJ}>znY1>ow5c6u-X2PZ|-2Ai1c=nU`cOl8FO()>}(-lhKD5%PM|q zl6~f*{R>}Q8^0$CWJSu`8f+QA4)i&o`R2TcUeL)?{==9*xP4LmqKX`IJUAJd9vI!p zte@bsp%%q(f-J>yIVeRb#%Hc(Bd+)d7ELqtFW&I0OT>hO6sQ+y_fXr3vxXt-T zI#vDRG!WP;4yFnM?mzyx6EE9E6aCo=2@CE8LU+~G_Ae;?V^<6drh~bi`u^H>dH2g8 zEws0AL0c1{&Ym?y7NHWbpUqUsyu6PHz?PkBwIVapkQCUSCGV>2mIaK273$_udK|gf zTftpjJy;#@zCxs&=0JE3rnt@5TUN?}R-zP-eZd$cHXR)Fn<>BV`@L7&m@%p-!PeP&nW|O(mi!MU5MwClFhBHj|_U-Du2r zDdwPby{;u30~f5PEStQi>~3)Wa@oR_kMLDBLi)@EBXoEFJ)qdPee9cY=tC{qDhTE@ zboWuF$d8CmQmL|vkg>piIwRYqt=i<~gzyuqW64q+eC;4p4Zo|0$-~4xxeIP@3aibV zSMI13-eI$Y)1vu~fu}jWa)tAzJL%27iND){0oSG&MD1IoJL=joqBuBec|uk=bmkGn zb?fKQ&9<77&bgT0v(`(l zOR#q`YpN6C^DU)HVGZ9p=n{z-Dfcfx2WjY7+P!YOd{}B>E$6!40kQ8g`_A-uP5ojc zm(1+igJml~k0DU?`>!}A|{YcL9OXj>YsV7^kC{Xk+oB3K>VxKSW9ij7x zL!;BIKa>#`uDgiXrNHjCREK|4bydf`Zx^GlTZ128a_VtG2S3>{^lQzCn*}7W3&VPj z7$#R^rW|al=S0HcI&5jT4J4IQQM11miA{C!FrMI~)<9OA} zXIHR$uiU>dsf{P9jrB2k#;qY!A#I(_oK*tE!30t^p&8INO@Y4)_huq`=06WDXcx2u zHEDxF!Sq7^nJ7W`KYkt4e-ZlsTi&-{<5>)AM8NxGj?{H*p7E9l)U~;rTj1BOZQ_EqTZYHm!6IPwOVa`x4%${z@Wm&G}znn0Mr2M+x2AOe&t zplYQ{`YrA%m|ZET+X|Z`vc3^@zP;RMew5pA^O?JlRpI-4@z&Cx_2@+~@!~WA|2_8U z*p=Sfr4>z%0?n&fRE*iKgietiM8dUIF7Z7Tu9sYkj)vY)s3jmH!-u!q2h);Or9NG` zFke#(=Vyp{7%Z^}kj#nt_7kwkaUZaFv!NW*&u$F-qOjkB+kf1fDR8eXWdKq=;YuXW z>>#RDxYuGkhj{65YjB(Zb+^OE6yUG0pHD27mNPS()RGp#7>v@b*oh^%N3Mbo? 
zF7sQ}aeN!GEs>t6WkW&KR1>QaR?e*tCcM3F8&?8paSPYeC)Ri=nzQ)8_KSBsXdTWq zW}yPe$kEa9M5|Kg(W;+!7-bzCvQV!lP1X5yBju6l6zE)v;rm*nuAojZ?N5Y*<1dha zCuUThdN-vu{{Dvt?_>12ms~_J5~&=*62+g2KL`wg+gA@R-k6eMaiq_>7XL?YfVWOT zV1@0y9=Ah7;STA059jJ@kl^|w<5$g3DGgh_suHfCqP)NQS8kv?3T%W;G}h4;?sJr@ zk`D?*e8!O3rPFo*Jzvs}YF_6x$SZ=J)dSL6t8)z56t_v7h>H)o4MkBEq2x1RiH7nnd zO{NO~xAW889oRE6fJ%K8CX2kj076h=KaUnXQIOX4ru{6t8B5|0)`m8+O?*nwY;e#Z zAo@+{FBNu|Sxn!b`~`AUPEV!~pWfKUkm}EgZf~zFTuc@n5~tI5+pM@LPMf|p^?m)N z&&hYZlv29|rfJ|Y^mIYYm`#-J_Y z(2vow%DeaM3&SNvEygL>AvzXNnnSeAWuWlRE|$p zNa%gaGx~!d+I9buAY+_vaAELUC@S~zM~Z|y9|CQ1|LeyAAtx}v8XfKT==ppxP1N&U=)E_BMd+wa6kH1xcID1mCnm%V8txoZh-t|fq1%2 z)`8ukd#&l3b9OQ*7Bs<73MiF zf7Qb$*TF9;DwdaAG)gR)UkFZtsZR+Q{n3plym2+9-4{rV;%h4-wO4Y*TQlvncVuG6 zmIP9>=@H7gt0|*A!#x#qZn3QOG-3L@?^ww75*K3-pA-P**$YaQ>D^Zr` zX&$bTeO3*SFvjzz{H8^w(`$#pVck}fUEQ)F1~0^l`>f#^ruj1EXz{;HLrSGV#f3G* zRHD%tS%oqo(GVq%v-($KD<~6}97NLd&XMMm6;E`RjGyZa$wwy7+oUG4bxjlrQY)8s zaX7^h`Lj`_dDSsy#k5$g#%!$D-6s!P)X2bf8==GUDNHD?^!pYa9rbHs;GQ>&FH0Ny1h~$l+wNU@ryH6aJB{K%E zqJlI6>anE-P!sB^0HC|B_&!zN5bcL@*xrQbpBI`9*U<)`1d^r$$+~bdJWQym;-~`d zW+l&4*?X=XZ37j(#;Wz9-EJumzeNDbku&W3ST-|4GnL@MX;dtNs$CHnNEnP^X)nPw z``=->Z^6t4c2Wz?h+@;s4Kp1(tIzf@Hc^`?Mp}dW4A3fOU36cUA}?~^TlD&j)x_yj z{f;GSYs$t~iD?gSSS>i#!9_~AxCNK~#?|xs*@b9uxW0Q95`vCmA7|*4`OJQPs1&Tu zM|travGtIs-_w-l?>v>ifPtS#kgDeRRV=3_7~k|%%}-P{?=!5$z=sPmIMLOXtyPjT z_8h+QyeqZF5WSJ)FY8|STJw|CIaCBA-PjSR;^kO0!rM-qfQg&$=VeHQ(W^%mi%K{Eycq$m*;0jTIhl z%y5nws!el|{GgS~ell4l-im}>sFCJNyzSZpLVx>w_wOCKm7)!wzZP>JBn$kY=51F% zueI*`R_9AqYk2nHAxxpcaX;pWr43^fs$JIC#htWX2YJDGK=S7$Rt`j`PdsCzFip>+ zS_dwX(?4%r&!xO7eDsLq>)RKwye1p`=dH|{zRUSeTAeB(PqX=WK+&IWZREDqRjGf0 zKu4W!OWG$~FxAWktAC&+K>uINp#KiT>FIa;Z^F#!S)885DF#k4aEgIb44h)%6a%Li sIK{vz22L?>ih=(d3}AkJ?SBUNeSsJ2@|+I6