http://kafka.apache.org/downloads
节点 | IP | 服务 |
---|---|---|
hadoop1 | 192.168.220.30 | kafka |
hadoop2 | 192.168.220.31 | kafka |
hadoop3 | 192.168.220.32 | kafka |
[root@hadoop1 opt]# tar -zxvf kafka_2.11-2.2.1.tgz -C /usr/bigdata/
[root@hadoop1 kafka_2.11-2.2.1]# mkdir logs
[root@hadoop1 kafka_2.11-2.2.1]# cd config/
[root@hadoop1 config]# vim server.properties
#broker的全局唯一编号,不能重复
broker.id=1
#删除topic功能使能
delete.topic.enable=true
#处理网络请求的线程数量
num.network.threads=3
#用来处理磁盘IO的线程数量
num.io.threads=8
#发送套接字的缓冲区大小
socket.send.buffer.bytes=102400
#接收套接字的缓冲区大小
socket.receive.buffer.bytes=102400
#请求套接字的缓冲区大小
socket.request.max.bytes=104857600
#kafka运行日志存放的路径
log.dirs=/usr/bigdata/kafka_2.11-2.2.1/logs
#topic在当前broker上的分区个数
num.partitions=1
#用来恢复和清理data下数据的线程数量
num.recovery.threads.per.data.dir=1
#segment文件保留的最长时间,超时将被删除
log.retention.hours=168
#配置连接Zookeeper集群地址
zookeeper.connect=hadoop1:2181,hadoop2:2181,hadoop3:2181
[root@hadoop1 kafka_2.11-2.2.1]# vim /etc/profile
export KAFKA_HOME=/usr/bigdata/kafka_2.11-2.2.1
export PATH=$PATH:$KAFKA_HOME/bin
[root@hadoop1 config]# source /etc/profile
[root@hadoop1 config]# scp -r /usr/bigdata/kafka_2.11-2.2.1 root@hadoop2:/usr/bigdata/
[root@hadoop1 config]# scp -r /usr/bigdata/kafka_2.11-2.2.1 root@hadoop3:/usr/bigdata/
[root@hadoop2 ~]# vim /etc/profile
export KAFKA_HOME=/usr/bigdata/kafka_2.11-2.2.1
export PATH=$PATH:$KAFKA_HOME/bin
[root@hadoop2 config]# source /etc/profile
[root@hadoop3 ~]# vim /etc/profile
export KAFKA_HOME=/usr/bigdata/kafka_2.11-2.2.1
export PATH=$PATH:$KAFKA_HOME/bin
[root@hadoop3 config]# source /etc/profile
[root@hadoop2 ~]# cd $KAFKA_HOME
[root@hadoop2 kafka_2.11-2.2.1]# vim config/server.properties
broker.id=2
[root@hadoop3 ~]# cd $KAFKA_HOME
[root@hadoop3 kafka_2.11-2.2.1]# vim config/server.properties
broker.id=3
[root@hadoop1 kafka_2.11-2.2.1]# bin/kafka-server-start.sh config/server.properties &
[root@hadoop2 kafka_2.11-2.2.1]# bin/kafka-server-start.sh config/server.properties &
[root@hadoop3 kafka_2.11-2.2.1]# bin/kafka-server-start.sh config/server.properties &
[root@hadoop1 kafka_2.11-2.2.1]# bin/kafka-server-stop.sh
[root@hadoop2 kafka_2.11-2.2.1]# bin/kafka-server-stop.sh
[root@hadoop3 kafka_2.11-2.2.1]# bin/kafka-server-stop.sh