1.环境准备
(1) 三台主机
192.168.0.66 h1
192.168.0.67 h2
192.168.0.68 h3
(2)
66 上执行 :hostnamectl --static set-hostname h1
67 上执行 :hostnamectl --static set-hostname h2
68 上执行 :hostnamectl --static set-hostname h3
(3) 每个节点上执行
echo '
192.168.0.66 h1
192.168.0.67 h2
192.168.0.68 h3' >> /etc/hosts
(4)关闭防火墙
systemctl stop firewalld.service
systemctl disable firewalld.service
(5)安装 jdk
2、zookeeper 集群搭建
(1)下载: wget http://mirror.bit.edu.cn/apache/zookeeper/zookeeper-3.4.10/zookeeper-3.4.10.tar.gz
(2)解压: tar -zxvf zookeeper-3.4.10.tar.gz
(3)复制到 /usr/local cp -R zookeeper-3.4.10 /usr/local
(4)复制配置文件 cp /usr/local/zookeeper-3.4.10/conf/zoo_sample.cfg /usr/local/zookeeper-3.4.10/conf/zoo.cfg
(5)修改配置文件 vi /usr/local/zookeeper-3.4.10/conf/zoo.cfg，在文件末尾添加:
server.1=h1:2888:3888
server.2=h2:2888:3888
server.3=h3:2888:3888
(6)配置 myid
mkdir -p /tmp/zookeeper
每个节点写入与 zoo.cfg 中 server.N 对应的编号:
h1 上执行: echo 1 > /tmp/zookeeper/myid
h2 上执行: echo 2 > /tmp/zookeeper/myid
h3 上执行: echo 3 > /tmp/zookeeper/myid
(7)启动每个节点的zk
/usr/local/zookeeper-3.4.10/bin/zkServer.sh start
(8)查看zk 的状态
/usr/local/zookeeper-3.4.10/bin/zkServer.sh status
(9)测试ZK
任意节点
/usr/local/zookeeper-3.4.10/bin/zkCli.sh -server h1:2181
create /c1project c1projecttest
get /c1project
3.kafka 集群搭建
(1) 下载 kafka
wget http://mirror.bit.edu.cn/apache/kafka/0.9.0.0/kafka_2.10-0.9.0.0.tgz
(2) 解压并放置 /usr/local
tar -zxvf kafka_2.10-0.9.0.0.tgz
cp -R kafka_2.10-0.9.0.0 /usr/local
(3) 修改配置文件
vi /usr/local/kafka_2.10-0.9.0.0/config/server.properties
h1: broker.id=1
h2: broker.id=2
h3: broker.id=3
zookeeper.connect=h1:2181,h2:2181,h3:2181
(4)启动 kafka
三个节点: /usr/local/kafka_2.10-0.9.0.0/bin/kafka-server-start.sh -daemon /usr/local/kafka_2.10-0.9.0.0/config/server.properties
jps 后能看到 kafka 的进程。
(5)测试 kafka
a. 创建topic
/usr/local/kafka_2.10-0.9.0.0//bin/kafka-topics.sh --create --zookeeper h1:2181,h2:2181,h3:2181 --replication-factor 3 --partitions 5 --topic test
b.查看topic
/usr/local/kafka_2.10-0.9.0.0/bin/kafka-topics.sh --describe --zookeeper h1:2181,h2:2181,h3:2181 --topic test
c.生产者 消费者
/usr/local/kafka_2.10-0.9.0.0/bin/kafka-console-producer.sh --broker-list h1:9092,h2:9092,h3:9092 --topic test
/usr/local/kafka_2.10-0.9.0.0/bin/kafka-console-consumer.sh --zookeeper h1:2181,h2:2181,h3:2181 --from-beginning --topic test
4.storm 搭建
(1)下载storm
wget http://mirror.bit.edu.cn/apache/storm/apache-storm-0.10.2/apache-storm-0.10.2.tar.gz
(2)解压并放置 /usr/local
tar -zxvf apache-storm-0.10.2.tar.gz
cp -R apache-storm-0.10.2 /usr/local
(3)修改配置
vi /usr/local/apache-storm-0.10.2/conf/storm.yaml
storm.zookeeper.servers:
- "h1"
- "h2"
- "h3"
storm.zookeeper.port: 2181
nimbus.host: "h1"
supervisor.slots.ports:
- 6700
- 6701
- 6702
- 6703
storm.local.dir: "/tmp/storm"
5.flume 采集配置(将日志写入 kafka, 编辑 flume 的 conf 属性文件)
agent.sources = s1
agent.channels = c1
agent.sinks = k1
agent.sources.s1.type=exec
agent.sources.s1.command=tail -F /tmp/logs/kafka.log
agent.sources.s1.channels=c1
agent.channels.c1.type=memory
agent.channels.c1.capacity=10000
agent.channels.c1.transactionCapacity=100
#设置Kafka接收器
agent.sinks.k1.type= org.apache.flume.sink.kafka.KafkaSink
#设置Kafka的broker地址和端口号
agent.sinks.k1.brokerList=h1:9092,h2:9092,h3:9092
#设置Kafka的Topic
agent.sinks.k1.topic=kafkatest
#设置序列化方式
agent.sinks.k1.serializer.class=kafka.serializer.StringEncoder
agent.sinks.k1.channel=c1
测试日志生成脚本(补全缺失的循环头, shell 中字符串拼接不能用 +):
for ((i=0; i<=1000; i++));
do echo "kafka_test-$i" >> /tmp/logs/kafka.log;
done