版权声明:Copyright ©2018-2019 凉白开不加冰 版权所有 https://blog.csdn.net/qq_21082615/article/details/91373545
介绍:之前的文章里面讲过了怎么部署kafka,这边文章就介绍怎么使用spring boot 使用kafka
第三步之前的准备——第一步:加入相关依赖(jar 包)
<!-- Spring Boot web starter (REST controller) + Spring for Apache Kafka (KafkaTemplate / @KafkaListener) -->
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-web</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.kafka</groupId>
<artifactId>spring-kafka</artifactId>
</dependency>
第二步:application.yml配置文件
# 集群 brokers 以逗号隔开(YAML 注释用 #,不能用 //)
kafka:
brokers: 192.168.0.117:9092
groupid: test-group
第三步:生产消息配置类
package com.example.config;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;
import java.util.LinkedHashMap;
import java.util.Map;
/**
 * Kafka producer configuration: builds a {@link KafkaTemplate} bean that
 * serializes both key and value as plain strings.
 */
@Configuration
@EnableKafka
public class KafkaProducerConfig {
    /** Comma-separated broker addresses, e.g. "host1:9092,host2:9092" (from application.yml). */
    @Value("${kafka.brokers}")
    private String brokers;

    /**
     * Builds the raw producer property map.
     *
     * @return mutable map of producer settings passed to the producer factory
     */
    public Map<String, Object> producerConfigs() {
        Map<String, Object> props = new LinkedHashMap<>();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokers);
        // No retries: a failed send is dropped rather than retried.
        props.put(ProducerConfig.RETRIES_CONFIG, 0);
        // BUG FIX: batch.size was set twice (4096, then 40960) — the first value
        // was silently overwritten. Keep the single effective value.
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, 40960);
        // Wait up to 1 ms to let small records coalesce into a batch.
        props.put(ProducerConfig.LINGER_MS_CONFIG, 1);
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        return props;
    }

    /** Producer factory backing the template (String key / String value). */
    public ProducerFactory<String, String> producerFactory() {
        return new DefaultKafkaProducerFactory<>(producerConfigs());
    }

    /** The application-wide template used to publish messages. */
    @Bean
    public KafkaTemplate<String, String> kafkaTemplate() {
        return new KafkaTemplate<>(producerFactory());
    }
}
第四步:消费消息配置类
package com.example.config;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
import java.util.LinkedHashMap;
import java.util.Map;
/**
 * Kafka consumer configuration: builds the listener container factory used by
 * {@code @KafkaListener} methods (String key / String value).
 */
@Configuration
@EnableKafka
public class KafkaConsumerConfig {
    /** Comma-separated broker addresses (from application.yml). */
    @Value("${kafka.brokers}")
    private String brokers;
    /** Consumer group id shared by all listeners in this app (from application.yml). */
    @Value("${kafka.groupid}")
    private String groupid;

    /**
     * Builds the raw consumer property map.
     *
     * @return mutable map of consumer settings passed to the consumer factory
     */
    public Map<String, Object> consumerConfigs() {
        Map<String, Object> props = new LinkedHashMap<>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokers);
        // Offsets are committed by the listener container, not by the Kafka client.
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        // FIX: removed auto.commit.interval.ms — it is dead config when
        // enable.auto.commit is false (the client never auto-commits).
        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 15000);
        // Deserializers here — not serializers; this is the consumer side.
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, groupid);
        // New groups (no committed offset) start from the newest records.
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
        return props;
    }

    /** Consumer factory backing the listener containers. */
    public ConsumerFactory<String, String> consumerFactory() {
        return new DefaultKafkaConsumerFactory<>(consumerConfigs());
    }

    /**
     * Listener container factory: 3 concurrent consumer threads per listener,
     * 3 s poll timeout.
     */
    @Bean
    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory =
                new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        factory.setConcurrency(3);
        factory.getContainerProperties().setPollTimeout(3000);
        return factory;
    }
}
第五步:消费消息
package com.example.consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.stereotype.Component;
import java.util.Optional;
/**
 * Consumes messages from the "test" topic and prints each non-null value.
 */
@Component
public class KafkaConsumer {
    /**
     * Listener callback invoked by the container for every record on "test"
     * (the topic must already exist on the broker).
     *
     * @param record the polled record; its value may be null (tombstone), so it
     *               is unwrapped through Optional before printing
     */
    @KafkaListener(topics = {"test"})
    public void consumer(ConsumerRecord<?, ?> record) {
        // Idiomatic Optional: ifPresent instead of isPresent()/get().
        Optional.ofNullable(record.value()).ifPresent(System.out::println);
    }
}
第六步:生产消息
package com.example.producer;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
/**
 * REST endpoint that publishes the current timestamp to the "test" topic.
 */
@RestController
public class KafkaProducer {
    // DateTimeFormatter is immutable and thread-safe: build it once instead of
    // on every request.
    private static final DateTimeFormatter TIMESTAMP_FORMAT =
            DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss.SSS");

    // Typed template (was raw KafkaTemplate) — matches the String/String
    // serializers configured on the producer side.
    @Autowired
    private KafkaTemplate<String, String> kafkaTemplate;

    /** GET/POST /send — publishes one formatted-timestamp message to "test". */
    @RequestMapping("send")
    public void send() {
        // format(...) already returns String; the original's +"" was redundant.
        kafkaTemplate.send("test", LocalDateTime.now().format(TIMESTAMP_FORMAT));
    }
}
如果生产消息时报以下错误
Expiring 2 record(s) for zlikun_topic-3: 30042 ms has passed since batch creation plus linger time
需要修改 kafka 配置文件 server.properties:新版本请配置 advertised.listeners=PLAINTEXT://服务器ip:9092(host.name 属性在 Kafka 0.10 之后已废弃,仅旧版本可用 host.name=服务器ip),使客户端能正确解析到 broker 地址