Kafka in Practice: Using Kafka in Spring Boot
Official documentation link
1. POM dependency
<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
</dependency>
2. YAML configuration
server:
  port: 8081

spring:
  kafka:
    bootstrap-servers: 10.1.48.214:9092,10.1.48.214:9093,10.1.48.214:9094
    producer:
      # a value greater than 0 makes the client retry records whose send failed
      retries: 3
      batch-size: 16384
      buffer-memory: 33554432
      acks: 1
      # serializers for the message key and the message body
      key-serializer: org.apache.kafka.common.serialization.StringSerializer
      value-serializer: org.apache.kafka.common.serialization.StringSerializer
    consumer:
      group-id: default-group
      enable-auto-commit: false
      auto-offset-reset: earliest
      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      max-poll-records: 500
    listener:
      # RECORD: commit after each record has been processed by the listener (ListenerConsumer)
      # BATCH: commit after each batch returned by poll() has been processed by the listener
      # TIME: commit after a processed batch when more than TIME has elapsed since the last commit
      # COUNT: commit after a processed batch when at least COUNT records have been processed
      # COUNT_TIME: commit when either the TIME or the COUNT condition is met
      # MANUAL: commit after the processed batch once Acknowledgment.acknowledge() has been called
      # MANUAL_IMMEDIATE: commit immediately when Acknowledgment.acknowledge() is called; usually the mode to use
      # (a Java-config equivalent of this setting is sketched right after this YAML)
      ack-mode: MANUAL_IMMEDIATE
  redis:
    host: 10.1.48.214
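The YAML above is sufficient on its own; the sketch below only shows the equivalent Java configuration of the ack mode, for readers who prefer configuring the listener container in code. It assumes spring-kafka 2.3 or later (where AckMode lives in ContainerProperties). The class name KafkaListenerConfig is made up for the example, and defining a bean named kafkaListenerContainerFactory replaces Boot's auto-configured factory, so the spring.kafka.listener.* properties above would no longer be applied to it.

package com.example.kafkaspringboot.config;

import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.listener.ContainerProperties;

@Configuration
public class KafkaListenerConfig {

    // hypothetical bean: Java-config equivalent of spring.kafka.listener.ack-mode: MANUAL_IMMEDIATE
    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, String> kafkaListenerContainerFactory(
            ConsumerFactory<String, String> consumerFactory) {
        ConcurrentKafkaListenerContainerFactory<String, String> factory =
                new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory);
        // commit the offset immediately when Acknowledgment.acknowledge() is called
        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
        return factory;
    }
}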
3. Producer
package com.example.kafkaspringboot.controller;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;

@RestController
public class KafkaController {

    // topic the messages are sent to
    private final static String TOPIC_NAME = "my-replicated-topic";

    @Autowired
    private KafkaTemplate<String, String> kafkaTemplate;

    @RequestMapping("/send")
    public void send() {
        // send(topic, partition, key, value): publish to partition 0 of the topic
        kafkaTemplate.send(TOPIC_NAME, 0, "key", "this is a msg");
    }
}
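send() above is fire-and-forget: the future returned by KafkaTemplate is ignored, so a failed delivery goes unnoticed. The sketch below adds a callback that logs the outcome. It assumes spring-kafka 2.x (the version shipped with Spring Boot 2.x), where send() returns a ListenableFuture; with spring-kafka 3.x it returns a CompletableFuture and whenComplete(...) would be used instead. The /sendWithCallback mapping is a made-up name, and the method is meant to sit inside the same KafkaController.

    // hypothetical extra endpoint for KafkaController: log the send result instead of ignoring it
    @RequestMapping("/sendWithCallback")
    public void sendWithCallback() {
        kafkaTemplate.send(TOPIC_NAME, 0, "key", "this is a msg")
                .addCallback(
                        result -> System.out.println("sent to "
                                + result.getRecordMetadata().topic() + "-"
                                + result.getRecordMetadata().partition()
                                + " at offset " + result.getRecordMetadata().offset()),
                        ex -> System.err.println("send failed: " + ex.getMessage()));
    }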
4. Consumer
package com.example.kafkaspringboot.consumer;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.annotation.PartitionOffset;
import org.springframework.kafka.annotation.TopicPartition;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Component;

@Component
public class MyConsumer {

    // 1. a consumer that reads the given topic as part of consumer group MyGroup1
    @KafkaListener(topics = "my-replicated-topic", groupId = "MyGroup1")
    public void listenGroup(ConsumerRecord<String, String> record, Acknowledgment ack) {
        String value = record.value();
        System.out.println(value);
        System.out.println(record);
        // commit the offset manually
        ack.acknowledge();
    }

    // 2. consumer group over several topics, with pinned partitions, an initial offset and the listener concurrency;
    //    concurrency is the number of consumers in the group (i.e. concurrent consumers) and should not exceed the total number of partitions
//    @KafkaListener(groupId = "testGroup", topicPartitions = {
//            @TopicPartition(topic = "topic1", partitions = {"0", "1"}),
//            @TopicPartition(topic = "topic2", partitions = "0",
//                    partitionOffsets = @PartitionOffset(partition = "1", initialOffset = "100"))
//    }, concurrency = "3")
//    public void listenGroupPro(ConsumerRecord<String, String> record, Acknowledgment ack) {
//        String value = record.value();
//        System.out.println(value);
//        System.out.println(record);
//        // commit the offset manually
//        ack.acknowledge();
//    }

    // 3. batch listener that receives the whole batch returned by poll(); it needs
    //    org.apache.kafka.clients.consumer.ConsumerRecords imported and a batch-enabled
    //    container factory (see the sketch after this class)
    /*@KafkaListener(topics = "my-replicated-topic", groupId = "MyGroup2")
    public void listensGroup(ConsumerRecords<String, String> records, Acknowledgment ack) {
        for (ConsumerRecord<String, String> record : records) {
            System.out.println(record.value());
            System.out.println(record);
        }
        // commit the offset manually
        ack.acknowledge();
    }*/
}
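The commented-out listensGroup listener takes the whole batch returned by poll() (ConsumerRecords), which only works with a batch-enabled listener container. Below is a minimal sketch of such a factory, assuming spring-kafka 2.x; the class and bean names are made up for the example, and the batch listener would reference it with @KafkaListener(..., containerFactory = "batchKafkaListenerContainerFactory"). In recent Spring Boot versions the property spring.kafka.listener.type: batch achieves the same without extra code.

package com.example.kafkaspringboot.config;

import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.listener.ContainerProperties;

@Configuration
public class KafkaBatchListenerConfig {

    // hypothetical bean: a container factory that hands the listener the whole poll() batch
    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, String> batchKafkaListenerContainerFactory(
            ConsumerFactory<String, String> consumerFactory) {
        ConcurrentKafkaListenerContainerFactory<String, String> factory =
                new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory);
        // deliver the records of each poll() as one batch to the listener method
        factory.setBatchListener(true);
        // manual commit still works for batch listeners
        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
        return factory;
    }
}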
5. Source
Bilibili: https://www.bilibili.com/video/BV1Xy4y1G7zA