引入依赖
<properties>
    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
    <maven.compiler.source>8</maven.compiler.source>
    <maven.compiler.target>8</maven.compiler.target>
    <spring-boot.version>2.3.12.RELEASE</spring-boot.version>
    <fastjson.version>2.0.51</fastjson.version>
    <!-- NOTE(review): the Kafka *client* version does not have to match the broker
         version (the wire protocol is compatible across versions); pinning 3.4.0 to
         mirror the installed broker is optional, and is newer than the client the
         Spring Boot 2.3 BOM was tested with - confirm at runtime. -->
    <kafka.version>3.4.0</kafka.version>
</properties>
<dependencyManagement>
    <dependencies>
        <!-- Import the Spring Boot BOM so starter/library versions are managed centrally -->
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-parent</artifactId>
            <version>${spring-boot.version}</version>
            <type>pom</type>
            <scope>import</scope>
        </dependency>
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka-clients</artifactId>
            <version>${kafka.version}</version>
        </dependency>
        <dependency>
            <groupId>com.alibaba.fastjson2</groupId>
            <artifactId>fastjson2</artifactId>
            <version>${fastjson.version}</version>
        </dependency>
        <!-- FIX: removed the spring-kafka override that pinned it to
             ${spring-boot.version}. spring-kafka is versioned independently of
             Spring Boot, so the Boot version number is the wrong version line for
             that artifact. The Spring Boot BOM imported above already manages a
             spring-kafka version tested against this Boot release. -->
    </dependencies>
</dependencyManagement>
<dependencies>
    <!-- Versions below are managed by the imported Spring Boot BOM /
         dependencyManagement section, so no <version> tags are needed here. -->
    <dependency>
        <groupId>org.apache.kafka</groupId>
        <artifactId>kafka-clients</artifactId>
    </dependency>
    <dependency>
        <groupId>com.alibaba.fastjson2</groupId>
        <artifactId>fastjson2</artifactId>
    </dependency>
    <!-- NOTE(review): spring-boot-starter is already pulled in transitively by
         spring-boot-starter-web; kept here for explicitness, but it is redundant. -->
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter</artifactId>
    </dependency>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-web</artifactId>
    </dependency>
    <!-- Kafka integration with Spring Boot -->
    <dependency>
        <groupId>org.springframework.kafka</groupId>
        <artifactId>spring-kafka</artifactId>
    </dependency>
    <!-- FIX: bumped junit 4.11 -> 4.13.2; JUnit 4.x before 4.13.1 is affected by
         CVE-2020-15250 (TemporaryFolder local information disclosure). -->
    <dependency>
        <groupId>junit</groupId>
        <artifactId>junit</artifactId>
        <version>4.13.2</version>
        <scope>test</scope>
    </dependency>
</dependencies>
yml配置
# FIX: the original paste had lost all YAML indentation, which makes the file
# structurally invalid. Nesting reconstructed from the spring.kafka.* property
# structure implied by the original comments; every key and value is unchanged.
spring:
  kafka:
    # Broker addresses
    bootstrap-servers: 192.168.75.61:9092,192.168.75.62:9092,192.168.75.63:9092
    # Producer-side configuration
    producer:
      # Ack level: how many partition replicas must confirm before the broker
      # acks the producer (valid values: 0, 1, all/-1)
      acks: 1
      # Number of send retries
      retries: 5
      # Default batch size (ProducerBatch), in bytes
      batch-size: 16384
      # Producer-side buffer size, in bytes
      buffer-memory: 33554432
      # Serializers for message key and value
      key-serializer: org.apache.kafka.common.serialization.StringSerializer
      value-serializer: org.apache.kafka.common.serialization.StringSerializer
      # Maximum time to linger before sending a batch
      properties:
        linger:
          ms: 0
    # Consumer-side configuration
    consumer:
      # Whether offsets are committed automatically
      enable-auto-commit: true
      # Auto-commit interval (how long after receiving a message the offset is committed)
      auto-commit-interval: 1000
      # What to do when Kafka has no initial offset, or the offset is out of range:
      #   earliest: reset to the smallest offset in the partition
      #   latest:   reset to the newest offset (consume only newly produced data)
      #   none:     throw if any partition has no committed offset
      auto-offset-reset: latest
      # Deserializers for message key and value
      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      properties:
        # Default consumer group id
        group:
          id: defaultConsumerGroup
        # Session timeout (if the consumer sends no heartbeat within this window,
        # a rebalance is triggered)
        session:
          timeout:
            ms: 120000
        # Consume request timeout
        request:
          timeout:
            ms: 180000
消息生产者
package com.hs.kfk.boot;
import com.alibaba.fastjson2.JSON;
import com.hs.kfk.serializer.User;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RestController;
/**
 * Kafka + Spring Boot integration: message producer.
 *
 * <p>Exposes {@code GET /send}, which serializes a sample {@link User} to JSON
 * and publishes it to the {@code disTopic} topic under a fixed message key.
 */
@RestController
public class BootProducer {

    @Autowired
    private KafkaTemplate<String, String> kafkaTemplate;

    /** Publishes one JSON-serialized {@link User} record to {@code disTopic}. */
    @GetMapping("/send")
    public void sendMessage() {
        User user = new User(1L, "hushang", 24);
        // FIX: JSON.toJSONString(...) serializes straight to a JSON string;
        // the original JSON.toJSON(user).toString() relied on the toString()
        // of an intermediate JSONObject, an implementation detail.
        String message = JSON.toJSONString(user);
        kafkaTemplate.send("disTopic", "key", message);
    }
}
消息消费者
package com.hs.kfk.boot;
import com.alibaba.fastjson2.JSON;
import com.hs.kfk.serializer.User;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.stereotype.Component;
/**
 * Kafka + Spring Boot integration: message consumer.
 *
 * <p>Listens on the {@code disTopic} topic, logs each record's metadata and
 * payload, then deserializes the JSON payload back into a {@link User}.
 */
@Component
public class BootConsumer {

    @KafkaListener(topics = {"disTopic"})
    public void consumerMessage(ConsumerRecord<String, String> record) {
        String payload = record.value();
        // Dump record coordinates and raw payload for inspection.
        System.out.println("topic:" + record.topic()
                + "\tpartition:" + record.partition()
                + "\toffset: " + record.offset()
                + "\tkey: " + record.key()
                + "\tmessage: " + payload);
        // Round-trip the JSON payload back into the domain object.
        User user = JSON.parseObject(payload, User.class);
        System.out.println(user);
    }
}
输出结果
topic:disTopic partition:1 offset: 8 key: key message: {"age":24,"uId":1,"username":"hushang"}
User{uId=1, username='hushang', age=24}
topic:disTopic partition:1 offset: 9 key: key message: {"age":24,"uId":1,"username":"hushang"}
User{uId=1, username='hushang', age=24}
topic:disTopic partition:1 offset: 10 key: key message: {"age":24,"uId":1,"username":"hushang"}
User{uId=1, username='hushang', age=24}
标签:key,SpringBoot,import,springframework,kafka,整合,offset,org,Kafka
From: https://blog.csdn.net/qq_44027353/article/details/141034198