Sending and Consuming Messages with Kafka
package com.yl.kafka.producer;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
/**
 * Producer
 *
 * @author Y-wee
 */
public class Producer {
    public static void main(String[] args) throws ExecutionException, InterruptedException {
        // Kafka configuration
        Properties properties = new Properties();
        // Configure bootstrap.servers (broker address)
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.116.128:9092");
        /*
        Serializer configuration
         */
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        // Producer instance
        KafkaProducer<String, String> producer = new KafkaProducer<>(properties);
        for (int i = 0; i < 5; i++) {
            // Send a message (asynchronous, fire-and-forget)
            // producer.send(new ProducerRecord<>("topicA", "message" + i));
            /*
            Send a message (asynchronous, with a callback)
             */
            producer.send(new ProducerRecord<>("topicA", "message" + i), new Callback() {
                @Override
                public void onCompletion(RecordMetadata recordMetadata, Exception e) {
                    if (e == null) {
                        System.out.println("topic: " + recordMetadata.topic() + ", partition: " + recordMetadata.partition());
                    }
                }
            });
            // Send a message (synchronous)
            // producer.send(new ProducerRecord<>("topicA", "message" + i)).get();
        }
        // Release resources
        producer.close();
    }
}
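The commented-out synchronous variant above blocks on the Future returned by send(). A minimal sketch of that style with explicit error handling (same broker address and topic assumed as above; the class name SyncProducer is just for illustration):
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.serialization.StringSerializer;
import java.util.Properties;
import java.util.concurrent.ExecutionException;

public class SyncProducer {
    public static void main(String[] args) {
        Properties properties = new Properties();
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.116.128:9092");
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        // try-with-resources closes the producer even if a send fails
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(properties)) {
            // get() blocks until the broker acknowledges the record (or the send fails)
            RecordMetadata metadata = producer.send(new ProducerRecord<>("topicA", "sync-message")).get();
            System.out.println("topic: " + metadata.topic() + ", offset: " + metadata.offset());
        } catch (ExecutionException | InterruptedException e) {
            e.printStackTrace();
        }
    }
}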
Start Kafka, then run ./kafka-console-consumer.sh --bootstrap-server <server-ip>:9092 --topic topicA
to open a consumer console; after running the producer, the sent messages appear in that console.
If sending fails or blocks, see: https://blog.csdn.net/cnds123321/article/details/124181849
package com.yl.kafka.consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Properties;
/**
 * Consumer
 *
 * @author Y-wee
 */
public class Consumer {
    public static void main(String[] args) {
        Properties properties = new Properties();
        // Configure bootstrap.servers (broker address)
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.116.128:9092");
        /*
        Deserializer configuration (required)
         */
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        // Configure the consumer group
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, "groupA");
        // Consumer instance
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties);
        /*
        Subscribe by topic and let Kafka assign the partitions (multiple topics may be subscribed)
         */
        // ArrayList<String> topics = new ArrayList<>();
        // topics.add("topicA");
        // consumer.subscribe(topics);
        /*
        Assign a specific topic and partition explicitly
         */
        ArrayList<TopicPartition> topicPartitions = new ArrayList<>();
        topicPartitions.add(new TopicPartition("topicA", 0));
        consumer.assign(topicPartitions);
        while (true) {
            // Poll for records, waiting up to one second
            ConsumerRecords<String, String> consumerRecords = consumer.poll(Duration.ofSeconds(1));
            // Print the consumed records
            for (ConsumerRecord<String, String> consumerRecord : consumerRecords) {
                System.out.println(consumerRecord);
            }
        }
    }
}
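The difference between the two approaches: assign() pins this consumer to exact partitions, while subscribe() lets the group coordinator balance the topic's partitions across all consumers in groupA. The subscribe() path from the comments can also be written in one line; this snippet drops into the program above in place of the assign() block (java.util.Collections must be imported):
// Group-managed subscription: partitions of topicA are balanced across
// all consumers that join groupA, and rebalance on membership changes
consumer.subscribe(Collections.singletonList("topicA"));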
Consuming from a specified offset
package com.yl.kafka.consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;
import java.time.Duration;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Properties;
import java.util.Set;
/**
 * Consumer
 *
 * @author Y-wee
 */
public class Consumer {
    public static void main(String[] args) {
        Properties properties = new Properties();
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.116.128:9092");
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, "groupA");
        // Auto-commit settings (must be set before the consumer is created to take effect)
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, true);
        properties.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, 1000);
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties);
        ArrayList<TopicPartition> topicPartitions = new ArrayList<>();
        topicPartitions.add(new TopicPartition("topicA", 0));
        consumer.assign(topicPartitions);
        // The set of partitions currently assigned to this consumer
        Set<TopicPartition> assignment = new HashSet<>();
        /*
        Wait until partition assignment has completed
         */
        while (assignment.size() == 0) {
            consumer.poll(Duration.ofSeconds(1));
            assignment = consumer.assignment();
        }
        /*
        Iterate over all assigned partitions and start consuming each from offset 1700
         */
        for (TopicPartition tp : assignment) {
            consumer.seek(tp, 1700);
        }
        while (true) {
            ConsumerRecords<String, String> consumerRecords = consumer.poll(Duration.ofSeconds(1));
            for (ConsumerRecord<String, String> consumerRecord : consumerRecords) {
                System.out.println(consumerRecord);
            }
        }
    }
}
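Hard-coding offset 1700 only works while that offset is still retained. To replay a partition from its earliest retained offset, or jump straight to the latest, the consumer also provides seekToBeginning() and seekToEnd(); either call can replace the per-partition seek loop in the program above:
// Replace the seek loop above with one of these:
consumer.seekToBeginning(assignment); // earliest retained offset of each assigned partition
// consumer.seekToEnd(assignment);    // latest offset of each assigned partition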
Consuming from a specified timestamp
package com.yl.kafka.consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndTimestamp;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;
import java.time.Duration;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
/**
 * Consumer
 *
 * @author Y-wee
 */
public class Consumer {
    public static void main(String[] args) {
        Properties properties = new Properties();
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.116.128:9092");
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, "groupA");
        // Auto-commit settings (must be set before the consumer is created to take effect)
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, true);
        properties.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, 1000);
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties);
        ArrayList<TopicPartition> topicPartitions = new ArrayList<>();
        topicPartitions.add(new TopicPartition("topicA", 0));
        consumer.assign(topicPartitions);
        Set<TopicPartition> assignment = new HashSet<>();
        // Wait until partition assignment has completed
        while (assignment.size() == 0) {
            consumer.poll(Duration.ofSeconds(1));
            assignment = consumer.assignment();
        }
        // Map each assigned partition to a target timestamp
        HashMap<TopicPartition, Long> timestampToSearch = new HashMap<>();
        for (TopicPartition topicPartition : assignment) {
            // Consume each partition starting from one day ago
            timestampToSearch.put(topicPartition, System.currentTimeMillis() - 24 * 3600 * 1000);
        }
        // Look up, per partition, the earliest offset whose timestamp is at or after the target
        Map<TopicPartition, OffsetAndTimestamp> offsets = consumer.offsetsForTimes(timestampToSearch);
        /*
        Seek every partition to the offset that corresponds to its timestamp
         */
        for (TopicPartition topicPartition : assignment) {
            OffsetAndTimestamp offsetAndTimestamp = offsets.get(topicPartition);
            if (offsetAndTimestamp != null) {
                // Start consuming from this offset
                consumer.seek(topicPartition, offsetAndTimestamp.offset());
            }
        }
        while (true) {
            ConsumerRecords<String, String> consumerRecords = consumer.poll(Duration.ofSeconds(1));
            for (ConsumerRecord<String, String> consumerRecord : consumerRecords) {
                System.out.println(consumerRecord);
            }
        }
    }
}
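With auto-commit enabled as above, offsets are committed in the background every auto.commit.interval.ms. When exact control over commit timing is needed, disable auto-commit and commit after processing; a minimal sketch of the poll loop with a synchronous commit (consumer setup otherwise identical to the programs above):
// Requires: properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
while (true) {
    ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
    for (ConsumerRecord<String, String> record : records) {
        System.out.println(record);
    }
    // Block until the offsets returned by the last poll() are committed
    consumer.commitSync();
}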