How to Understand the Kafka 1.0.0 Multi-Consumer Example
How do you work through the Kafka 1.0.0 multi-consumer example? Many people with little Kafka experience are unsure where to start, so this article walks through a complete example: a producer that writes to a topic, a consumer Runnable, and a driver class that runs three consumers in the same group on a thread pool. Hopefully it answers the question for you.
package kafka.demo;

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;

public class KafkaProduderDemo {
    public static void main(String[] args) {
        Map<String, Object> props = new HashMap<>();
        props.put("acks", "1");
        // Use the default partitioner
        props.put("partitioner.class", "org.apache.kafka.clients.producer.internals.DefaultPartitioner");
        // Serializer class for the record key
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        // Serializer class for the record value
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("bootstrap.servers", "bigdata01:9092,bigdata02:9092,bigdata03:9092");
        // Target topic
        String topic = "test7";
        KafkaProducer<String, String> producer = new KafkaProducer<String, String>(props);
        for (int i = 1; i <= 100; i++) {
            String line = i + " this is a test ";
            ProducerRecord<String, String> record = new ProducerRecord<String, String>(topic, line);
            producer.send(record);
        }
        producer.close();
    }
}
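The send() call above is fire-and-forget, so a failed send goes unnoticed. A minimal sketch of the same loop with a callback, assuming the same props and topic as above (not part of the original example), could look like this:

        // Sketch only: same send loop, but with a callback so failures are logged.
        for (int i = 1; i <= 100; i++) {
            String line = i + " this is a test ";
            ProducerRecord<String, String> record = new ProducerRecord<String, String>(topic, line);
            producer.send(record, (metadata, exception) -> {
                if (exception != null) {
                    // The broker rejected the record or the request timed out.
                    exception.printStackTrace();
                } else {
                    System.out.println("sent to partition " + metadata.partition()
                            + " at offset " + metadata.offset());
                }
            });
        }
        producer.close();

Next is the consumer Runnable that each thread in the pool will execute.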
package kafka.demo;

import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.errors.WakeupException;

public class MutilConsumerThread implements Runnable {
    private AtomicBoolean closed = new AtomicBoolean(false);
    KafkaConsumer<String, String> consumer = null;
    String topic = null;

    public MutilConsumerThread(KafkaConsumer<String, String> consumer, List<String> topic) {
        this.consumer = consumer;
        consumer.subscribe(topic);
    }

    public void run() {
        try {
            while (!closed.get()) {
                ConsumerRecords<String, String> records = consumer.poll(1000);
                for (ConsumerRecord<String, String> record : records) {
                    // Within one consumer group, each partition is consumed by a fixed thread.
                    System.out.println("Thread-Name:" + Thread.currentThread().getName() + " "
                            + "partition:" + record.partition() + " " + record.value());
                }
            }
        } catch (WakeupException e) {
            // Ignore the wakeup if it was triggered by shutdown().
            if (!closed.get()) throw e;
        } finally {
            consumer.close();
        }
    }

    public void shutdown() {
        closed.set(true);
        consumer.wakeup();
    }
}
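If you want to see which partitions each thread actually owns, you can subscribe with a rebalance listener. This is a sketch only, not part of the original example; it assumes it replaces the consumer.subscribe(topic) call in the constructor above and additionally needs imports for java.util.Collection, org.apache.kafka.clients.consumer.ConsumerRebalanceListener, and org.apache.kafka.common.TopicPartition.

        // Sketch: subscribe with a ConsumerRebalanceListener to log partition assignment per thread.
        consumer.subscribe(topic, new ConsumerRebalanceListener() {
            @Override
            public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
                System.out.println(Thread.currentThread().getName() + " assigned: " + partitions);
            }

            @Override
            public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
                System.out.println(Thread.currentThread().getName() + " revoked: " + partitions);
            }
        });

Finally, the driver class below creates three consumers in the same group and submits them to a fixed thread pool.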
package kafka.demo;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import org.apache.kafka.clients.consumer.KafkaConsumer;

public class MutiConsumerTest {
    public static void main(String[] args) throws InterruptedException {
        Properties props = new Properties();
        props.put("bootstrap.servers", "bigdata01:9092,bigdata02:9092,bigdata03:9092");
        props.put("group.id", "group_test7");
        // Deserializer class for the record key
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        // Deserializer class for the record value
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        // Commit offsets automatically
        props.put("enable.auto.commit", "true");
        // Interval between automatic offset commits (the property name is auto.commit.interval.ms)
        props.put("auto.commit.interval.ms", "2000");
        // Where to start when the group has no committed offset or the offset is invalid;
        // valid values are latest, earliest, none (the default is latest)
        props.put("auto.offset.reset", "earliest");
        String topic = "test7";
        List<MutilConsumerThread> consumers = new ArrayList<>();
        ExecutorService es = Executors.newFixedThreadPool(3);
        for (int i = 0; i <= 2; i++) {
            KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);
            MutilConsumerThread cThread = new MutilConsumerThread(consumer, Arrays.asList(topic));
            consumers.add(cThread);
            es.submit(cThread);
        }
        // Thread.sleep(1000L);
        Runtime.getRuntime().addShutdownHook(new Thread() {
            @Override
            public void run() {
                for (MutilConsumerThread consumer : consumers) {
                    consumer.shutdown();
                }
            }
        });
    }
}
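The shutdown hook above only wakes the consumers; the thread pool itself is never shut down. A slightly more complete hook, sketched under the same setup (not part of the original example), could also stop the executor and give the poll loops a moment to close their consumers:

        // Sketch: shutdown hook that also stops the executor after waking the consumers.
        Runtime.getRuntime().addShutdownHook(new Thread() {
            @Override
            public void run() {
                for (MutilConsumerThread consumer : consumers) {
                    consumer.shutdown();   // set the flag and call consumer.wakeup()
                }
                es.shutdown();             // stop accepting new tasks
                try {
                    // wait briefly for the poll loops to exit and close their consumers
                    es.awaitTermination(5, java.util.concurrent.TimeUnit.SECONDS);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            }
        });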
Having read the code above, do you now have a handle on the Kafka 1.0.0 multi-consumer example? If you want to pick up more skills or read more related content, follow the 编程网 industry news channel. Thanks for reading!