
Kafka Notes 5: API


Kafka's Producer sends messages asynchronously. The send path involves two threads, the main thread and the Sender thread, plus a buffer shared between them, the RecordAccumulator.

The main thread appends messages to the RecordAccumulator; the Sender thread continuously pulls batches from the RecordAccumulator and sends them to the Kafka brokers.
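A quick way to see this two-thread model from the API: send() returns a Future<RecordMetadata> as soon as the record has been appended to the RecordAccumulator, and blocking on that future with get() waits until the Sender thread has actually delivered the record and received the broker's response. A minimal sketch (the class name, broker address, and topic are illustrative assumptions):

import java.util.Properties;
import java.util.concurrent.Future;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

public class AsyncVsSyncDemo {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "192.168.56.20:9092"); // assumed broker address
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        KafkaProducer<String, String> producer = new KafkaProducer<>(props);

        // Asynchronous: returns once the record is buffered in the RecordAccumulator
        Future<RecordMetadata> future = producer.send(new ProducerRecord<>("first", "hello"));

        // Effectively synchronous: block until the Sender thread delivers and the broker acks
        RecordMetadata metadata = future.get();
        System.out.println("partition " + metadata.partition() + ", offset " + metadata.offset());

        producer.close();
    }
}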

Dependencies

<!-- https://mvnrepository.com/artifact/org.apache.kafka/kafka -->
<dependency>
  <groupId>org.apache.kafka</groupId>
  <artifactId>kafka_2.12</artifactId>
  <version>2.1.1</version>
</dependency>
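Note that kafka_2.12 is the full broker artifact and pulls in the client classes transitively; if only the producer/consumer API is needed, the lighter kafka-clients artifact should suffice (a sketch, assuming the same version):

<dependency>
  <groupId>org.apache.kafka</groupId>
  <artifactId>kafka-clients</artifactId>
  <version>2.1.1</version>
</dependency>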

Producer

import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;

public class MyProducer {
    public static void main(String[] args) {
        // 1. Create the producer configuration
        Properties properties = new Properties();
        // 2. Kafka settings
        // Kafka cluster address
        properties.put("bootstrap.servers", "192.168.56.20:9092");
        // ack policy: wait for all in-sync replicas to acknowledge
        properties.put("acks", "all");
        // number of retries
        properties.put("retries", "3");
        // key/value serializer classes
        properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        // 3. Create the producer
        KafkaProducer<String, String> producer = new KafkaProducer<>(properties);
        // 4. Send data
        for (int i = 0; i < 100; i++) {
            producer.send(new ProducerRecord<>("first", "atbdqn--" + i));
        }
        // 5. Close the producer (flushes any buffered records)
        producer.close();
    }
}
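ProducerRecord has several constructors beyond the (topic, value) form used above: adding a key feeds the default hash partitioner, and an explicit partition index bypasses partitioning entirely. A hedged sketch of the variants (topic, keys, and values are illustrative):

// topic + value: no key; in this client version the default partitioner spreads
// keyless records across partitions round-robin
producer.send(new ProducerRecord<>("first", "value-only"));

// topic + key + value: the default partitioner hashes the key to pick a partition
producer.send(new ProducerRecord<>("first", "user-42", "value-with-key"));

// topic + partition + key + value: write straight to partition 0, skipping the partitioner
producer.send(new ProducerRecord<>("first", 0, "user-42", "pinned-value"));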

Consumer

import java.time.Duration;
import java.util.Arrays;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class MyConsumer {
    public static void main(String[] args) {
        // 1. Create the consumer configuration
        Properties properties = new Properties();
        // 2. Populate the configuration
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.56.20:9092");
        // enable automatic offset commits
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, true);
        // auto-commit interval in milliseconds
        properties.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
        // key/value deserializer classes
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        // consumer group id
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, "gp1");
        // 3. Create the consumer
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties);
        // 4. Subscribe to the topic(s) to consume
        consumer.subscribe(Arrays.asList("first"));
        // 5. Poll for data
        while (true) {
            ConsumerRecords<String, String> consumerRecords = consumer.poll(Duration.ofMillis(100));
            // iterate over and print the fetched records
            for (ConsumerRecord<String, String> consumerRecord : consumerRecords) {
                System.out.println(consumerRecord.key() + "--" + consumerRecord.value());
            }
        }
    }
}
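With enable.auto.commit=true as above, offsets are committed in the background every auto.commit.interval.ms, so a crash between a poll and the next commit can replay or skip records. For at-least-once processing you can turn auto commit off and commit after handling each batch; a minimal sketch of that poll loop, assuming ENABLE_AUTO_COMMIT_CONFIG was set to false:

// assumes: properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
while (true) {
    ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
    for (ConsumerRecord<String, String> record : records) {
        System.out.println(record.key() + "--" + record.value());
    }
    // commit offsets for the records just processed; commitAsync() is the non-blocking variant
    consumer.commitSync();
}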

Producer with a Custom Partitioner

import java.util.Map;

import org.apache.kafka.clients.producer.Partitioner;
import org.apache.kafka.common.Cluster;

// Implement the Partitioner interface and override its methods
public class MyPartitioner implements Partitioner {
    @Override
    public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
        // Custom partitioning: hash the key across all partitions of the topic.
        // Math.floorMod keeps the result non-negative even when hashCode() is negative.
        int numPartitions = cluster.partitionCountForTopic(topic);
        return Math.floorMod(key.toString().hashCode(), numPartitions);
    }

    @Override
    public void close() {
    }

    @Override
    public void configure(Map<String, ?> configs) {
    }
}
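The original body returned key.toString().hashCode() % numPartitions, which is negative whenever the hash code is negative and is therefore not a valid partition id; Math.floorMod keeps the result in range. A small worked example:

int numPartitions = 3;
int hash = -7;  // stand-in for a negative key.toString().hashCode()
System.out.println(hash % numPartitions);                // -1, not a valid partition
System.out.println(Math.floorMod(hash, numPartitions));  // 2, always in [0, numPartitions)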
import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;

public class PartitionProducer {
    public static void main(String[] args) {
        Properties prop = new Properties();
        prop.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.56.20:9092");
        prop.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        prop.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");

        // Register the custom partitioner
        prop.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, "com.atbdqn.partitioner.MyPartitioner");

        KafkaProducer<String, String> producer = new KafkaProducer<>(prop);

        for (int i = 0; i < 10; i++) {
            // A key is required here: MyPartitioner calls key.toString(), so a
            // keyless record would throw a NullPointerException.
            producer.send(new ProducerRecord<>("first", String.valueOf(i), "atnjbdqn--" + i),
                    (metadata, exception) -> {  // lambda expression as the send callback
                if (exception == null) {
                    System.out.println(metadata.partition() + "--" + metadata.offset());
                } else {
                    exception.printStackTrace();
                }
            });
        }
        producer.close();
    }
}
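For the partitioner to have a visible effect, the topic first needs more than one partition; with a single partition every record lands in partition 0 regardless of the hash. The partition--offset output from the callback above is a quick way to confirm how records are spread.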