Integrating Kafka with Spring using pure Java configuration

Time: 2020-06-30

This article walks through a code example of integrating Kafka with Spring using pure Java configuration (no XML). The example code is explained in detail and should be a useful reference for study or work for anyone who needs it.

KafkaConfig.java

package com.niugang.config;

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.consumer.OffsetCommitCallback;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;
import org.springframework.kafka.listener.AbstractMessageListenerContainer;

/**
 * 
 * @ClassName: KafkaConfig
 * @Description: Kafka configuration class, based on Spring pure Java configuration
 * @author: niugang
 * @date: 8:04:26 PM, October 20, 2018
 * @Copyright: [email protected] All rights reserved.
 *
 */
@Configuration
@EnableKafka
public class KafkaConfig {

  private Logger logger = LoggerFactory.getLogger(KafkaConfig.class);

  @Bean
  public ConcurrentKafkaListenerContainerFactory<String, String> kafkaListenerContainerFactory() {
    ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
    // Ack mode (how offsets are committed)
    // factory.getContainerProperties().setAckMode(AbstractMessageListenerContainer.AckMode.COUNT);
    // Whether offset commits are synchronous (default is true)
    // factory.getContainerProperties().setSyncCommits(true);
    // A commit callback is commonly used to log commit failures
    /*factory.getContainerProperties().setCommitCallback(new OffsetCommitCallback() {

      @Override
      public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception exception) {
        if (exception != null) {
          logger.error("Commit failed for offsets {}", offsets, exception);
        }

      }
    });*/
    factory.setConsumerFactory(consumerFactory());
    return factory;
  }

  /**
   * Consumer factory configuration
   * 
   * @return the consumer factory
   */
  @Bean
  public ConsumerFactory<String, String> consumerFactory() {
    return new DefaultKafkaConsumerFactory<>(consumerProps());
  }

  /**
   * Producer factory configuration
   * 
   * @return the producer factory
   */
  @Bean
  public ProducerFactory<String, String> producerFactory() {
    return new DefaultKafkaProducerFactory<>(senderProps());
  }

  /**
   * Kafka message-sending template
   * 
   * @return the Kafka template
   */
  @Bean
  public KafkaTemplate<String, String> kafkaTemplate() {
    return new KafkaTemplate<String, String>(producerFactory());
  }

  /**
   * Consumer listener bean
   * 
   * @return the consumer listener
   */
  @Bean
  public ConsumerListener listener() {
    return new ConsumerListener();
  }

  /**
   * Consumer configuration properties
   * 
   * @return the consumer configuration map
   */
  private Map<String, Object> consumerProps() {
    Map<String, Object> props = new HashMap<>();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "kafka_group_1");
    /**
     * enable.auto.commit: commit offsets automatically; the commit interval
     * (auto.commit.interval.ms) defaults to 5 seconds but is overridden to 100 ms below
     */
    props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, true);
    props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "100");
    props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "15000");
    /**
     * Kafka messages are key/value pairs; the following settings configure how
     * keys and values are deserialized
     */
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    return props;
  }

  /**
   * Producer configuration properties
   * 
   * A producer has three required properties:
   * <p>
   * 1. bootstrap.servers: the list of broker addresses. The list need not contain
   * every broker in the cluster; the producer discovers the others from any given
   * broker. Providing at least two is still recommended, so the producer can reach
   * the cluster even if one of them goes down.
   * </p>
   * <p>
   * 2. key.serializer: brokers expect message keys and values as byte arrays, so
   * the producer uses this class to serialize the key object into a byte array.
   * </p>
   * <p>
   * 3. value.serializer: the serialization class for message values.
   * </p>
   * 
   * @return the producer configuration map
   */
  private Map<String, Object> senderProps() {
    Map<String, Object> props = new HashMap<>();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    /**
     * When the broker returns a temporary, recoverable error, the producer resends
     * the message, but not indefinitely: once the retry limit (the retries property)
     * is reached, the producer gives up and reports the error. By default it waits
     * 100 ms between retries, configurable via the retry.backoff.ms property.
     */
    props.put(ProducerConfig.RETRIES_CONFIG, 0);
    /**
     * acks is the number of acknowledgments the producer requires the leader to have
     * received before considering a request complete; it controls the durability of
     * sent records. The following settings are allowed:
     * <ul>
     * <li><code>acks=0</code>: the producer does not wait for any acknowledgment from
     * the server. The record is added to the socket buffer and considered sent. There
     * is no guarantee the server received it, and the <code>retries</code> setting has
     * no effect (the client generally will not learn of any failure). The offset
     * returned for each record is always -1.
     * <li><code>acks=1</code>: the leader writes the record to its local log and
     * responds without waiting for all followers to acknowledge. If the leader fails
     * right after acknowledging the record but before the followers have replicated
     * it, the record is lost.
     * <li><code>acks=all</code>: the leader waits for the full set of in-sync replicas
     * to acknowledge the record. The record is not lost as long as at least one
     * in-sync replica remains alive. This is the strongest guarantee and is
     * equivalent to <code>acks=-1</code>.
     * </ul>
     */
    props.put(ProducerConfig.ACKS_CONFIG, "1");
    /**
     * When several messages are bound for the same partition, the producer groups
     * them into a single batch. Batching improves throughput but also adds latency.
     */
    // A network request is triggered and the batch is sent once the buffer reaches 16 KB
    props.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
    // Maximum time a message may wait in the buffer; once exceeded, the client sends
    // the batch immediately even if batch.size has not been reached
    props.put(ProducerConfig.LINGER_MS_CONFIG, 1);
    props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
    // Key serialization class
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    // Value serialization class
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    return props;
  }
}
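
The configuration above defines a kafkaTemplate bean for sending messages, but the article does not show a producer-side class. Below is a minimal sketch of one (the class and method names are hypothetical, not taken from the article's source); it injects the template and sends to the "annotated1" topic that the listener below subscribes to. Note that for Spring to pick it up, it would either have to be declared as a @Bean in KafkaConfig or be discovered via component scanning, which KafkaConfig does not enable.

SenderController.java

package com.niugang.controller;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.stereotype.Component;

/**
 * Hypothetical producer sketch: uses the kafkaTemplate bean declared in
 * KafkaConfig to publish String messages.
 */
@Component
public class SenderController {

  @Autowired
  private KafkaTemplate<String, String> kafkaTemplate;

  public void send(String message) {
    // send(topic, data) publishes asynchronously; "annotated1" matches the
    // topic consumed by ConsumerListener below
    kafkaTemplate.send("annotated1", message);
  }
}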

ConsumerListener.java

package com.niugang.config;

import org.springframework.kafka.annotation.KafkaListener;

/**
 * 
 * @ClassName: ConsumerListener
 * @Description: consumer listener
 * @author: niugang
 * @date: October 21, 2018 2:05:21 PM
 * @Copyright: [email protected] All rights reserved.
 *
 */
public class ConsumerListener {
  /**
   * topicPattern may be used instead of topics and supports regular expressions
   * @param foo the received message payload
   */
  @KafkaListener(id = "foo", topics = "annotated1")
  public void listen1(String foo) {
    System.out.println("received message is: " + foo);
  }
}
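
Since this is pure Java configuration, the whole setup can be bootstrapped without any XML via AnnotationConfigApplicationContext. A minimal sketch follows (the main class is mine, for illustration; the topic and message values are assumptions matching the listener above). Refreshing the context starts the @KafkaListener container, because KafkaConfig declares the listener() bean itself.

Main.java

package com.niugang;

import org.springframework.context.annotation.AnnotationConfigApplicationContext;
import org.springframework.kafka.core.KafkaTemplate;

import com.niugang.config.KafkaConfig;

/**
 * Illustrative bootstrap sketch: starts the Spring context from KafkaConfig,
 * sends a test message, and waits briefly so the listener can print it.
 */
public class Main {

  @SuppressWarnings("unchecked")
  public static void main(String[] args) throws InterruptedException {
    try (AnnotationConfigApplicationContext context =
        new AnnotationConfigApplicationContext(KafkaConfig.class)) {
      KafkaTemplate<String, String> template = context.getBean(KafkaTemplate.class);
      template.send("annotated1", "hello kafka");
      // give the listener a moment to poll and print the message before shutdown
      Thread.sleep(3000);
    }
  }
}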

Source code: https://gitee.com/niugangxy/kafka/tree/master/kafka-spring-boot

That's all for this article. I hope it helps you in your study, and I hope you will support DevelopPaper.
