I. Installation (together with ZooKeeper)
Since Kafka depends on ZooKeeper, we install ZooKeeper first.
1. Pull the ZooKeeper image
docker pull wurstmeister/zookeeper
2. Start ZooKeeper
docker run -d --name zookeeper -p 2181:2181 -e TZ="Asia/Shanghai" --restart always wurstmeister/zookeeper
We can check the ZooKeeper startup logs:
docker logs -f zookeeper
II. Installing Kafka
1. Pull the Kafka image
docker pull wurstmeister/kafka
2. Start Kafka
docker run --name kafka \
-p 9092:9092 \
-e KAFKA_BROKER_ID=0 \
-e KAFKA_ZOOKEEPER_CONNECT=<your ZooKeeper address:port> \
-e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://<server address>:9092 \
-e KAFKA_LISTENERS=PLAINTEXT://0.0.0.0:9092 \
-d wurstmeister/kafka
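KAFKA_LISTENERS is the address the broker binds to inside the container, while KAFKA_ADVERTISED_LISTENERS is the address it hands back to clients, so the advertised address must be reachable from wherever your producers and consumers run. As a sketch, assuming a hypothetical public address of 1.2.3.4 with ZooKeeper on the same host, the filled-in command might look like:

docker run --name kafka \
-p 9092:9092 \
-e KAFKA_BROKER_ID=0 \
-e KAFKA_ZOOKEEPER_CONNECT=1.2.3.4:2181 \
-e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://1.2.3.4:9092 \
-e KAFKA_LISTENERS=PLAINTEXT://0.0.0.0:9092 \
-d wurstmeister/kafka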
III. Installing the kafka-map web UI
1. Pull the kafka-map image
docker pull dushixiang/kafka-map
2. Start it
docker run -d \
-p 8080:8080 \
-v /opt/kafka-map/data:/usr/local/kafka-map/data \
-e DEFAULT_USERNAME=<initial username> \
-e DEFAULT_PASSWORD=<initial password> \
--name kafka-map \
--restart always dushixiang/kafka-map:latest
IV. Checking the running status
Note: I'm using a Tencent Cloud lightweight server. When Kafka starts, it connects to ZooKeeper, so be sure to open the ZooKeeper port (2181) in your server's firewall/security-group rules.
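To verify that everything is up, we can list the running containers:

docker ps

All three containers (zookeeper, kafka, kafka-map) should show an Up status.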
V. Logging into kafka-map
Visit http://<server address>:8080. The username and password are the ones set at startup. After logging in, we can see the current cluster, topics, brokers, and consumer groups. I won't go into detail about the tool itself here; there are very detailed tutorials available online.
VI. Spring Boot project integration
1. Create a new Spring Boot project and import the required dependencies
<parent>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-parent</artifactId>
    <version>2.2.2.RELEASE</version>
    <relativePath/> <!-- lookup parent from repository -->
</parent>

<dependencies>
    <dependency>
        <groupId>org.projectlombok</groupId>
        <artifactId>lombok</artifactId>
        <version>1.18.24</version>
    </dependency>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-web</artifactId>
    </dependency>
    <dependency>
        <groupId>org.springframework.kafka</groupId>
        <artifactId>spring-kafka</artifactId>
        <version>2.3.4.RELEASE</version>
    </dependency>
</dependencies>
2. Create the directory structure (the demo below uses the packages com.kafka.config, com.kafka.kafka, com.kafka.consumer, and com.kafka.controller)
3. Configuration in application.yml
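A minimal application.yml sketch, reconstructed from the @Value placeholders read by the KafkaConfiguration class below; the concrete values are assumptions and should be tuned to your environment:

spring:
  kafka:
    bootstrap-servers: <server address>:9092
    consumer:
      enable-auto-commit: true
      auto-commit-interval: 1000
      group-id: kafka-demo
      auto-offset-reset: latest
      max-poll-records: 100
    producer:
      retries: 0
      batch-size: 16384
      buffer-memory: 33554432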
4. Project code:
config:
package com.kafka.config;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.config.SaslConfigs;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.*;
import java.util.HashMap;
import java.util.Map;
@Slf4j
@Configuration
@EnableKafka
public class KafkaConfiguration {

    @Value("${spring.kafka.bootstrap-servers}")
    private String bootstrapServers;

    @Value("${spring.kafka.consumer.enable-auto-commit}")
    private Boolean autoCommit;

    @Value("${spring.kafka.consumer.auto-commit-interval}")
    private Integer autoCommitInterval;

    @Value("${spring.kafka.consumer.group-id}")
    private String groupId;

    @Value("${spring.kafka.consumer.auto-offset-reset}")
    private String autoOffsetReset;

    @Value("${spring.kafka.consumer.max-poll-records}")
    private Integer maxPollRecords;

    @Value("${spring.kafka.producer.batch-size}")
    private Integer batchSize;

    @Value("${spring.kafka.producer.buffer-memory}")
    private Integer bufferMemory;

    @Value("${spring.kafka.producer.retries}")
    private Integer retries;

    // @Value("${spring.kafka.producer.properties.sasl.jaas.config}")
    // private String producerJaasConfig;
    //
    // @Value("${spring.kafka.consumer.properties.sasl.jaas.config}")
    // private String consumerJaasConfig;

    /**
     * Producer configuration
     */
    @Bean
    public Map<String, Object> producerConfigs() {
        Map<String, Object> props = new HashMap<>();
        // acks=0: fire-and-forget, the producer does not wait for broker acknowledgment
        props.put(ProducerConfig.ACKS_CONFIG, "0");
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(ProducerConfig.RETRIES_CONFIG, retries);
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize);
        props.put(ProducerConfig.LINGER_MS_CONFIG, 1);
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory);
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        // props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT");
        // props.put(SaslConfigs.SASL_MECHANISM, "PLAIN");
        // props.put("sasl.jaas.config", producerJaasConfig);
        return props;
    }

    /**
     * Producer factory
     */
    @Bean
    public ProducerFactory<String, String> producerFactory() {
        return new DefaultKafkaProducerFactory<>(producerConfigs());
    }

    /**
     * Producer template
     */
    @Bean
    public KafkaTemplate<String, String> kafkaTemplate() {
        return new KafkaTemplate<>(producerFactory());
    }

    /**
     * Consumer configuration
     */
    @Bean
    public Map<String, Object> consumerConfigs() {
        Map<String, Object> props = new HashMap<>();
        props.put(ConsumerConfig.ALLOW_AUTO_CREATE_TOPICS_CONFIG, false);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords);
        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 120000);
        props.put(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, 180000);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        // props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT");
        // props.put(SaslConfigs.SASL_MECHANISM, "PLAIN");
        // props.put("sasl.jaas.config", consumerJaasConfig);
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, autoCommit);
        return props;
    }

    /**
     * Batch consumer factory
     */
    @Bean
    public KafkaListenerContainerFactory<?> batchFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(new DefaultKafkaConsumerFactory<>(consumerConfigs()));
        // Enable batch consumption; the batch size is capped by ConsumerConfig.MAX_POLL_RECORDS_CONFIG
        factory.setBatchListener(true);
        return factory;
    }

    /*================================= Added =================================*/
    @Bean("adminClient")
    public AdminClient adminClient() {
        KafkaAdmin kafkaAdmin = new KafkaAdmin(consumerConfigs());
        return AdminClient.create(kafkaAdmin.getConfig());
    }

    // @Bean("messageProducer")
    // public MessageProducer messageProducer(KafkaTemplate<String, String> kafkaTemplate,
    //                                        RedissonClient redissonClient) {
    //     return new MessageProducer(kafkaTemplate, redissonClient);
    // }
    //
    // @Bean("messageTopicGenerator")
    // public MessageTopicGenerator messageTopicGenerator(@Qualifier("adminClient") AdminClient adminClient,
    //                                                    RedissonClient redissonClient,
    //                                                    StringRedisTemplate stringRedisTemplate,
    //                                                    ApplicationContext applicationContext,
    //                                                    Environment environment) {
    //     return new MessageTopicGenerator(adminClient, redissonClient,
    //             stringRedisTemplate, applicationContext, environment);
    // }
}
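Since the consumer config above disables client-side topic auto-creation, the adminClient bean can be used to create the demo topic explicitly. A hedged sketch, assuming a single-broker setup (one partition, replication factor 1), e.g. run from a CommandLineRunner with the bean injected:

// requires org.apache.kafka.clients.admin.NewTopic and java.util.Collections
NewTopic demoTopic = new NewTopic("demo", 1, (short) 1); // topic, partitions, replication factor
adminClient.createTopics(Collections.singleton(demoTopic));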
kafka (the producer):
package com.kafka.kafka;

import lombok.extern.slf4j.Slf4j;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.SendResult;
import org.springframework.scheduling.annotation.Async;
import org.springframework.stereotype.Component;
import org.springframework.util.concurrent.ListenableFuture;
import org.springframework.util.concurrent.ListenableFutureCallback;

import java.util.List;

/**
 * @author xiaozhou
 */
@Slf4j
@Component
public class BaseKafkaProducer {

    private final KafkaTemplate<String, String> kafkaTemplate;

    public BaseKafkaProducer(KafkaTemplate<String, String> kafkaTemplate) {
        this.kafkaTemplate = kafkaTemplate;
    }

    /**
     * Send a Kafka message; partition 0 is used by default.
     *
     * @param topic topic name
     * @param key   message key
     * @param msg   message body
     */
    @Async
    public void send(String topic, String key, String msg) {
        send(topic, key, 0, msg);
    }

    public void send(String topic, String key, Integer partition, String msg) {
        ListenableFuture<SendResult<String, String>> future = kafkaTemplate.send(topic, partition, key, msg);
        future.addCallback(new ListenableFutureCallback<SendResult<String, String>>() {
            @Override
            public void onFailure(Throwable e) {
                log.error("kafka producer fail:{}", e.getMessage());
            }

            @Override
            public void onSuccess(SendResult<String, String> sendResult) {
                log.debug("kafka producer success:{}", sendResult.toString());
            }
        });
    }

    /**
     * Send messages in batch.
     *
     * @param topic   topic name
     * @param key     message key
     * @param msgList message bodies
     */
    @Async
    public void sendBatch(String topic, String key, List<String> msgList) {
        msgList.forEach(item -> send(topic, key, item));
    }
}
Test controller:
package com.kafka.controller;
import com.kafka.kafka.BaseKafkaProducer;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.*;
/**
 * @author xiaozhou
 */
@RestController
@RequestMapping("/kafka")
public class DemoController {

    @Autowired
    private BaseKafkaProducer baseKafkaProducer;

    @GetMapping("/msg/send")
    public String send(@RequestParam(value = "id") Integer id) {
        baseKafkaProducer.send("demo", "123", id.toString());
        return "success";
    }
}
Consumer:
package com.kafka.consumer;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.annotation.TopicPartition;
import org.springframework.stereotype.Service;
/**
 * @author xiaozhou
 */
@Slf4j
@Service
public class DemoConsumer {

    // @KafkaListener(topics = "demo", groupId = "kafka-demo")
    @KafkaListener(topicPartitions = {@TopicPartition(topic = "demo", partitions = {"0"})})
    public void listen(ConsumerRecord<String, String> record) {
        System.out.println("topic:" + record.topic() + ",key:" + record.key() + ",value:" + record.value());
        System.out.println("kafka message payload:" + record.value());
    }
}
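Note that this listener uses Spring Boot's default container factory and receives one record at a time. To actually use the batchFactory bean defined in KafkaConfiguration, the listener would reference it explicitly and accept a list of records — a sketch under that assumption (requires import java.util.List;):

@KafkaListener(topics = "demo", containerFactory = "batchFactory")
public void listenBatch(List<ConsumerRecord<String, String>> records) {
    // up to max-poll-records records are delivered per batch
    records.forEach(r -> log.info("batch record value: {}", r.value()));
}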
5. Start the project
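Once the application is running, we can trigger a test message — assuming the app listens locally on the default port 8080 (adjust the port in application.yml if it clashes with kafka-map):

curl "http://localhost:8080/kafka/msg/send?id=1"

The consumer should then print the topic, key, and value of the record to the console.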
At this point, our simple Kafka demo is built and tested. In my view, the key points are installing ZooKeeper correctly and configuring Kafka in the code afterwards. Since this version of the demo does not enable Kafka's security authentication, it stays relatively simple. Another troublesome area is serialization; I was stuck on serialization issues for quite a while at the start, so feel free to use my setup as a reference. Experts, please go easy on me…