project-es接收dao项目发过来的kafka消息,保存到es中,用于以后的搜索
1、项目结构
2、pom.xml
<parent>
<artifactId>spring-cloud-project</artifactId>
<groupId>com.meng</groupId>
<version>1.0-SNAPSHOT</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>project-es</artifactId>
<properties>
<spring-boot.version>2.3.12.RELEASE</spring-boot.version>
</properties>
<dependencies>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-test</artifactId>
<version>${spring-boot.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-data-elasticsearch</artifactId>
<version>${spring-boot.version}</version>
</dependency>
<dependency>
<groupId>org.projectlombok</groupId>
<artifactId>lombok</artifactId>
<version>1.18.16</version>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>commons-io</groupId>
<artifactId>commons-io</artifactId>
<version>2.4</version>
</dependency>
<!--kafka-->
<dependency>
<groupId>org.springframework.kafka</groupId>
<artifactId>spring-kafka</artifactId>
<version>2.5.14.RELEASE</version> <!-- spring-kafka versions独立于Spring Boot版本号; Boot 2.3.12.RELEASE管理的spring-kafka版本是2.5.14.RELEASE, 不能直接复用${spring-boot.version} -->
</dependency>
<!--consul-->
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-starter-consul-discovery</artifactId>
</dependency>
<!-- <dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-starter-consul-config</artifactId>
</dependency>
consul-config用于从Consul的KV存储中拉取分布式配置。它在Spring Cloud的bootstrap(引导)阶段加载, 早于application.yml的解析, 所以引入这个jar后本地配置必须放在bootstrap.yml中, 否则找不到对应的配置内容
-->
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>fastjson</artifactId>
<version>1.2.83</version> <!-- 1.2.28存在已知的autoType反序列化远程代码执行漏洞, 升级到1.2.x最后的安全修复版本 -->
</dependency>
</dependencies>
3、application.yml
spring:
application:
name: project-es
elasticsearch:
rest:
uris: http://192.168.233.137:9200 # spring.elasticsearch.rest.uris要求完整URI, 需带http://协议前缀
cloud:
consul:
host: 192.168.233.137
port: 8500
discovery:
instance-id: ${spring.application.name}:${server.port} #这个id作为唯一识别的id必填
service-name: consul-es
#开启ip地址注册
prefer-ip-address: true
#实例的请求ip
ip-address: ${spring.cloud.client.ip-address}
heartbeat:
enabled: true #不打开心跳机制,控制台会显示红叉
kafka:
bootstrap-servers: 192.168.233.137:9092
consumer:
group-id: esGroup # 指定默认消费者group id --> 由于在kafka中,同一组中的consumer不会读取到同一个消息,依靠group.id设置组名
auto-offset-reset: earliest # 可选值为earliest/latest/none: 没有已提交offset时, earliest从分区最早的offset开始读取, latest从最新的offset开始读取, none则抛出异常
enable-auto-commit: true # enable.auto.commit:true --> 设置自动提交offset
auto-commit-interval: 100 #如果'enable.auto.commit'为true,则消费者偏移自动提交给Kafka的频率(以毫秒为单位),默认值为5000。
key-deserializer: org.apache.kafka.common.serialization.StringDeserializer # 指定消息key和消息体的编解码方式
value-deserializer: org.apache.kafka.common.serialization.StringDeserializer # 指定消息key和消息体的编解码方式
server:
port: 83
4、启动类EsApplication
/**
 * Boot entry point for the project-es service, which consumes Kafka
 * messages and indexes them into Elasticsearch for later searching.
 */
@SpringBootApplication
public class EsApplication {

    /**
     * Launches the Spring application context.
     *
     * @param args command-line arguments forwarded to Spring
     */
    public static void main(String[] args) {
        SpringApplication.run(EsApplication.class, args);
    }
}
5、ElasticsearchRepository
/**
 * Spring Data repository for {@code BaiDuResult} documents.
 *
 * <p>All CRUD and search operations against the Elasticsearch index are
 * inherited from {@link ElasticsearchRepository} (entity type
 * {@code BaiDuResult}, id type {@code Long}); no custom queries are
 * declared here.
 */
public interface BaiDuResultRepository extends ElasticsearchRepository<BaiDuResult , Long> {
}
6、EsKafkaConsumer
@Configuration
@Slf4j
public class EsKafkaConsumer {
@Autowired
private ElasticsearchRestTemplate restTemplate;
@Autowired
private BaiDuResultRepository repository;
@KafkaListener(topics = {"resultMsgTopic"})
public void listen(ConsumerRecord<String, String> record){
Optional<String> kafkaMessage = Optional.ofNullable(record.value());
if (kafkaMessage.isPresent()) {
String itemStr = kafkaMessage.get();
log.info("---接收到kafka消息-----msg:{}" , itemStr);
if(!StringUtils.isEmpty(itemStr)){
BaiDuResult baiDuResult = JSONObject.parseObject(itemStr , BaiDuResult.class);
//保存到es
repository.save(baiDuResult);
}
}
}
}
以上,project-es搜索项目