kafka:
@Configuration
@EnableKafka
@Slf4j
public class KafkaConfig {
//********************** Consumer configuration ***********************
@Value("${kafka.consumer.servers}")
private String consumerServers;
@Value("${kafka.consumer.group.id}")
private String groupId;
@Value("${kafka.consumer.enable.auto.commit}")
private boolean enableAutoCommit;
@Value("${kafka.consumer.auto.commit.interval}")
private String autoCommitInterval;
@Value("${kafka.consumer.auto.offset.reset}")
private String autoOffsetReset;
@Value("${kafka.consumer.session.timeout}")
private String sessionTimeout;
@Value("${kafka.consumer.request.timeout}")
private String requestTimeout;
@Value("${kafka.consumer.heartbeat.interval}")
private String heartbeatInterval;
@Value("${kafka.consumer.concurrency}")
private int concurrency;
// NOTE(review): injected but never read in this class — confirm whether any listener uses it.
@Value("${kafka.customer.passtopic}")
private String passtopic;

/**
 * Consumer properties assembled from the injected {@code kafka.consumer.*} values.
 *
 * <p>Fix: {@code sessionTimeout} and {@code requestTimeout} were injected via
 * {@code @Value} but never added to the map, so those settings were silently ignored.
 *
 * @return the Kafka consumer configuration map
 */
@Bean
public Map<String, Object> consumerProperties(){
    Map<String, Object> configProps = new HashMap<>(16);
    configProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, consumerServers);
    configProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    configProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    configProps.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
    configProps.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit);
    configProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
    configProps.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, autoCommitInterval);
    // Previously missing: the two timeouts below were injected but never applied.
    configProps.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeout);
    configProps.put(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, requestTimeout);
    configProps.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, heartbeatInterval);
    return configProps;
}

/**
 * Consumer factory for String key/value records.
 *
 * <p>Fix: removed the original try/catch that printed the stack trace and then
 * retried the exact same constructor call — the constructor does not throw here,
 * and swallowing an exception only to repeat the failing call was dead logic.
 *
 * @return the consumer factory
 */
@Bean
public ConsumerFactory<String, String> consumerFactory() {
    return new DefaultKafkaConsumerFactory<>(consumerProperties());
}

/**
 * Kafka listener container factory.
 *
 * @return the concurrent kafka listener container factory
 */
@Bean
public ConcurrentKafkaListenerContainerFactory<String, String> kafkaListenerContainerFactory() {
    ConcurrentKafkaListenerContainerFactory<String, String> factory =
            new ConcurrentKafkaListenerContainerFactory<>();
    factory.setConsumerFactory(consumerFactory());
    factory.setConcurrency(concurrency);
    // Manual-ack mode intentionally left disabled (auto-commit is configured above):
    //factory.getContainerProperties().setAckMode(AbstractMessageListenerContainer.AckMode.MANUAL_IMMEDIATE);
    log.info("kafka config setting end");
    return factory;
}

//********************** Producer configuration ***********************
@Value("${kafka.producer.servers}")
private String produceServers;
@Value("${kafka.producer.retries}")
private int retries;
@Value("${kafka.producer.batch.size}")
private int batchSize;
@Value("${kafka.producer.linger}")
private int linger;
@Value("${kafka.producer.buffer.memory}")
private int bufferMemory;

/**
 * Producer properties assembled from the injected {@code kafka.producer.*} values.
 * acks=1: leader acknowledgement only.
 *
 * @return the Kafka producer configuration map
 */
public Map<String, Object> producerConfigs() {
    Map<String, Object> props = new HashMap<>(16);
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, produceServers);
    props.put(ProducerConfig.RETRIES_CONFIG, retries);
    props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize);
    props.put(ProducerConfig.LINGER_MS_CONFIG, linger);
    props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory);
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    props.put(ProducerConfig.ACKS_CONFIG, "1");
    return props;
}

/** Producer factory for String key/value records. */
@Bean
public ProducerFactory<String, String> producerFactory() {
    return new DefaultKafkaProducerFactory<>(producerConfigs());
}

/** Shared KafkaTemplate bean, exposed under the name "kafkaTemplate". */
@Bean(value = "kafkaTemplate")
public KafkaTemplate<String, String> kafkaTemplate() {
    return new KafkaTemplate<>(producerFactory());
}
}
@Component
@Slf4j
public class KafkaConsumer {
@Autowired
AddressRepository addressRepository;
/**
 * Listens on topic "wry" (group "kconsumer") and persists each message as an Address.
 * Expected payload format: {@code "name,address"} (comma-separated).
 *
 * <p>Fixes: class previously lacked {@code @Component}/{@code @Slf4j} so the listener
 * was never registered and {@code log} did not resolve; {@code 8l} used a lowercase-L
 * literal; {@code split[1]} was read without checking the array length, throwing
 * {@link ArrayIndexOutOfBoundsException} on malformed messages.
 *
 * @param record the consumed record; key and value are Strings per KafkaConfig
 */
@KafkaListener(topics = {"wry"},groupId = "kconsumer")
public void listener(ConsumerRecord<String,String> record){
    log.info("[kafka消费]");
    // Message payload and its offset in the partition.
    String message = record.value();
    long offset = record.offset();
    log.info("读取的消息:{} 当前偏移量:{}", message, offset);
    String[] split = message.split(",");
    if (split.length < 2) {
        // Malformed payload — skip instead of crashing the listener thread.
        log.warn("Skipping malformed message (expected \"name,address\"): {}", message);
        return;
    }
    Address address = new Address();
    // NOTE(review): fixed id means every message overwrites the same row — confirm intent.
    address.setId(8L);
    address.setName(split[0]);
    address.setAddress(split[1]);
    addressRepository.save(address);
}
}
@Component
@Slf4j
public class KafkaSendServer {
/**
 * Parameterized to match the String serializers configured in KafkaConfig
 * (the original raw {@code KafkaTemplate} triggered unchecked warnings).
 */
@Autowired
private KafkaTemplate<String, String> kafkaTemplate;
/**
 * Sends {@code msg} to the hard-coded topic "TopicName".
 *
 * <p>Fixes: parameterized logging instead of string concatenation; dropped the
 * duplicate {@code System.out.println} that repeated what the logger already said.
 *
 * @param msg the message payload to publish
 */
public void Kafkasend(String msg){
    log.info("【监听到Kafkasend函数】,写入消息{}", msg);
    kafkaTemplate.send("TopicName", msg);
}
}
RabbitMQ(Spring AMQP 方式):
config(配置类):
// Fix: a @Bean-declaring class should be @Configuration, not @Component
// (lite mode offers no inter-bean-call proxying and misstates intent).
@Configuration
@Slf4j
public class RabbitMQConfig {
/** Queue for the direct-exchange binding below. */
@Bean("setQueue")
public Queue setQueue() {
    log.info("*******[entering RabbitMQConfig]");
    return new Queue("rabbit_mq_test");
}
/** Queue bound to the built-in "amq.direct" exchange (see connected()). */
@Bean("newQueue")
public Queue newQueue() {
    return new Queue("rabbit_mq_testNew");
}
/**
 * Direct exchange. NOTE(review): "Excange" is a typo for "Exchange", but the
 * name is kept — RabbitSendServer publishes to this exact exchange name.
 */
@Bean("Excange")
public DirectExchange exchange(){
    return new DirectExchange("Excange");
}
/** Binds rabbit_mq_test to the "Excange" exchange with routing key "rabbitmqTest". */
@Bean
public Binding connectExchange(Queue setQueue,DirectExchange Excange){
    return BindingBuilder.bind(setQueue).to(Excange).with("rabbitmqTest");
}
/** Binds rabbit_mq_testNew to the broker's built-in "amq.direct" exchange. */
@Bean
public Binding connected(){
    return new Binding("rabbit_mq_testNew",Binding.DestinationType.QUEUE,"amq.direct","rabbitmqTestNew",null);
}
}
producer:
@Component
@Slf4j
public class RabbitSendServer {
@Autowired
RabbitTemplate rabbitTemplate;
/**
 * Publishes a fixed message to exchange "Excange" with routing key "rabbitmqTest"
 * (bound to queue rabbit_mq_test in RabbitMQConfig).
 *
 * <p>Fix: replaced the stray {@code System.out.println} with the class logger.
 *
 * @return always {@code null} (kept for interface compatibility with existing callers)
 */
public String rabbitSend(){
    String msg = "rabbitmq";
    log.info("--------【entering rabbitSend】");
    rabbitTemplate.convertAndSend("Excange","rabbitmqTest",msg);
    log.info("Rabbit mq ok");
    return null;
}
/**
 * Publishes a fixed message to the built-in "amq.direct" exchange with routing
 * key "rabbitmqTestNew" (bound to queue rabbit_mq_testNew in RabbitMQConfig).
 *
 * @return always {@code null} (kept for interface compatibility with existing callers)
 */
public String newSend(){
    String msg = "newRmq";
    log.info("--------【newSend】");
    rabbitTemplate.convertAndSend("amq.direct","rabbitmqTestNew",msg);
    log.info("newRmq mq ok");
    return null;
}
}
consumer:
@Component
@Slf4j
public class RabbitConsumer {
/**
 * Consumes from queue rabbit_mq_test.
 *
 * <p>Fixes: use the @Slf4j logger instead of System.out; decode the body with an
 * explicit UTF-8 charset — {@code new String(byte[])} used the platform default.
 *
 * @param msg the raw AMQP message
 */
@RabbitListener(queues = "rabbit_mq_test")
public void rabbitReceive(Message msg) {
    log.info("helloWorld模式 received msg : {}",
            new String(msg.getBody(), java.nio.charset.StandardCharsets.UTF_8));
}
/**
 * Consumes from queue rabbit_mq_testNew.
 *
 * @param msg the raw AMQP message
 */
@RabbitListener(queues = "rabbit_mq_testNew")
public void newRabbitReceive(Message msg) {
    log.info("new : {}",
            new String(msg.getBody(), java.nio.charset.StandardCharsets.UTF_8));
}
}
方法 2:使用 RabbitMQ 原生 Java 客户端 API(不经 Spring):
public class Producer {
// Queue this standalone demo publishes to.
private final static String QUEUE_NAME="hello";
// Minimal "hello world" publisher using the raw RabbitMQ Java client (no Spring).
// NOTE(review): the connection/channel are not closed in the visible portion of
// this method — confirm a try-with-resources or close() exists past this snippet.
public static void main(String[] args) throws Exception {
// Create the connection factory
ConnectionFactory connectionFactory = new ConnectionFactory();
// Broker host to connect to
connectionFactory.setHost("127.0.0.1");
// Username
connectionFactory.setUsername("guest");
// Password
connectionFactory.setPassword("guest");
// Open the connection
Connection connection = connectionFactory.newConnection();
// Open a channel on the connection
Channel channel = connection.createChannel();
/**
 * Declare a queue.
 * 1. queue name
 * 2. durable — whether messages survive a broker restart (persisted to disk);
 *    false = non-durable (default), true = durable
 * 3. exclusive — whether the queue is restricted to this one consumer
 *    (no sharing); true = exclusive, false = shared (default)
 * 4. autoDelete — delete the queue once the last consumer disconnects;
 *    true = auto-delete, false = keep
 * 5. extra arguments (delayed messages, dead-lettering, etc.)
 */
channel.queueDeclare(QUEUE_NAME,false,false,false,null);
String message = "hello world";
/**
 * Publish a message.
 * 1. target exchange ("" = default exchange)
 * 2. routing key — here the queue name
 * 3. extra properties
 * 4. message body
 */
channel.basicPublish("",QUEUE_NAME,null,message.getBytes());
System.out.println("消息发送完毕");