Multi-threaded Kafka Data Consumption

Thread startup class

package com.zkdj.message;

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import com.zkdj.message.kafka.KafkaServer;

/**
 * Multi-threaded task launcher: decides how many threads execute the consumer task.
 */

public class Main {
	public static void main(String[] args) {
		// Start the service: create a fixed-size thread pool
		ExecutorService executorService = Executors.newFixedThreadPool(10);
		ThreadPool threadPool = new ThreadPool();
		for (int i = 0; i < 20; i++) {
			// Submit the consumer task to the pool. With 10 threads and tasks
			// that loop forever, only the first 10 submissions ever run.
			executorService.submit(threadPool);
		}
		// Stop accepting new tasks; already-running tasks are not interrupted
		executorService.shutdown();
	}
}

class ThreadPool implements Runnable {

	@Override
	public void run() {
		// Each task creates and starts its own Kafka consumer
		KafkaServer server = new KafkaServer();
		server.start();
	}

}
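One caveat about the launcher above: shutdown() only stops the pool from accepting new tasks, and since each consumer task loops forever, the pool never actually terminates (and with 10 threads for 20 submissions, the last 10 tasks never even start). A minimal teardown sketch, assuming the consumer loop is made interruptible (e.g. via consumer.wakeup()) and that java.util.concurrent.TimeUnit is imported:

		// Sketch only: give running tasks a grace period, then interrupt them.
		executorService.shutdown();
		try {
			if (!executorService.awaitTermination(30, TimeUnit.SECONDS)) {
				executorService.shutdownNow(); // interrupts the consumer tasks
			}
		} catch (InterruptedException e) {
			executorService.shutdownNow();
			Thread.currentThread().interrupt();
		}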

Kafka message consumer class

package com.zkdj.message.kafka;

import java.util.Collections;
import java.util.Properties;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.log4j.Logger;
import com.zkdj.message.common.ConfigManager;
import com.zkdj.message.common.Constants.Pks;
import net.sf.json.JSONArray;
import net.sf.json.JSONObject;

/**
 * Concurrent requests
 * 
 * @author sxj
 */
public class KafkaServer {
	protected static final ConfigManager config = ConfigManager.getInstance();
	private Logger log = Logger.getLogger("msg");

	// total number of records consumed, shared across all consumer threads
	private static int dataSize = 0;

	private static Lock lock = new ReentrantLock();
	 
	/**
	 * Start the service: connect to the Kafka cluster over SASL_PLAINTEXT,
	 * subscribe to the topic, then poll in a loop. Each polled batch is
	 * collected into a JSON array and the shared record counter is updated
	 * under the lock.
	 */
	@SuppressWarnings("resource") // the consumer lives for the whole thread and is never closed
	public void start() {
		log.info("Starting service...");

		// maximum number of documents per batch (read from config; not yet used below)
		int maxDocumentNumber = Integer.parseInt(config.get(Pks.MAX_DOCUMENT_NUMBER));

		// ------------------------------------
		try {
			Properties props = new Properties();
			props.put("bootstrap.servers",
					"xgsj-kafka.istarshine.com:9092,xgsj-kafka.istarshine.com:9093,xgsj-kafka.istarshine.com:9094");
			props.put("group.id", "g_zk");
			props.put("enable.auto.commit", "true");
			props.put("auto.offset.reset", "latest");
			props.put("auto.commit.interval.ms", "1000");
			props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
			props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
			// ACL authorization: authenticate over SASL/PLAIN
			props.put("security.protocol", "SASL_PLAINTEXT");
			props.put("sasl.mechanism", "PLAIN");
			// path to the JAAS configuration file holding the client credentials
			System.setProperty("java.security.auth.login.config",
					"/opt/hw/pull/kafka_client_jaasecs.conf");
			System.out.println("create KafkaConsumer");
			System.out.println("receive data");
			KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
			consumer.subscribe(Collections.singletonList("user_subject_XXX"));
			while (true) {
				JSONArray weiboArticle = new JSONArray();
				// block for up to 1000 ms waiting for records
				ConsumerRecords<String, String> records = consumer.poll(1000);
				for (ConsumerRecord<String, String> record : records) {
					JSONObject json = JSONObject.fromObject(record.value());
					weiboArticle.add(json);
				}
				// process the data
				// TODO
				lock.lock();
				try {
					dataSize += weiboArticle.size();
				} finally {
					lock.unlock();
				}
				log.info("-------------------------------------------------------------------");
			}
		} catch (Exception ex) {
			ex.printStackTrace();
			System.out.println("Error while consuming from Kafka: " + ex.getMessage());
		}
		// ------------------------------------
	}


}
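The java.security.auth.login.config property points the JVM at a JAAS file on disk. Its exact contents depend on the cluster's credentials, but for SASL/PLAIN a client section typically looks like the sketch below (the username and password are placeholders, not values from the original setup):

// kafka_client_jaasecs.conf -- SASL/PLAIN client login, placeholder credentials
KafkaClient {
    org.apache.kafka.common.security.plain.PlainLoginModule required
    username="your-username"
    password="your-password";
};

As a side note, the ReentrantLock guarding a single counter update could be replaced with a java.util.concurrent.atomic.AtomicInteger, which gives the same thread safety without explicit lock/unlock bookkeeping. A sketch of the substitution:

	// field: replaces the plain int counter and the lock (sketch)
	private static final AtomicInteger dataSize = new AtomicInteger(0);

	// inside the poll loop, replaces the lock()/unlock() block
	dataSize.addAndGet(weiboArticle.size());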

 
