Uploading Kafka data to HDFS on a schedule with Java

Three pieces work together: a Kafka consumer that appends incoming messages to a local buffer file, a producer that publishes test messages, and a java.util.Timer task that periodically uploads the buffer file to HDFS.

Consumer:

package com.zpark.kafkatest.one;

import java.io.*;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Collections;
import java.util.Properties;


import org.apache.hadoop.conf.Configuration;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class ConsumerDemo {
    public static void main(String[] args) {
        receive();
    }

    private static void receive() {
        Properties properties = new Properties();
        properties.put("bootstrap.servers", "hdp-3:9092");
        properties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        properties.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        properties.put("group.id", "yangk");
        properties.put("enable.auto.commit", true);

        KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(properties);
        consumer.subscribe(Collections.singleton("test"));

        // HDFS connection settings for the upload. They are prepared here but
        // not used by this class; the actual upload is done by HdfsUtils.
        URI uri = null;
        Configuration conf = null;
        String user = "root";
        try {
            uri = new URI("hdfs://hdp-1:9000");
            conf = new Configuration();
            // dfs.replication: number of replicas HDFS keeps of each block
            conf.set("dfs.replication", "2");
            // dfs.blocksize: HDFS block size (a 100 MB file splits into 64 MB + 36 MB)
            conf.set("dfs.blocksize", "64m");
        } catch (URISyntaxException e) {
            e.printStackTrace();
        }

        try {
            FileOutputStream fos = new FileOutputStream("f:/toupload.txt");
            OutputStreamWriter osw = new OutputStreamWriter(fos);
            // Create the writer once, outside the poll loop
            BufferedWriter bw = new BufferedWriter(osw);
            try {
                while (true) {
                    // Poll for up to 100 ms (newer clients take a Duration here)
                    ConsumerRecords<String, String> records = consumer.poll(100);
                    for (ConsumerRecord<String, String> record : records) {
                        String msg = "key:" + record.key() + ",value:" + record.value()
                                + ",offset:" + record.offset() + ",topic:" + record.topic() + "\r\n";
                        System.out.printf("key=%s,value=%s,offset=%s,topic=%s%n",
                                record.key(), record.value(), record.offset(), record.topic());
                        // Append the record to the local buffer file that the
                        // timer task later uploads to HDFS
                        bw.write(msg);
                        bw.flush();
                    }
                }
            } catch (IOException e) {
                e.printStackTrace();
            }
        } catch (FileNotFoundException e) {
            e.printStackTrace();
        } finally {
            consumer.close();
        }
    }
}
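Note that the consumer only buffers records into f:/toupload.txt on the local disk; the actual HDFS upload is done by the timer task below, which ships the file every 30 minutes.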

Producer:

package com.zpark.kafkatest.one;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import java.util.Properties;


public class ProducerDemo {
    public static void main(String[] args) {
        send();
    }

    private static void send() {
        Properties properties = new Properties();
        properties.put("bootstrap.servers", "hdp-2:9092");
        properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        KafkaProducer<String, String> producer = new KafkaProducer<String, String>(properties);
        // Publish a single record to the "test" topic the consumer subscribes to
        ProducerRecord<String, String> record = new ProducerRecord<String, String>("test", "大美人");
        producer.send(record);
        producer.close(); // close() flushes any buffered records before returning
    }
}
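As written, each run of ProducerDemo publishes exactly one record. A minimal variation (not in the original post; the class name ProducerLoopDemo is made up here) that sends a small batch of timestamped records, so the consumer has a steady stream to buffer:

package com.zpark.kafkatest.one;

import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;

public class ProducerLoopDemo {
    public static void main(String[] args) {
        Properties properties = new Properties();
        properties.put("bootstrap.servers", "hdp-2:9092");
        properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        KafkaProducer<String, String> producer = new KafkaProducer<String, String>(properties);
        // Send ten timestamped records to the same "test" topic
        for (int i = 0; i < 10; i++) {
            producer.send(new ProducerRecord<String, String>("test", "key-" + i,
                    "message " + i + " at " + System.currentTimeMillis()));
        }
        producer.close();
    }
}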

Timer:

package com.zpark.kafkatest.one;

import java.util.Timer;

public class Time {
    public static void main(String[] args) {
        Timer timer = new Timer();
        // Run SimpleTime after a 5 ms delay, then every 30 minutes
        timer.schedule(new SimpleTime(), 5, 30 * 60 * 1000L);
    }
}

And the task it runs, in a separate source file:

package com.zpark.kafkatest.one;

import java.io.File;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.TimerTask;

public class SimpleTime extends TimerTask {
    public void run() {
        // 24-hour timestamp (HH, not hh), e.g. for naming the uploaded file
        SimpleDateFormat sdf = new SimpleDateFormat("yyyyMMddHHmmss");
        String format = sdf.format(new Date());
        HdfsUtils hdfsUtils = new HdfsUtils();
        // Upload the local buffer file written by the consumer
        hdfsUtils.toHdfs(new File("f:/toupload.txt"));
    }
}
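The HdfsUtils class referenced above is not shown in the post. A minimal sketch of what it might look like, assuming it uses the Hadoop FileSystem API with the connection details from ConsumerDemo (NameNode hdfs://hdp-1:9000, user root); the target directory /kafka/ is a placeholder:

package com.zpark.kafkatest.one;

import java.io.File;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HdfsUtils {
    public void toHdfs(File localFile) {
        try {
            Configuration conf = new Configuration();
            conf.set("dfs.replication", "2");
            conf.set("dfs.blocksize", "64m");
            // Connect to the NameNode as user "root" (values from ConsumerDemo)
            FileSystem fs = FileSystem.get(new URI("hdfs://hdp-1:9000"), conf, "root");
            // Copy the local buffer file into HDFS; a timestamp suffix keeps
            // each upload from overwriting the previous one
            Path target = new Path("/kafka/toupload_" + System.currentTimeMillis() + ".txt");
            fs.copyFromLocalFile(new Path(localFile.getAbsolutePath()), target);
            fs.close();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}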
