1、Collection.java:负责收集原始数据(消费者保存在本地的数据)到指定文件夹,并进行上传hdfs,上传成功的文件移动到待清理的文件夹
package csdn;
import java.io.File;
import java.io.FilenameFilter;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.TimerTask;
import java.util.UUID;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
public class Collection extends TimerTask { //TimerTask计时器
//run():计时器任务要执行的操作
@Override
public void run() {
try {
SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd-HH");
String now = sdf.format(new Date());
System.out.println("-------当前时间:"+now+"--------");
//创建原始数据的对象
File srcFile = new File("E:/hadoop/hdpdata/logs/accesslog");
File[] listFiles = srcFile.listFiles(new FilenameFilter() { //加了一个过滤器
//过滤器内容
@Override
public boolean accept(File dir, String name) {
//如果文件以access.log.开头,返回true,即满足if条件的留下,不满足去掉
if(name.startsWith("kafka.log.")){
return true;
}
return false;
}
});
//创建待上传数据的对象
File toUploadDir = new File("E:/hadoop/hdpdata/logs/toUpload");
for (File file : listFiles) {
System.out.println("原始数据有:"+file.getAbsolutePath());
//将过滤好的文件复制到待上传的目录 copyFileToDirectory复制 moveFileToDirectory移动
FileUtils.copyFileToDirectory(file, toUploadDir, true); //true(如果不存在创建)
}
System.out.println("以上文件已经移动到带上传路径了");
//将文件上传到hdfs
File backUpDir = new File("E:/hadoop/hdpdata/logs/backUp/" + now); //上传后的文件移动到backUpDir文件夹
File[] toUploadFiles = toUploadDir.listFiles();
FileSystem fs =FileSystem.get(new URI("hdfs://hdp-1:9000"),new Configuration(),"root");
for (File file : toUploadFiles) {
//本地上传到hdfs UUID.randomUUID生成随机数+字母
fs.copyFromLocalFile(new Path(file.getAbsolutePath()),new Path("/kafka_logs/"+now+UUID.randomUUID()+".log"));
//移动到待删除目录
FileUtils.moveFileToDirectory(file, backUpDir,true);
}
}catch (IOException | InterruptedException | URISyntaxException e) {
// TODO 自动生成的 catch 块
e.printStackTrace();
}
}
}
2、ClearTask.java:清理任务,解析以时间命名的文件,生成时间戳,利用时间戳判断文件夹是否到1小时,满足就删除
package csdn;
import java.io.File;
import java.io.IOException;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.TimerTask;
import org.apache.commons.io.FileUtils;
public class ClearTask extends TimerTask {
//执行清理任务
@Override
public void run() {
SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd-HH");
String now =sdf.format(new Date());
//时间戳
long time = new Date().getTime();
File backup=new File("E:/hadoop/hdpdata/logs/backUp");
File[] listFiles = backup.listFiles();
for (File file : listFiles) {
System.out.println(file.getAbsolutePath());
try {
//解析字符串的文本,生成 Date
long pass = sdf.parse(file.getName()).getTime();
if((time - pass) / (60*60*1000)>1){ //大于1小时删除
System.out.println(file.getName()+"已删除");
FileUtils.deleteDirectory(file);
}else {
System.out.println(file.getName()+"未达到删除时间!");
}
} catch (ParseException | IOException e) {
// TODO 自动生成的 catch 块
e.printStackTrace();
}
}
}
}
3、Consumer.java:timer.schedule安排定时任务,ConsumerMessage()有死循环,放在最后
package csdn;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.text.SimpleDateFormat;
import java.util.Collections;
import java.util.Date;
import java.util.Properties;
import java.util.Timer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
public class Consumer {
private static KafkaConsumer<String, String> consumer; //接受数据方法
private static Properties props; //传递的参数
static SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd-HH");
static String now = sdf.format(new Date());
static {
props = new Properties();
//消费者kafka地址 不用和producer地址一样
props.put("bootstrap.servers", "hdp-2:9092");
props.put("zookeeper.connect", "hdp-1:2181,hdp-2:2181,hdp-3:2181");
//key反序列化
props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
//组???????
props.put("group.id", "wang");
}
private static void ConsumerMessage() {
HDFSWriter hdfsWriter = new HDFSWriter();
//允许自动提交位移
props.put("enable.auto.commit", true);
consumer = new KafkaConsumer<String, String>(props);
consumer.subscribe(Collections.singleton("xin"));
//使用轮询拉取数据--消费完成之后会根据设置时长来清除消息,被消费过的消息,如果想再次被消费,可以根据偏移量(offset)来获取
try {
//保存数据的文件夹,如果没有创建
String path = "E:/hadoop/hdpdata/logs/accesslog";
File file = new File(path);
file.mkdir();
while (true) {
ConsumerRecords<String, String> records = consumer.poll(100); //获取数据
FileWriter fw = new FileWriter(file + "/kafka.log." + now, true);
for (ConsumerRecord<String, String> r : records) {
System.out.printf("topic = %s, offset = %s, key = %s, value = %s", r.topic(), r.offset(),
r.key(), r.value());
// hdfsWriter.writer(r.toString());
fw.write(r.toString());
fw.flush();
}
fw.close();
}
} catch (IOException e) {
e.printStackTrace();
} finally {
consumer.close();
}
}
public static void main(String[] args) {
Timer timer = new Timer();
//安排指定的任务从指定的延迟后开始进行重复的固定延迟执行 0:即时执行
timer.schedule(new Collection(),0,60*1000);
timer.schedule(new ClearTask(),0,60*1000);
ConsumerMessage();
}
}