还是继续前几篇文章的代码。
当我们需要爬取的图片量级比较大的时候,就需要多线程爬取下载了。这里我们用 ForkJoinPool 来处理并发下载任务。
1、DownloadTask下载任务类
package com.dyw.crawler.util;
import java.io.File;
import java.io.InputStream;
import java.util.List;
import java.util.concurrent.RecursiveAction;
/**
 * ForkJoin task that downloads a slice of image URLs in parallel.
 * Splits the [start, end) range recursively until a slice is small enough
 * (&lt; THRESHOLD) to download sequentially.
 * Created by dyw on 2017/9/7.
 */
public class DownloadTask extends RecursiveAction {
    /** Maximum number of URLs a single leaf task downloads directly. */
    private static final int THRESHOLD = 8;
    /** Full URL list shared (read-only) by all subtasks. */
    private final List<String> urls;
    /** Inclusive start index of this task's slice. */
    private final int start;
    /** Exclusive end index of this task's slice. */
    private final int end;
    /** Directory the images are saved into. */
    private final String path;

    /**
     * @param urls  list of image URLs
     * @param start inclusive start index into {@code urls}
     * @param end   exclusive end index into {@code urls}
     * @param path  target directory for the downloaded files
     */
    public DownloadTask(List<String> urls, int start, int end, String path) {
        this.urls = urls;
        this.start = start;
        this.end = end;
        this.path = path;
    }

    @Override
    protected void compute() {
        if (end - start < THRESHOLD) {
            // Leaf case: slice is small enough, download each URL directly.
            for (int i = start; i < end; i++) {
                String url = urls.get(i);
                String[] split = url.split("/");
                // Use the last path segment of the URL as the file name.
                String imgName = split[split.length - 1];
                try {
                    File file = new File(path + "/" + imgName);
                    InputStream inputStream = CrawlerUtils.downLoadFromUrl(url);
                    IOUtils.saveFile(inputStream, file);
                    System.out.println("success:" + url);
                } catch (Exception e) {
                    // Best-effort: report the failure and continue with the rest.
                    System.out.println("fail:" + url);
                }
            }
        } else {
            // Split the slice in half and process both halves in parallel.
            // BUG FIX: the original only called left.fork(); right.fork(); without
            // joining, so compute() returned before the subtasks finished and the
            // task tree reported completion prematurely. invokeAll() forks and
            // joins, guaranteeing both halves are done before this task completes.
            int middle = (start + end) >>> 1; // overflow-safe midpoint
            DownloadTask left = new DownloadTask(urls, start, middle, path);
            DownloadTask right = new DownloadTask(urls, middle, end, path);
            invokeAll(left, right);
        }
    }
}
2、main主方法
package com.dyw.crawler.project;
import com.dyw.crawler.util.CrawlerUtils;
import com.dyw.crawler.util.DownloadTask;
import com.dyw.crawler.util.IOUtils;
import com.dyw.crawler.util.RegularUtils;
import java.io.File;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.TimeUnit;
/**
 * Downloads images concurrently via a ForkJoin pool, then downloads the same
 * set sequentially, and prints the elapsed time of each approach.
 * Created by dyw on 2017/9/7.
 */
public class Project3 {
    public static void main(String[] args) {
        ForkJoinPool forkJoinPool = new ForkJoinPool();
        String path = "C:\\Users\\dyw\\Desktop\\crawler\\photo";
        String path1 = "C:\\Users\\dyw\\Desktop\\crawler\\photo1";
        String url = "http://www.tuigirlba.cc/page/show/";
        List<String> list = new ArrayList<>();
        try {
            // Collect all image URLs from pages 330..379.
            for (int i = 330; i < 380; i++) {
                String htmlContent = CrawlerUtils.get(url + i);
                List<String> imgUrls = RegularUtils.getIMGUrl(htmlContent);
                list.addAll(imgUrls);
            }
            long l = System.currentTimeMillis();
            // BUG FIX: the original used execute() + shutdown() +
            // awaitTermination(20, SECONDS), which neither guarantees the
            // downloads finished (they may exceed 20 s) nor measures them
            // accurately (it always waits out the timeout if work remains).
            // invoke() blocks until the whole task tree completes, so the
            // elapsed time below is the true parallel download time.
            forkJoinPool.invoke(new DownloadTask(list, 0, list.size(), path));
            forkJoinPool.shutdown();
            long l1 = System.currentTimeMillis() - l;
            long l2 = System.currentTimeMillis();
            // Sequential download of the same URLs, for comparison.
            list.forEach(imgUrl -> {
                String[] split = imgUrl.split("/");
                // Use the last path segment of the URL as the file name.
                String imgName = split[split.length - 1];
                try {
                    File file1 = new File(path1 + "/" + imgName);
                    InputStream inputStream = CrawlerUtils.downLoadFromUrl(imgUrl);
                    IOUtils.saveFile(inputStream, file1);
                    System.out.println("success:" + imgUrl);
                } catch (Exception e) {
                    // Best-effort: report the failure and continue.
                    System.out.println("fail:" + imgUrl);
                }
            });
            long l3 = System.currentTimeMillis() - l2;
            System.out.println("forkjoin处理时间:" + l1);
            System.out.println("没有并行处理时间:" + l3);
        } catch (Exception e) {
            throw new RuntimeException("获取内容失败!", e);
        }
    }
}
3、运行结果
从下面两张截图中可以看到,并行下载比同步下载快很多!
具体代码我上传在github上,需要完整代码的可以自己下载 https://github.com/dingyinwu81/crawler
如果有什么代码修改的建议,请给我留言呗! ☺☺☺