Introduction:
This little demo crawls links related to taobao.com.
Starting from the URL "www.taobao.com", it collects every URL found on that page and stores them in toCrawlList. While toCrawlList is not empty, one URL is taken out, added to the result set, and the page behind it is scanned for further links, which are fed back into toCrawlList. This is a breadth-first search (BFS).
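In Java terms the loop is roughly the following (a minimal sketch rather than the demo's actual code; collectLinks() is a hypothetical stand-in for the link extraction that the real retrieveLinks() method performs):
import java.util.*;
public class BfsSketch {
    // Hypothetical stand-in for the real link extraction (retrieveLinks in the demo).
    static List<String> collectLinks(String url) { return Collections.emptyList(); }
    static List<String> bfsCrawl(String startUrl, int maxUrls) {
        Set<String> crawledList = new HashSet<String>();                 // URLs already processed
        LinkedHashSet<String> toCrawlList = new LinkedHashSet<String>(); // frontier, in insertion order
        toCrawlList.add(startUrl);
        while (!toCrawlList.isEmpty() && crawledList.size() < maxUrls) {
            String url = toCrawlList.iterator().next();  // oldest queued URL
            toCrawlList.remove(url);
            crawledList.add(url);                        // record it in the data set
            for (String link : collectLinks(url)) {      // feed newly found links back in
                if (!crawledList.contains(link)) {
                    toCrawlList.add(link);
                }
            }
        }
        return new ArrayList<String>(crawledList);
    }
}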
Framework:
Code:
As web crawlers go there is nothing special about this demo. The project only needs a limited number of URLs, so there is a hard cap on how many are crawled, and a few efficiency tweaks follow from that. In addition, to keep URL.openStream() from hanging without a response, this was my first attempt at using Java's facilities for putting a timeout on a method call.
The main method:
public static void main(String[] args) {
can_be_stopped = false;
if (args.length != 3) {
System.out
.println("Usage:java SearchCrawler startUrl maxUrl searchString");
}
String[] paramers = new String[3];
paramers[0] = "http://www.taobao.com";
paramers[1] = "10000";
paramers[2] = "java";
int max = Integer.parseInt(paramers[1]);
dapa crawler = new dapa(paramers[0], max, paramers[2]);
Thread search = new Thread(crawler);
System.out.println("Start searching...");
System.out.println("result:");
search.start();
}
The first argument is the start URL; the second is the maximum number of URLs to crawl (as an optimization the code also caps the size of toCrawlList: if, say, I want to crawl 8000 URLs and toCrawlList already holds more than 8000 entries, it stops being extended). The third argument is unused.
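Condensed, the two limits amount to the checks below, taken from crawl() further down (in the full listing the frontier cap is hardcoded to 10000 and is enforced through the can_be_stopped flag):
// At the top of the crawl loop: stop once enough URLs have been collected.
if (maxUrls != -1 && crawledList.size() == maxUrls) {
    break;
}
// After extracting links from a page: once the frontier is large enough,
// stop feeding it; pages already queued are still dequeued and written out.
if (toCrawlList.size() > 10000) {
    can_be_stopped = true;
}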
When downloading a page, to stop openStream() from getting stuck, I use Java's own concurrency utilities to put a timeout on the call, implemented as a proxy-style wrapper method:
private String downloadPage(URL pageUrl) {
try {
// Open connection to URL for reading.
BufferedReader reader = new BufferedReader(new InputStreamReader(
pageUrl.openStream()));
// Read page into buffer.
String line;
StringBuffer pageBuffer = new StringBuffer();
//int count=1;
while ((line = reader.readLine()) != null) {
pageBuffer.append(line);
}
return pageBuffer.toString();
} catch (Exception e) {
// Any error (unreachable host, I/O failure, etc.) is swallowed; null signals a failed download.
}
return null;
}
private String DownloadPage(URL pageUrl){
//String page=null;
final URL thePage=pageUrl;
final ExecutorService exec = Executors.newFixedThreadPool(1);
Callable<String> call = new Callable<String>() {
public String call() throws Exception {
return downloadPage(thePage);
}
};
try {
Future<String> future = exec.submit(call);
// wait at most 30 seconds for the page to download
String obj = future.get(1000 * 30, TimeUnit.MILLISECONDS);
return obj;
} catch (TimeoutException ex) {
System.out.println("====================task time out===============");
ex.printStackTrace();
} catch (Exception e) {
System.out.println("failed to handle.");
e.printStackTrace();
} finally {
// close thread pool (also on the success path, so idle worker threads do not pile up)
exec.shutdown();
}
return null;
}
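For reference, java.net also allows bounding the wait on the connection itself, without the executor wrapper. A minimal sketch of that alternative (not what this demo uses; it assumes the same java.io/java.net imports as the listing below):
// Alternative sketch: per-connection timeouts instead of wrapping the call in a Future.
private String downloadPageWithTimeouts(URL pageUrl) throws IOException {
    URLConnection conn = pageUrl.openConnection();
    conn.setConnectTimeout(10 * 1000); // give up if the TCP connect takes longer than 10 seconds
    conn.setReadTimeout(30 * 1000);    // give up if any single read blocks longer than 30 seconds
    StringBuilder page = new StringBuilder();
    BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));
    String line;
    while ((line = reader.readLine()) != null) {
        page.append(line);
    }
    reader.close();
    return page.toString();
}
Note that setReadTimeout() bounds each individual read rather than the whole download, so the Future.get() cap above still helps against a server that trickles data very slowly.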
To guard against the program dying unexpectedly or timing out partway through, every URL is appended to the end of a file as soon as it is fetched. Of course, this amount of file I/O also costs some efficiency.
File file = new File("D:/scope/url3.txt");
BufferedWriter bw = null;
try {
bw = new BufferedWriter(new FileWriter(file, true));
bw.write(url + "\r\n");
bw.close();
} catch (IOException e) {
e.printStackTrace();
}
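If the per-URL open/close overhead ever becomes a bottleneck, one option (a sketch, not part of the demo) is to open the log once and keep a single writer for the whole crawl, flushing after each URL so progress still survives a crash:
import java.io.*;
// Sketch: an append-only URL log that keeps one writer open for the whole crawl.
class UrlLog {
    private final BufferedWriter writer;
    UrlLog(String path) throws IOException {
        writer = new BufferedWriter(new FileWriter(path, true)); // append mode
    }
    void record(String url) throws IOException {
        writer.write(url + "\r\n");
        writer.flush();   // each URL still reaches disk immediately
    }
    void close() throws IOException {
        writer.close();   // call once the crawl loop ends
    }
}
The crawl loop would create one UrlLog up front and call record(url) where the code currently reopens and closes the file for every URL.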
The full program:
package snippet;
import java.util.*;
import java.net.*;
import java.io.*;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.regex.*;
// Web search crawler
public class dapa implements Runnable {
/*
* disallowListCache caches the URLs that robots are not allowed to crawl.
* The robots exclusion protocol places a robots.txt file in the root directory of a
* web site, listing which pages of the site crawlers must skip. A crawler should
* honour it and avoid those areas. An example robots.txt:
* # robots.txt for http://somehost.com/
* User-agent: *
* Disallow: /cgi-bin/
* Disallow: /registration # Disallow robots on registration page
* Disallow: /login
*/
private HashMap<String, ArrayList<String>> disallowListCache = new HashMap<String, ArrayList<String>>();
ArrayList<String> errorList = new ArrayList<String>();// error messages
ArrayList<String> result = new ArrayList<String>(); // crawl results
ArrayList<String> checklist = new ArrayList<String>();
String startUrl;// starting point of the crawl
int maxUrl;// maximum number of URLs to process
String searchString;// the string to search for (English)
boolean caseSensitive = false;// whether matching is case sensitive
boolean limitHost = false;// whether to restrict the crawl to the starting host
int totcount=0;
private static boolean can_be_stopped = false;
public dapa(String startUrl, int maxUrl, String searchString) {
this.startUrl = startUrl;
this.maxUrl = maxUrl;
this.searchString = searchString;
}
public ArrayList<String> getResult() {
return result;
}
public void run() {// entry point of the crawler thread
try {
crawl(startUrl, maxUrl, searchString, limitHost, caseSensitive);
} catch (Exception e) {
e.printStackTrace();
}
System.out.println("over");
}
// Validate the URL format
private URL verifyUrl(String url) {
// Only handle HTTP URLs.
if (!url.toLowerCase().startsWith("http://"))
return null;
URL verifiedUrl = null;
if (url.matches(".*//.*//.*"))
return null;
try {
verifiedUrl = new URL(url);
} catch (Exception e) {
return null;
}
return verifiedUrl;
}
// Check whether robots.txt allows access to the given URL.
private boolean isRobotAllowed(URL urlToCheck) {
String host = urlToCheck.getHost().toLowerCase();// host of the given URL
// System.out.println("host=" + host);
// Get the cached list of URLs this host disallows
ArrayList<String> disallowList = disallowListCache.get(host);
// If it is not cached yet, download and cache it.
if (disallowList == null) {
disallowList = new ArrayList<String>();
try {
URL robotsFileUrl = new URL("http://" + host + "/robots.txt");
BufferedReader reader = new BufferedReader(
new InputStreamReader(robotsFileUrl.openStream()));
// Read the robots file and build the list of disallowed paths.
String line;
while ((line = reader.readLine()) != null) {
if (line.indexOf("Disallow:") == 0) {// 是否包含"Disallow:"
String disallowPath = line.substring("Disallow:"
.length());// 获取不允许访问路径
// 检查是否有注释。
int commentIndex = disallowPath.indexOf("#");
if (commentIndex != -1) {
disallowPath = disallowPath.substring(0,
commentIndex);// strip the comment
}
disallowPath = disallowPath.trim();
disallowList.add(disallowPath);
}
}
// Cache the disallowed paths for this host.
disallowListCache.put(host, disallowList);
} catch (Exception e) {
return true; // no robots.txt in the site's root directory, so assume access is allowed
}
}
String file = urlToCheck.getFile();
// System.out.println("文件getFile()="+file);
for (int i = 0; i < disallowList.size(); i++) {
String disallow = disallowList.get(i);
if (file.startsWith(disallow)) {
return false;
}
}
return true;
}
private String downloadPage(URL pageUrl) {
try {
// Open connection to URL for reading.
BufferedReader reader = new BufferedReader(new InputStreamReader(
pageUrl.openStream()));
// Read page into buffer.
String line;
StringBuffer pageBuffer = new StringBuffer();
//int count=1;
while ((line = reader.readLine()) != null) {
pageBuffer.append(line);
}
return pageBuffer.toString();
} catch (Exception e) {
// Any error (unreachable host, I/O failure, etc.) is swallowed; null signals a failed download.
}
return null;
}
private String DownloadPage(URL pageUrl){
//String page=null;
final URL thePage=pageUrl;
final ExecutorService exec = Executors.newFixedThreadPool(1);
Callable<String> call = new Callable<String>() {
public String call() throws Exception {
return downloadPage(thePage);
}
};
try {
Future<String> future = exec.submit(call);
// wait at most 30 seconds for the page to download
String obj = future.get(1000 * 30, TimeUnit.MILLISECONDS);
return obj;
} catch (TimeoutException ex) {
System.out.println("====================task time out===============");
ex.printStackTrace();
} catch (Exception e) {
System.out.println("failed to handle.");
e.printStackTrace();
} finally {
// close thread pool (also on the success path, so idle worker threads do not pile up)
exec.shutdown();
}
return null;
}
// Strip "www" from a URL
private String removeWwwFromUrl(String url) {
int index = url.indexOf("://www.");
if (index != -1) {
return url.substring(0, index + 3) + url.substring(index + 7);
}
return (url);
}
// Parse the page and extract its links
private ArrayList<String> retrieveLinks(URL pageUrl, String pageContents,
HashSet crawledList, boolean limitHost,HashSet toCrawlList) throws Exception {
// Compile a regex that matches anchor links.
Pattern p = Pattern.compile("<a\\s+href\\s*=\\s*\"?(.*?)[\"|>]",
Pattern.CASE_INSENSITIVE);
Matcher m = p.matcher(pageContents);
ArrayList<String> linkList = new ArrayList<String>();
while (m.find()) {
String link = m.group(1).trim();
if (link.length() < 1) {
continue;
}
// Skip links that point within the same page.
if (link.charAt(0) == '#') {
continue;
}
if (link.indexOf("mailto:") != -1) {
continue;
}
if (link.toLowerCase().indexOf("javascript") != -1) {
continue;
}
if (link.indexOf("://") == -1) {
continue;
/*
* if (link.charAt(0) == '/') {// handle an absolute path
* link = "http://" + pageUrl.getHost()+":"+pageUrl.getPort()+ link;
* } else {
* String file = pageUrl.getFile();
* if (file.indexOf('/') == -1) {// handle a relative path
* link = "http://" + pageUrl.getHost()+":"+pageUrl.getPort() + "/" + link;
* } else {
* String path =file.substring(0, file.lastIndexOf('/') + 1);
* link = "http://" + pageUrl.getHost() +":"+pageUrl.getPort()+ path + link;
* }
* }
*/
// link=link+"-----------";
}
int index = link.indexOf('#');
if (index != -1) {
link = link.substring(0, index);
}
link = removeWwwFromUrl(link);
URL verifiedLink = verifyUrl(link);
if (verifiedLink == null) {
continue;
}
/* If restricted to one host, skip URLs on other hosts */
if (limitHost
&& !pageUrl.getHost().toLowerCase()
.equals(verifiedLink.getHost().toLowerCase())) {
continue;
}
// normalize the URL
link = SimpleUrl(link);
// Skip links that have already been processed.
if (crawledList.contains(link)) {
continue;
}
if (toCrawlList.contains(link)) {
continue;
}
linkList.add(link);
}
return (linkList);
}
// Check whether the downloaded page contains the search string
// (effectively disabled in this demo: every page counts as a match).
private boolean searchStringMatches(String pageContents,
String searchString, boolean caseSensitive) {
return true;
/*
* String searchContents = pageContents; if (!caseSensitive) {// if not case sensitive
* searchContents = pageContents.toLowerCase(); }
*
*
* Pattern p = Pattern.compile("[\\s]+"); String[] terms =
* p.split(searchString); for (int i = 0; i < terms.length; i++) { if
* (caseSensitive) { if (searchContents.indexOf(terms[i]) == -1) {
* return false; } } else { if
* (searchContents.indexOf(terms[i].toLowerCase()) == -1) { return
* false; } } }
*
* return true;
*/
}
// Perform the actual crawl
public ArrayList<String> crawl(String startUrl, int maxUrls,
String searchString, boolean limithost, boolean caseSensitive)
throws Exception {
System.out.println("searchString=" + searchString);
HashSet<String> crawledList = new HashSet<String>();
LinkedHashSet<String> toCrawlList = new LinkedHashSet<String>();
if (maxUrls < 1) {
errorList.add("Invalid Max URLs value.");
System.out.println("Invalid Max URLs value.");
}
if (searchString.length() < 1) {
errorList.add("Missing Search String.");
System.out.println("Missing search String");
}
if (errorList.size() > 0) {
System.out.println("err!!!");
return errorList;
}
// Strip www from the start URL
startUrl = removeWwwFromUrl(startUrl);
toCrawlList.add(startUrl);
while (toCrawlList.size() > 0) {
if (maxUrls != -1) {
if (crawledList.size() == maxUrls) {
break;
}
}
// Get the URL at the front of the list.
String url = toCrawlList.iterator().next();
// Remove URL from the to crawl list.
toCrawlList.remove(url);
// Convert string url to URL object.
URL verifiedUrl = verifyUrl(url);
// Skip URL if robots are not allowed to access it.
if (!isRobotAllowed(verifiedUrl)) {
continue;
}
// Add the processed URL to crawledList
crawledList.add(url);
if (!can_be_stopped) {
System.out.println("--downloadPage..");
String pageContents = DownloadPage(verifiedUrl);
if (pageContents != null && pageContents.length() > 0) {
// Extract valid links from the page
System.out.println("--retrieveLinks..");
ArrayList<String> links = retrieveLinks(verifiedUrl,
pageContents, crawledList, limitHost,toCrawlList);
// Cap the maximum size of toCrawlList
// if(toCrawlList.size()<100000)
if(!links.isEmpty())
toCrawlList.addAll(links);
// Once toCrawlList exceeds the maximum crawl count, stop adding new URLs to it
if (toCrawlList.size() > 10000)
can_be_stopped = true;
}
}
System.out.println("toCrawList:" + toCrawlList.size());
//if (searchStringMatches(pageContents, searchString,
// caseSensitive)) {
result.add(url);
System.out.println("crawledList:" +crawledList.size());
System.out.println(url);
File file = new File("D:/scope/url3.txt");
BufferedWriter bw = null;
try {
bw = new BufferedWriter(new FileWriter(file, true));
bw.write(url + "\r\n");
bw.close();
} catch (IOException e) {
e.printStackTrace();
}
// }
}
return result;
}
String SimpleUrl(String url) throws Exception {
URL uri =new URL(url);
String result ="http://"+ uri.getHost()+uri.getPath();
return result;
}
// Main method
public static void main(String[] args) {
can_be_stopped = false;
if (args.length != 3) {
System.out
.println("Usage:java SearchCrawler startUrl maxUrl searchString");
}
String[] paramers = new String[3];
paramers[0] = "http://www.taobao.com";
paramers[1] = "10000";
paramers[2] = "java";
int max = Integer.parseInt(paramers[1]);
dapa crawler = new dapa(paramers[0], max, paramers[2]);
Thread search = new Thread(crawler);
System.out.println("Start searching...");
System.out.println("result:");
search.start();
}
}
Output:
Usage:java SearchCrawler startUrl maxUrl searchString
Start searching...
result:
searchString=java
--downloadPage..
--retrieveLinks..
toCrawList:53
crawledList:1
http://taobao.com
--downloadPage..
--retrieveLinks..
toCrawList:84
crawledList:2
http://style.taobao.com
--downloadPage..
--retrieveLinks..
toCrawList:92
crawledList:3
http://daogou.taobao.com/market/236/searchAuction.htm
--downloadPage..
--retrieveLinks..
toCrawList:107
crawledList:4
http://miiee.taobao.com/themes/theme_333.htm
--downloadPage..
toCrawList:104
crawledList:5
http://reg.taobao.com/member/new_register.jhtml
--downloadPage..
toCrawList:103
crawledList:6
http://i.taobao.com/my_taobao.htm
--downloadPage..
toCrawList:102
crawledList:7
http://vip.taobao.com/growth_info.htm
..