Crawler source <repost>

Spidr is a Ruby web-crawling library that can crawl an entire site, multiple sites, or a single link completely to your local machine.

http://opensource.csdn.net/p/spidr

A roundup of web crawlers, worth bookmarking

http://apps.hi.baidu.com/share/detail/105705

wiki

http://wiki.blueidea.com/index.php?title=%E7%88%AC%E8%99%AB&diff=6679&oldid=prev

A super-simple Web crawler program (repost) & web crawler

http://www.lydia-diamond.com/pachong/20100313/vaz8w2z5f1q255972593.html

package test;
/**
* @author Jack.Wang
*
*/
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.URL;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

// A search Web crawler
public class SearchCrawler implements Runnable {

/*
* disallowListCache caches the URLs that robots are not allowed to crawl.
* The robots protocol uses a robots.txt file in the root directory of a Web site
* to specify which pages of the site are off-limits to crawlers.
* A crawler should skip those areas while crawling. Here is an example robots.txt:
*
* # robots.txt for http://somehost.com/
* User-agent: *
* Disallow: /cgi-bin/
* Disallow: /registration # Disallow robots on registration page
* Disallow: /login
*/

private HashMap<String, ArrayList<String>> disallowListCache = new HashMap<String, ArrayList<String>>();
ArrayList<String> errorList = new ArrayList<String>(); // error messages
ArrayList<String> result = new ArrayList<String>(); // search results
String startUrl; // starting point of the search
int maxUrl; // maximum number of URLs to process
String searchString; // the string to search for (English)
boolean caseSensitive = false; // whether the search is case-sensitive
boolean limitHost = false; // whether to restrict the search to the starting host

public SearchCrawler(String startUrl, int maxUrl, String searchString) {
this.startUrl = startUrl;
this.maxUrl = maxUrl;
this.searchString = searchString;
}

public ArrayList<String> getResult() {
return result;
}

public void run() { // start the search thread
crawl(startUrl, maxUrl, searchString, limitHost, caseSensitive);
}

// Verify the URL format
private URL verifyUrl(String url) {
// Only handle HTTP URLs.
if (!url.toLowerCase().startsWith("http://"))
return null;
URL verifiedUrl = null;
try {
verifiedUrl = new URL(url);
} catch (Exception e) {
return null;
}
return verifiedUrl;
}

// Check whether robots are allowed to access the given URL.
private boolean isRobotAllowed(URL urlToCheck) {
String host = urlToCheck.getHost().toLowerCase(); // get the host of the given URL
// System.out.println("host=" + host);

// Get the cached list of URLs this host disallows
ArrayList<String> disallowList = disallowListCache.get(host);

// If not cached yet, download and cache it.
if (disallowList == null) {
disallowList = new ArrayList<String>();
try {
URL robotsFileUrl = new URL("http://" + host + "/robots.txt");
BufferedReader reader = new BufferedReader(
new InputStreamReader(robotsFileUrl.openStream()));

// Read the robots file and build the list of disallowed paths.
String line;
while ((line = reader.readLine()) != null) {
if (line.indexOf("Disallow:") == 0) { // line starts with "Disallow:"
String disallowPath = line.substring("Disallow:"
.length()); // extract the disallowed path

// Check for a trailing comment.
int commentIndex = disallowPath.indexOf("#");
if (commentIndex != -1) {
disallowPath = disallowPath.substring(0,
commentIndex); // strip the comment
}

disallowPath = disallowPath.trim();
disallowList.add(disallowPath);
}
}

// Cache the disallowed paths for this host.
disallowListCache.put(host, disallowList);
} catch (Exception e) {
return true; // no robots.txt at the site root, so assume access is allowed
}
}

String file = urlToCheck.getFile();
// System.out.println("getFile()=" + file);
for (int i = 0; i < disallowList.size(); i++) {
String disallow = disallowList.get(i);
if (file.startsWith(disallow)) {
return false;
}
}

return true;
}

private String downloadPage(URL pageUrl) {
try {
// Open connection to URL for reading.
BufferedReader reader = new BufferedReader(new InputStreamReader(
pageUrl.openStream()));

// Read page into buffer.
String line;
StringBuffer pageBuffer = new StringBuffer();
while ((line = reader.readLine()) != null) {
pageBuffer.append(line);
}

return pageBuffer.toString();
} catch (Exception e) {
// Ignore download errors; the caller treats a null return as failure.
}

return null;
}

// Remove "www" from a URL
private String removeWwwFromUrl(String url) {
int index = url.indexOf("://www.");
if (index != -1) {
return url.substring(0, index + 3) + url.substring(index + 7);
}

return (url);
}

// Parse the page and extract its links
private ArrayList<String> retrieveLinks(URL pageUrl, String pageContents,
HashSet<String> crawledList, boolean limitHost) {
// Compile the link-matching pattern as a regular expression.
Pattern p = Pattern.compile("<a\\s+href\\s*=\\s*\"?(.*?)[\"|>]",
Pattern.CASE_INSENSITIVE);
Matcher m = p.matcher(pageContents);

ArrayList<String> linkList = new ArrayList<String>();
while (m.find()) {
String link = m.group(1).trim();

if (link.length() < 1) {
continue;
}

// Skip links that point back into the same page (anchors).
if (link.charAt(0) == '#') {
continue;
}

if (link.indexOf("mailto:") != -1) {
continue;
}

if (link.toLowerCase().indexOf("javascript") != -1) {
continue;
}

if (link.indexOf("://") == -1) {
// Build "http://host" or "http://host:port", omitting the port when it is not explicit
// (URL.getPort() returns -1 in that case, which must not end up in the link).
String base = "http://" + pageUrl.getHost()
+ (pageUrl.getPort() == -1 ? "" : ":" + pageUrl.getPort());
if (link.charAt(0) == '/') { // root-relative address
link = base + link;
} else {
String file = pageUrl.getFile();
if (file.indexOf('/') == -1) { // relative address at the site root
link = base + "/" + link;
} else { // relative address under the current path
String path = file.substring(0,
file.lastIndexOf('/') + 1);
link = base + path + link;
}
}
}

int index = link.indexOf('#');
if (index != -1) {
link = link.substring(0, index);
}

link = removeWwwFromUrl(link);

URL verifiedLink = verifyUrl(link);
if (verifiedLink == null) {
continue;
}

/* If limiting to one host, skip URLs that are on a different host */
if (limitHost
&& !pageUrl.getHost().toLowerCase().equals(
verifiedLink.getHost().toLowerCase())) {
continue;
}

// Skip links that have already been crawled.
if (crawledList.contains(link)) {
continue;
}

linkList.add(link);
}

return (linkList);
}

// Check whether the downloaded page content contains the specified search string

private boolean searchStringMatches(String pageContents,
String searchString, boolean caseSensitive) {
String searchContents = pageContents;
if (!caseSensitive) { // case-insensitive search
searchContents = pageContents.toLowerCase();
}

Pattern p = Pattern.compile("[\\s]+");
String[] terms = p.split(searchString);
for (int i = 0; i < terms.length; i++) {
if (caseSensitive) {
if (searchContents.indexOf(terms[i]) == -1) {
return false;
}
} else {
if (searchContents.indexOf(terms[i].toLowerCase()) == -1) {
return false;
}
}
}

return true;
}

// Perform the actual crawl and search
public ArrayList<String> crawl(String startUrl, int maxUrls,
String searchString, boolean limithost, boolean caseSensitive) {

HashSet<String> crawledList = new HashSet<String>();
LinkedHashSet<String> toCrawlList = new LinkedHashSet<String>();

if (maxUrls < 1) {
errorList.add("Invalid Max URLs value.");
System.out.println("Invalid Max URLs value.");
}

if (searchString.length() < 1) {
errorList.add("Missing Search String.");
System.out.println("Missing search String");
}

if (errorList.size() > 0) {
System.out.println("err!!!");
return errorList;
}

// Remove www from the start URL
startUrl = removeWwwFromUrl(startUrl);

toCrawlList.add(startUrl);
while (toCrawlList.size() > 0) {

if (maxUrls != -1) {
if (crawledList.size() == maxUrls) {
break;
}
}

// Get the next URL from the front of the to-crawl list (FIFO order).
String url = toCrawlList.iterator().next();

// Remove URL from the to crawl list.
toCrawlList.remove(url);

// Convert the string url to a URL object; skip it if the URL is malformed.
URL verifiedUrl = verifyUrl(url);
if (verifiedUrl == null) {
continue;
}

// Skip the URL if robots are not allowed to access it.
if (!isRobotAllowed(verifiedUrl)) {
continue;
}

// Add the processed URL to crawledList
crawledList.add(url);
String pageContents = downloadPage(verifiedUrl);

if (pageContents != null && pageContents.length() > 0) {
// Retrieve the valid links from the page, honoring the limithost flag passed to crawl()
ArrayList<String> links = retrieveLinks(verifiedUrl,
pageContents, crawledList, limithost);

toCrawlList.addAll(links);

if (searchStringMatches(pageContents, searchString,
caseSensitive)) {
result.add(url);
System.out.println(url);
}
}

}
return result;
}

// Main method
public static void main(String[] args) {
SearchCrawler crawler = new SearchCrawler("http://www.blogjava.net/Jack2007/", 20,"jack");
Thread search = new Thread(crawler);
System.out.println("Start searching...");
System.out.println("result:");
search.start();
try {
search.join();
} catch (InterruptedException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
}
}


Web crawler

http://blog.csdn.net/sfz_roger/archive/2008/04/10/2277125.aspx


Notes on the HTTP protocol follow:

RFC 2616 mainly describes the HTTP 1.1 protocol. The description below does not implement every aspect of it; it only presents a minimal implementation that is enough to fetch any HTTP page (it cannot fetch HTTPS).

1. First, submit a URL address. There are two basic modes: a plain GET page fetch and a POST data submission.

Create an HttpWebRequest instance, where uri is the URL address of the page:
HttpWebRequest webrequest = (HttpWebRequest) WebRequest.Create(uri);

KeepAlive indicates that the HTTP connection is a persistent (keep-alive) connection:
webrequest.KeepAlive = true;

If needed, set the referer address; it is mainly used to get past sites that block requests referred from other sites, e.g. login pages often check it:
if(referer!=null)
{
webrequest.Referer=referer;
}

Choose the request method. GET and POST are the two usual methods; HEAD is rarely used:
switch(RequestMethod)
{
case 1:
webrequest.Method="GET";
break;
case 2:
webrequest.Method="POST";
break;
case 3:
webrequest.Method="HEAD";
break;
default:
webrequest.Method="GET";
break;
}

Set the User-Agent. Some sites restrict access and refuse requests whose User-Agent is empty:
webrequest.UserAgent = "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; .NET CLR 1.1.4322; .NET CLR 2.0.50215; fqSpider)";

Add any other HTTP header information; collHeader is a NameValueCollection:
if(collHeader!=null&&collHeader.Count>0)
{
int iCount = collHeader.Count;
string key;
string keyvalue;

for (int i=0; i < iCount; i++)
{
key = collHeader.Keys[i];
keyvalue = collHeader[i];
webrequest.Headers.Add(key, keyvalue);
}
}

Set the Content-Type: application/x-www-form-urlencoded for POST, text/html for GET:
if(webrequest.Method=="POST")
{
webrequest.ContentType="application/x-www-form-urlencoded";
}
else
{
webrequest.ContentType = "text/html";
}


Set the proxy server address and port:
if ((ProxyServer!=null) &&(ProxyServer.Length > 0))
{
webrequest.Proxy = new
WebProxy(ProxyServer,ProxyPort);
}

Set whether automatic redirects are allowed:
webrequest.AllowAutoRedirect = true;

Set up Basic authentication for login:
if (NwCred)
{
CredentialCache wrCache =
new CredentialCache();
wrCache.Add(new Uri(uri),"Basic",
new NetworkCredential(UserName,UserPwd));
webrequest.Credentials = wrCache;
}

Set the request's cookie container:
webrequest.CookieContainer=Cookies;
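
Cookies here is assumed to be a CookieContainer that is created once and shared by every request in the same session, so that cookies set by one response are automatically sent back with the next request. A minimal sketch of that assumption (loginUri and pageUri are hypothetical):

// Hypothetical session setup: one shared CookieContainer reused across requests.
CookieContainer Cookies = new CookieContainer();

HttpWebRequest loginRequest = (HttpWebRequest)WebRequest.Create(loginUri);
loginRequest.CookieContainer = Cookies;  // Set-Cookie headers from the response are stored here

HttpWebRequest pageRequest = (HttpWebRequest)WebRequest.Create(pageUri);
pageRequest.CookieContainer = Cookies;   // the stored cookies are sent with this request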

Set the POST data:
byte[] bytes = Encoding.ASCII.GetBytes(RequestData);
webrequest.ContentLength=bytes.Length;

Stream oStreamOut = webrequest.GetRequestStream();
oStreamOut.Write(bytes,0,bytes.Length);
oStreamOut.Close();
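
The steps above only build and send the request; to complete the exchange the response still has to be read. A minimal sketch, reusing the webrequest built above (the GB2312 encoding is an assumption borrowed from the examples below):

// Send the request and read the response body (error handling omitted for brevity).
HttpWebResponse webresponse = (HttpWebResponse)webrequest.GetResponse();
StreamReader respReader = new StreamReader(webresponse.GetResponseStream(),
    System.Text.Encoding.GetEncoding("GB2312"));
string responseHtml = respReader.ReadToEnd(); // the page source
respReader.Close();
webresponse.Close();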

The complete code examples follow:

1.

using System.IO;
using System.Net;
using System.Text.RegularExpressions;
namespace datagrid_study
{

public class WebForm3 : System.Web.UI.Page
{
public WebClient web=new WebClient();
private void Page_Load(object sender, System.EventArgs e)
{
// Put user code to initialize the page here

Stream str;
str=web.OpenRead("http://news.XXXXXX.com");
StreamReader read=new StreamReader(str,System.Text.Encoding.GetEncoding("GB2312"));
string html=read.ReadToEnd();
string aaa=html.ToString();
string bbb=Regex.Split(aaa,"<td colspan=\"2\">",RegexOptions.IgnoreCase)[1];
string ccc=Regex.Split(bbb,"</td>",RegexOptions.IgnoreCase)[0];
Response.Write(ccc);
}
}
}

2.

// Get the HTML source of a page from its URL
private string GetWebContent1(string Url)
{
string strResult="";
try
{
HttpWebRequest request = (HttpWebRequest)WebRequest.Create(Url);
// declare an HttpWebRequest request
request.Timeout = 30000;
// set the connection timeout (30 seconds)
request.Headers.Set("Pragma", "no-cache");
HttpWebResponse response = (HttpWebResponse)request.GetResponse();
Stream streamReceive = response.GetResponseStream();
System.Text.Encoding encoding = System.Text.Encoding.GetEncoding("GB2312");
StreamReader streamReader = new StreamReader(streamReceive, encoding);
strResult = streamReader.ReadToEnd();
}
catch
{
return "";
}
return strResult;
}

3.

public class WebForm1 : System.Web.UI.Page
{
protected System.Web.UI.WebControls.DataGrid dgData;

private void Page_Load(object sender, System.EventArgs e)
{
// Put user code to initialize the page here
dgData.DataSource=getInfo();
dgData.DataBind();

}

// Get the HTML source of a page from its URL
private string GetWebContent1(string Url)
{
string strResult="";
try
{
HttpWebRequest request = (HttpWebRequest)WebRequest.Create(Url);
// declare an HttpWebRequest request
request.Timeout = 30000;
// set the connection timeout (30 seconds)
request.Headers.Set("Pragma", "no-cache");
HttpWebResponse response = (HttpWebResponse)request.GetResponse();
Stream streamReceive = response.GetResponseStream();
System.Text.Encoding encoding = System.Text.Encoding.GetEncoding("GB2312");
StreamReader streamReader = new StreamReader(streamReceive, encoding);
strResult = streamReader.ReadToEnd();
}
catch
{
return "";
}
return strResult;
}


// Get the hyperlinks, article titles, and content
private DataTable getInfo()
{
// create the DataTable
DataTable dt = new DataTable();
dt.Columns.Add("title",typeof(string));
dt.Columns.Add("URL",typeof(string));
dt.Columns.Add("content",typeof(string));
dt.Columns.Add("newsdate",typeof(string));
dt.TableName="newsthief";


string html=GetWebContent1("http://www.xxxxxxxxx");

string strPattern0=@"<td width=84% >";

// count the number of matches
int Count=0;
MatchCollection Matches=Regex.Matches(html,strPattern0,RegexOptions.IgnoreCase|RegexOptions.Compiled);
foreach(Match NextMatch in Matches)
{
Count++;

}


string sHtml=html;
string strPattern=@"a[\s]+href=(?<Link>[^\s>]+)[^>]*>(?<Text>[^<]*)</a>";

for(int j=0;j<Count-1;j++)//Count-1
{

string sTemp=Regex.Split(sHtml,"<td width=84% >",RegexOptions.IgnoreCase)[j+1];
string sHref=Regex.Split(sTemp,"</td>",RegexOptions.IgnoreCase)[0];
string sDateTemp=Regex.Split(sTemp,"</td>",RegexOptions.IgnoreCase)[1];
string sDate=Regex.Split(sDateTemp,"<td Width=12% >",RegexOptions.IgnoreCase)[1];
Matches=Regex.Matches(sHref,strPattern,RegexOptions.IgnoreCase|RegexOptions.Compiled);
foreach(Match NextMatch in Matches)
{
string URL="http://www.china-insurance.com/news-center/"+NextMatch.Groups["Link"].Value.ToString().Trim();
string title=NextMatch.Groups["Text"].Value.ToString().Trim();
string htmlContent=GetWebContent1(URL);
string sContentTemp=Regex.Split(htmlContent,"<table width=\"99%\" height=\"100%\" border=\"0\" cellspacing=\"0\" cellpadding=\"0\" align=\"center\">",RegexOptions.IgnoreCase)[1];
string sContent=Regex.Split(sContentTemp,"</table>",RegexOptions.IgnoreCase)[0];
sContentTemp=Regex.Split(sContent,"<font id=zoom>",RegexOptions.IgnoreCase)[2];
sContent=Regex.Split(sContentTemp,"</font>",RegexOptions.IgnoreCase)[0];
// strip <img ...> tags from the content
string sContent1=Regex.Replace(sContent,@"<img[\s\S]*?>","&nbsp;",RegexOptions.IgnoreCase|RegexOptions.Compiled);
DataRow dr = dt.NewRow();
dr["title"]=title;
dr["URL"]=URL;
dr["content"]=sContent1;
dr["newsdate"]=sDate;
dt.Rows.Add(dr);
}
}
return dt;


}
}