URI与URL
URI是通用资源标识符,由三部分组成
1. 访问资源命名机制
2. 存放资源的主机名
3. 资源本身的名称
而URL是URI的子集,称为统一资源定位符,由三部分组成
1. 协议
2. 主机IP地址
3. 主机资源的具体地址,如目录与文件名
爬虫最主要的处理对象就是URL。
抓取网页的工具
Java语言是为网络而生的语言,Java将网络资源看成一种文件,使对网络资源的访问与获取像对文件操作一样简单。java.net.*包下有关于Java网络操作的工具类,但是由于使用原生API代码量较大,这里选择使用Apache的HttpClient包来抓取内容,HttpClient顾名思义就是客户端,用其访问网络资源本质就是模拟IE客户端行为。
需要注意的是,Apache的HttpClient在4.x版本变成HttpComponents下面的子工具包,与原来的3.x在API上有较大区别。
抓取网页示例
下面是我使用HttpClient3.x和4.x两个版本API写的一段访问指定站点,获取其状态码,并简单抓取和保存网页内容的代码,供比较参考。
- HttpClient3.x
package me.zzx.example;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import org.apache.commons.httpclient.Header;
import org.apache.commons.httpclient.HttpClient;
import org.apache.commons.httpclient.HttpStatus;
import org.apache.commons.httpclient.NameValuePair;
import org.apache.commons.httpclient.methods.PostMethod;
/**
 * Demo crawler using Apache Commons HttpClient 3.x: POSTs to a URL, saves the
 * response body to a local file, and follows 3XX redirects recursively.
 */
public class RetrivePage {
    private static HttpClient httpClient = new HttpClient();

    /** Configures an HTTP proxy for all subsequent requests. */
    @SuppressWarnings("unused")
    private static void setProxy(String host, int port) {
        httpClient.getHostConfiguration().setProxy(host, port);
    }

    /**
     * Downloads the page at {@code path} via POST and saves the body to a file
     * named after the last path segment (suffixed with "test1.html").
     *
     * @param path the URL to fetch
     * @return {@code true} if the page was fetched and saved, {@code false} otherwise
     * @throws IOException on network or file I/O failure
     */
    public static boolean downloadPage(String path) throws IOException {
        PostMethod post = new PostMethod(path);
        // Form parameters sent with the POST request
        NameValuePair[] postData = new NameValuePair[2];
        postData[0] = new NameValuePair("username", "zzx");
        postData[1] = new NameValuePair("password", "*******");
        post.addParameters(postData);
        try {
            // Execute and inspect the status code
            int statusCode = httpClient.executeMethod(post);
            System.out.println(statusCode);
            if (statusCode == HttpStatus.SC_OK) {
                // Derive the output file name from the last path segment
                String filename = path.substring(path.lastIndexOf('/') + 1) + "test1.html";
                InputStream input = post.getResponseBodyAsStream();
                OutputStream output = new FileOutputStream(filename);
                // try/finally guarantees both streams are closed even if the copy
                // fails (fix: the original leaked both streams on any I/O error)
                try {
                    byte[] buffer = new byte[4096];
                    int read;
                    while ((read = input.read(buffer)) >= 0) {
                        output.write(buffer, 0, read);
                    }
                } finally {
                    input.close();
                    output.close();
                }
                return true;
            } else if (statusCode == HttpStatus.SC_MOVED_PERMANENTLY || statusCode == HttpStatus.SC_MOVED_TEMPORARILY
                    || statusCode == HttpStatus.SC_SEE_OTHER || statusCode == HttpStatus.SC_TEMPORARY_REDIRECT) {
                // 3XX: follow the Location header to the new URL
                Header header = post.getResponseHeader("location");
                if (header != null) {
                    String newUrl = header.getValue();
                    // Fix: the original followed the redirect ONLY when the Location
                    // value was null/empty, ignoring every real redirect target.
                    if (newUrl == null || newUrl.equals("")) {
                        newUrl = "/";
                    }
                    // Fix: propagate the recursive result instead of discarding it
                    return downloadPage(newUrl);
                }
            }
            return false;
        } finally {
            // Always return the connection to the pool (fix: previously never released)
            post.releaseConnection();
        }
    }

    public static void main(String[] args) {
        // Fetch the Alibaba home page as a demo
        try {
            RetrivePage.downloadPage("https://www.alibaba.com/");
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
- HttpClient4.x
package me.zzx.example;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.List;
import org.apache.http.Header;
import org.apache.http.HttpHost;
import org.apache.http.HttpResponse;
import org.apache.http.HttpStatus;
import org.apache.http.NameValuePair;
import org.apache.http.client.HttpClient;
import org.apache.http.client.entity.UrlEncodedFormEntity;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.conn.params.ConnRoutePNames;
import org.apache.http.impl.client.DefaultHttpClient;
import org.apache.http.message.BasicNameValuePair;
/**
 * Demo crawler using Apache HttpComponents HttpClient 4.x: POSTs to a URL,
 * saves the response body to a local file, and follows 3XX redirects recursively.
 */
public class RetrivePage {
    private static HttpClient httpClient = new DefaultHttpClient();

    /** Configures an HTTP proxy for all subsequent requests. */
    @SuppressWarnings("unused")
    private static void setProxy(String host, int port) {
        HttpHost proxy = new HttpHost(host, port);
        httpClient.getParams().setParameter(ConnRoutePNames.DEFAULT_PROXY, proxy);
    }

    /**
     * Downloads the page at {@code path} via POST and saves the body to a file
     * named after the last path segment (suffixed with "test2.html").
     *
     * @param path the URL to fetch
     * @return {@code true} if the page was fetched and saved, {@code false} otherwise
     * @throws IOException on network or file I/O failure
     */
    public static boolean downloadPage(String path) throws IOException {
        HttpPost post = new HttpPost(path);
        // Form parameters sent with the POST request, URL-encoded as UTF-8
        List<NameValuePair> params = new ArrayList<NameValuePair>();
        params.add(new BasicNameValuePair("username", "zzx"));
        params.add(new BasicNameValuePair("password", "******"));
        post.setEntity(new UrlEncodedFormEntity(params, "utf-8"));
        // Execute and inspect the status code
        HttpResponse response = httpClient.execute(post);
        int statusCode = response.getStatusLine().getStatusCode();
        if (statusCode == HttpStatus.SC_OK) {
            // Derive the output file name from the last path segment
            String filename = path.substring(path.lastIndexOf('/') + 1) + "test2.html";
            InputStream input = response.getEntity().getContent();
            OutputStream output = new FileOutputStream(filename);
            // try/finally guarantees both streams are closed even if the copy
            // fails (fix: the original leaked both streams on any I/O error)
            try {
                byte[] buffer = new byte[4096];
                int read;
                while ((read = input.read(buffer)) >= 0) {
                    output.write(buffer, 0, read);
                }
            } finally {
                input.close();
                output.close();
            }
            return true;
        } else if (statusCode == HttpStatus.SC_MOVED_PERMANENTLY || statusCode == HttpStatus.SC_MOVED_TEMPORARILY
                || statusCode == HttpStatus.SC_SEE_OTHER || statusCode == HttpStatus.SC_TEMPORARY_REDIRECT) {
            // Fix: read the Location header from the RESPONSE; the original read
            // post.getLastHeader("location"), i.e. the request's own headers,
            // which never contain the redirect target.
            Header header = response.getFirstHeader("location");
            if (header != null) {
                String newUrl = header.getValue();
                // Fix: the original followed the redirect ONLY when the Location
                // value was null/empty, ignoring every real redirect target.
                if (newUrl == null || newUrl.equals("")) {
                    newUrl = "/";
                }
                // Fix: propagate the recursive result instead of discarding it
                return downloadPage(newUrl);
            }
        }
        return false;
    }

    public static void main(String[] args) {
        // Fetch the Alibaba home page as a demo
        try {
            RetrivePage.downloadPage("https://www.alibaba.com/");
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
HTTP状态码
注意到,在代码中对状态码分成两种处理。第一个条件语句处理的是状态码为2XX的情况,此时成功访问网页,则开始抓取网页内容;第二个条件语句处理的是状态码为3XX的情况,此时网页可能发生了重定向,网络资源仍有可能访问到,故进行了进一步处理。另外还有常见的404之类的4XX和5XX状态码,是典型的访问失败状态码,故最后return false。