爬取一个网页的内容,当然相对路径以及样式都复制不过来,只能复制这个文件的内容。
先来看将所有异常用 throws 声明抛出的写法:
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.InputStream;
import java.net.URL;
public class Src {
    /**
     * Downloads the raw HTML of a fixed page and saves it to d:\a.html.
     * All checked exceptions are propagated to the caller via {@code throws}.
     *
     * Note: relative links and stylesheets are not fetched — only this
     * one document's bytes are copied.
     */
    public static void main(String[] args) throws Exception { // propagate all exceptions
        URL url = new URL("http://www.whxy.edu.cn/mdxw/xxxw.htm"); // target URL; expected to be an HTML page
        // try-with-resources guarantees both streams are closed, even if an
        // exception is thrown mid-copy (the original leaked them on error).
        try (BufferedInputStream bis = new BufferedInputStream(url.openStream()); // buffered for speed
             BufferedOutputStream bos = new BufferedOutputStream(
                     new FileOutputStream(new File("d:\\a.html")))) {
            byte[] b = new byte[1024]; // read up to 1024 bytes per call
            int len;
            while ((len = bis.read(b)) != -1) { // -1 signals end of stream
                // BUG FIX: write only the 'len' bytes actually read.
                // The original 'bos.write(b)' wrote the whole buffer every time,
                // appending stale bytes from the previous chunk on the last read.
                bos.write(b, 0, len);
            }
        }
    }
}
//下面是将所有的异常try-catch
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.MalformedURLException;
import java.net.URL;
public class Src {
    /**
     * Downloads the raw HTML of a fixed page and saves it to d:\a.html.
     * Every checked exception is handled locally with try-catch
     * (contrast with the {@code throws} version above).
     */
    public static void main(String[] args) {
        URL url = null;
        try {
            url = new URL("http://www.whxy.edu.cn/mdxw/xxxw.htm");
        } catch (MalformedURLException e) {
            e.printStackTrace();
            // BUG FIX: without this return, 'url' stays null and the
            // openStream() call below would throw a NullPointerException.
            return;
        }
        InputStream is = null;
        try {
            is = url.openStream();
        } catch (IOException e) {
            e.printStackTrace();
            // BUG FIX: abort if the connection failed — wrapping a null
            // stream below would NPE on the first read.
            return;
        }
        BufferedInputStream bis = new BufferedInputStream(is); // buffered for speed
        BufferedOutputStream bos = null;
        try {
            bos = new BufferedOutputStream(new FileOutputStream(new File("d:\\a.html")));
        } catch (FileNotFoundException e) {
            e.printStackTrace();
            // 'bos' stays null; the copy loop below is skipped so we still
            // reach the finally block and close 'bis'.
        }
        byte[] b = new byte[1024]; // read up to 1024 bytes per call
        try {
            if (bos != null) { // skip the copy if the output file could not be opened
                int len = bis.read(b);
                while (len != -1) { // -1 signals end of stream
                    // BUG FIX: write only the 'len' bytes actually read;
                    // 'bos.write(b)' emitted stale buffer bytes on the final chunk.
                    bos.write(b, 0, len);
                    len = bis.read(b);
                }
            }
        } catch (IOException e) {
            e.printStackTrace();
        } finally {
            // Close streams in reverse order of creation; each close is
            // null-guarded and individually caught so one failure does not
            // prevent the other stream from closing.
            try {
                if (bos != null) {
                    bos.close();
                }
            } catch (IOException e) {
                e.printStackTrace();
            }
            try {
                if (bis != null) {
                    bis.close();
                }
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }
}
后期再配合正则表达式,就可以从页面中提取出我们想要的信息了。