先贴上代码
package com.susheng.MoneyMaker;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileOutputStream;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.net.URL;
import java.net.URLEncoder;
import org.apache.http.HttpEntity;
import org.apache.http.HttpResponse;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.DefaultHttpClient;
public class FetchWebData
{
    // Directory where downloaded page content is stored (project-level constant).
    private static final String DirectoryPath = com.susheng.MoneyMaker.common.FilePath.DownLoadDataPath;

    /**
     * Downloads the HTML content of the given URL to the local disk.
     * The target file name is the URL-encoded form of the URL itself,
     * placed under {@code DirectoryPath}.
     *
     * @param urlStr the URL whose content should be downloaded
     * @throws Exception if the HTTP request or the file I/O fails
     */
    public static void downloadPageContent(String urlStr) throws Exception
    {
        HttpClient httpclient = new DefaultHttpClient();
        HttpGet httpget = new HttpGet(urlStr);
        HttpResponse response = httpclient.execute(httpget);
        System.out.println(response.getStatusLine().getStatusCode());
        HttpEntity entity = response.getEntity();

        // Create the target directory if it does not exist yet.
        File dir = new File(DirectoryPath);
        if (!dir.exists())
        {
            dir.mkdirs();
        }

        FileOutputStream os = new FileOutputStream(DirectoryPath
                + URLEncoder.encode(urlStr, "utf-8"));
        try
        {
            if (entity != null)
            {
                InputStream instream = entity.getContent();
                try
                {
                    int l;
                    byte[] tmp = new byte[2048];
                    while ((l = instream.read(tmp)) != -1)
                    {
                        // BUG FIX: write only the l bytes actually read.
                        // The original wrote tmp.length bytes each time, so the
                        // final (short) chunk appended stale data from the
                        // previous iteration, corrupting the saved file.
                        os.write(tmp, 0, l);
                    }
                } finally
                {
                    // Close the entity stream even if a read/write fails,
                    // releasing the HTTP connection.
                    instream.close();
                }
            }
        } finally
        {
            // Close the output file on all paths; the original leaked it
            // whenever an exception was thrown mid-download.
            os.close();
        }
    }

    /**
     * Fetches the content of a web page as a single string.
     * Line separators are dropped: lines are concatenated directly,
     * matching the original behavior of this method.
     *
     * @param strUrl the page URL
     * @return the page content, or {@code null} if any error occurs
     */
    public String getPageContent(String strUrl)
    {
        BufferedReader reader = null;
        try
        {
            // Build the URL object from strUrl.
            URL pageUrl = new URL(strUrl);
            // BUG FIX: decode explicitly as UTF-8 instead of the platform
            // default charset, which made results machine-dependent.
            reader = new BufferedReader(new InputStreamReader(
                    pageUrl.openStream(), "UTF-8"));
            String line;
            StringBuffer pageBuffer = new StringBuffer();
            // Read the page line by line; newlines are intentionally not kept.
            while ((line = reader.readLine()) != null)
            {
                pageBuffer.append(line);
            }
            return pageBuffer.toString();
        } catch (Exception e)
        {
            // BUG FIX: the original swallowed the exception behind an opaque
            // message, which is exactly why failures such as
            // "Connection refused" were impossible to diagnose. Report the
            // full failure; the method still returns null on error, so the
            // caller-visible contract is unchanged.
            System.err.println("Failed to fetch " + strUrl);
            e.printStackTrace();
        } finally
        {
            if (reader != null)
            {
                try
                {
                    reader.close();
                } catch (Exception ignored)
                {
                    // Nothing useful can be done if close itself fails.
                }
            }
        }
        return null;
    }

    public static void main(String[] args) throws Exception
    {
        downloadPageContent("http://www.yanzhenlou.com");
    }
}
在使用 HttpClient 下载网页内容时遇到“连接被拒绝”(Connection refused)异常。请问应该如何定位这个异常的原因,并妥善处理它?求指教,感谢!