// 第一种方法
//这种方法是用apache提供的包,简单方便
//但是要用到以下包:commons-codec-1.4.jar
// commons-httpclient-3.1.jar
// commons-logging-1.0.4.jar
/**
 * Method 1: fetches the page at {@code url} with Apache Commons HttpClient 3.x,
 * prints a plain-text rendering of it, and returns the raw page content.
 *
 * Requires commons-codec-1.4.jar, commons-httpclient-3.1.jar and
 * commons-logging-1.0.4.jar on the classpath.
 *
 * @param url   the page to fetch (requested via POST)
 * @param param optional keyword; only used if the commented-out form-data
 *              code below is re-enabled
 * @return the page body re-decoded as gb2312, or null on failure
 */
public static String createhttpClient(String url, String param) {
    HttpClient client = new HttpClient();
    String response = null;
    PostMethod postMethod = new PostMethod(url);
    // To POST the keyword as form data, re-enable this:
    // NameValuePair[] data = { new NameValuePair("keyword",
    //         new String(param.getBytes("gb2312"), "ISO-8859-1")) };
    // postMethod.setRequestBody(data);
    try {
        int statusCode = client.executeMethod(postMethod);
        if (statusCode != 200) {
            // Surface non-OK responses instead of silently ignoring them.
            System.err.println("HTTP request failed, status = " + statusCode);
        }
        // The body arrives as ISO-8859-1 bytes; re-decode them as gb2312.
        // "gb2312" must match the charset the target page is encoded in.
        response = new String(postMethod.getResponseBodyAsString()
                .getBytes("ISO-8859-1"), "gb2312");
        // Strip HTML entities (&nbsp; etc.) and tags to get plain text.
        // BUG FIX: the original pattern "//&[a-zA-Z]{1,10};" demanded two
        // literal slashes before each entity and therefore never matched.
        String p = response.replaceAll("&[a-zA-Z]{1,10};", "")
                .replaceAll("<[^>]*>", "");
        System.out.println(p);
    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        // BUG FIX: always release the connection back to the pool;
        // the original leaked it.
        postMethod.releaseConnection();
    }
    return response;
}
// 第二种方法
// 这种方法是JAVA自带的URL来抓取网站内容
/**
 * Method 2: fetches a page using the JDK's own URL / HttpURLConnection,
 * prints a plain-text rendering, and returns the raw content.
 *
 * @param strUrl         the page URL
 * @param strPostRequest POST body to send; null or empty means a plain GET
 * @param maxLength      maximum number of characters to read; {@code <= 0}
 *                       means unlimited
 * @return the trimmed page content, or null on any failure
 */
public String getPageContent(String strUrl, String strPostRequest,
        int maxLength) {
    StringBuffer buffer = new StringBuffer();
    // JDK-internal connect/read timeouts, in milliseconds.
    System.setProperty("sun.net.client.defaultConnectTimeout", "5000");
    System.setProperty("sun.net.client.defaultReadTimeout", "5000");
    BufferedReader rd = null;
    HttpURLConnection hConnect = null;
    try {
        URL newUrl = new URL(strUrl);
        hConnect = (HttpURLConnection) newUrl.openConnection();
        // Optional POST payload (null-safe: a null request means GET).
        if (strPostRequest != null && strPostRequest.length() > 0) {
            hConnect.setDoOutput(true);
            OutputStreamWriter out = new OutputStreamWriter(hConnect
                    .getOutputStream());
            out.write(strPostRequest);
            out.flush();
            out.close();
        }
        // Read up to maxLength characters of the response.
        rd = new BufferedReader(new InputStreamReader(
                hConnect.getInputStream()));
        int ch;
        for (int length = 0; (ch = rd.read()) > -1
                && (maxLength <= 0 || length < maxLength); length++) {
            buffer.append((char) ch);
        }
        String s = buffer.toString();
        // BUG FIX: String.replaceAll returns a NEW string; the original
        // discarded the result, so entities/tags were never stripped
        // before printing.
        String plainText = s.replaceAll("&[a-zA-Z]{1,10};", "")
                .replaceAll("<[^>]*>", "");
        System.out.println(plainText);
        return s.trim();
    } catch (Exception e) {
        // Original behavior: swallow the error and signal failure with null.
        return null;
    } finally {
        // BUG FIX: the original leaked the reader and connection whenever
        // an exception was thrown mid-read.
        if (rd != null) {
            try {
                rd.close();
            } catch (IOException ignored) {
                // best-effort cleanup
            }
        }
        if (hConnect != null) {
            hConnect.disconnect();
        }
    }
}
然后写个测试类:
// Demo entry point: fetches the page using method 1 (Apache HttpClient).
// NOTE(review): "createhttpClient" is used here as both a class name and a
// static method name; calling a static method through an instance compiles,
// but only if the enclosing class really is named "createhttpClient" —
// verify against the actual class declaration, which is not shown here.
public static void main(String[] args) {
String url = "http://www.fkjava.com";
String keyword = "疯狂Java网";
createhttpClient p = new createhttpClient();
String response = p.createhttpClient(url, keyword); // method 1: HttpClient
// p.getPageContent(url, "post", 100500); // method 2: HttpURLConnection
}
呵呵,看看控制台吧,是不是把网页的内容获取了
以下是分析网页过程:
import org.htmlparser.Node;
import org.htmlparser.NodeFilter;
import org.htmlparser.Parser;
import org.htmlparser.filters.TagNameFilter;
import org.htmlparser.tags.TableTag;
import org.htmlparser.util.NodeList;
/**
 * Demonstrates parsing fetched HTML with the HtmlParser library
 * (org.htmlparser).
 */
public class HtmlParserTest
{
    /**
     * Downloads the Sina NBA live page, prints the raw HTML, then prints
     * the plain text extracted from it via {@link #extractText(String)}.
     */
    public static void testHtml()
    {
        java.io.BufferedReader l_reader = null;
        try
        {
            java.net.URL l_url = new java.net.URL("http://sports.sina.com.cn/iframe/nba/live/");
            java.net.HttpURLConnection l_connection = (java.net.HttpURLConnection) l_url.openConnection();
            l_connection.connect();
            l_reader = new java.io.BufferedReader(
                    new java.io.InputStreamReader(l_connection.getInputStream()));
            // BUG FIX: accumulate with a StringBuffer; the original used
            // repeated String "+=" in the loop, which is quadratic for
            // large pages.
            StringBuffer sTotalString = new StringBuffer();
            String sCurrentLine;
            while ((sCurrentLine = l_reader.readLine()) != null)
            {
                sTotalString.append(sCurrentLine);
            }
            System.out.println(sTotalString.toString());
            System.out.println("====================");
            String testText = extractText(sTotalString.toString());
            System.out.println(testText);
        }
        catch (Exception e)
        {
            e.printStackTrace();
        }
        finally
        {
            // BUG FIX: the original never closed the connection's stream.
            if (l_reader != null)
            {
                try { l_reader.close(); } catch (java.io.IOException ignored) { }
            }
        }
    }

    /**
     * Extracts the plain-text content of an HTML document.
     *
     * NOTE(review): the getBytes()/new String(...) round trips below use the
     * platform default charset and only behave correctly when it matches the
     * page's encoding — a common workaround from the pre-Unicode-aware
     * HtmlParser era. Confirm before reusing this in a new environment.
     *
     * @param inputHtml the HTML to strip
     * @return the concatenated plain text of the whole document
     * @throws Exception if parsing fails
     */
    public static String extractText(String inputHtml) throws Exception
    {
        StringBuffer text = new StringBuffer();
        Parser parser = Parser.createParser(new String(inputHtml.getBytes(), "8859_1"), "8859-1");
        // Match every node in the document.
        NodeList nodes = parser.extractAllNodesThatMatch(new NodeFilter() {
            public boolean accept(Node node) {
                return true;
            }
        });
        Node node = nodes.elementAt(0);
        text.append(new String(node.toPlainTextString().getBytes("8859_1")));
        return text.toString();
    }

    /**
     * Parses a resource (file path or URL) and prints its 12th &lt;table&gt;
     * element as HTML.
     *
     * @param resource a file path or URL to parse
     * @throws Exception if fetching or parsing fails
     */
    public static void test5(String resource) throws Exception {
        Parser myParser = new Parser(resource);
        // The target page is GBK-encoded.
        myParser.setEncoding("GBK");
        NodeFilter filter = new TagNameFilter("table");
        NodeList nodeList = myParser.extractAllNodesThatMatch(filter);
        // NOTE(review): index 11 (the 12th table) is hard-coded for the
        // Yahoo scoreboard layout of the time; it breaks if the page
        // structure changes.
        TableTag tabletag = (TableTag) nodeList.elementAt(11);
        System.out.println(tabletag.toHtml());
        System.out.println("==============");
    }

    public static void main(String[] args) throws Exception {
        test5("http://sports.yahoo.com/nba/scoreboard");
    }
}
Java抓取网页 解析网页HtmlParser
最新推荐文章于 2024-07-12 00:20:35 发布