package com.spider;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.select.Elements;
public class Spider {

    /**
     * Downloads and parses the HTML page at the given URL.
     *
     * @param url absolute URL of the page to fetch
     * @return the parsed {@link Document}, or {@code null} when the request fails
     *         (callers must null-check before use)
     */
    public Document loadDocDataByUrl(String url) {
        try {
            return Jsoup.connect(url).get();
        } catch (IOException e) {
            // Preserve the null-on-failure contract, but report the failure with
            // context instead of a bare printStackTrace().
            System.err.println("Failed to fetch " + url + ": " + e);
            return null;
        }
    }

    /**
     * Extracts the detail-page links from the home page.
     *
     * @param doc parsed home-page document (may be {@code null})
     * @return the {@code href} values of all anchors inside the first element with
     *         class {@code news_top}; empty list when the document is {@code null}
     *         or no such element exists
     */
    public List<String> parserDoc(Document doc) {
        List<String> hrefs = new ArrayList<String>();
        if (doc == null) {
            return hrefs; // download failed upstream — nothing to parse
        }
        Elements topSections = doc.getElementsByClass("news_top");
        if (topSections.isEmpty()) {
            return hrefs; // page layout changed or class renamed — avoid IndexOutOfBoundsException
        }
        Elements anchors = topSections.get(0).getElementsByTag("a");
        for (int i = 0; i < anchors.size(); i++) {
            hrefs.add(anchors.get(i).attr("href"));
        }
        return hrefs;
    }

    /**
     * Parses a single article detail page into a {@link News} bean.
     *
     * @param doc parsed detail-page document (may be {@code null})
     * @return a populated {@code News}; fields are empty strings when the
     *         corresponding element is absent, and the bean is returned empty
     *         when {@code doc} is {@code null}
     */
    public News parserDetail(Document doc) {
        News news = new News();
        if (doc == null) {
            return news; // download failed — return an empty bean instead of throwing NPE
        }
        // NOTE: the original computed keywords.split(":") into an unused local;
        // the raw keywords text is what the bean stores, so the split was dead code.
        news.setKeywords(doc.getElementsByClass("keywords").text());
        news.setTitle(doc.getElementsByClass("main-title").text());
        news.setPublishDate(doc.select(".date-source > .date").text());
        news.setContent(doc.getElementsByClass("article").text());
        news.setAuthor(doc.getElementsByClass("author").text());
        return news;
    }

    /** Crawls the sina.com.cn front page and prints each top-news article. */
    public static void main(String[] args) {
        Spider spider = new Spider();
        Document homePage = spider.loadDocDataByUrl("http://www.sina.com.cn/");
        if (homePage == null) {
            System.err.println("Could not load the home page; aborting.");
            return;
        }
        // Collect the detail-page URLs, then fetch and parse each one.
        List<String> detailUrls = spider.parserDoc(homePage);
        List<News> newsList = new ArrayList<News>();
        for (String url : detailUrls) {
            Document detailDoc = spider.loadDocDataByUrl(url);
            if (detailDoc == null) {
                continue; // skip pages that failed to download
            }
            News news = spider.parserDetail(detailDoc);
            newsList.add(news);
            System.out.println(news);
        }
    }
}
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.select.Elements;
// NOTE(review): this is a duplicate declaration of the Spider class that already
// appears earlier in this file. A Java source file cannot contain two top-level
// public classes (let alone two with the same name) — one copy must be deleted
// before this file will compile. The same fixes are applied here so that
// whichever copy survives is correct.
public class Spider {

    /**
     * Downloads and parses the HTML page at the given URL.
     *
     * @param url absolute URL of the page to fetch
     * @return the parsed {@link Document}, or {@code null} when the request fails
     *         (callers must null-check before use)
     */
    public Document loadDocDataByUrl(String url) {
        try {
            return Jsoup.connect(url).get();
        } catch (IOException e) {
            // Preserve the null-on-failure contract, but report the failure with
            // context instead of a bare printStackTrace().
            System.err.println("Failed to fetch " + url + ": " + e);
            return null;
        }
    }

    /**
     * Extracts the detail-page links from the home page.
     *
     * @param doc parsed home-page document (may be {@code null})
     * @return the {@code href} values of all anchors inside the first element with
     *         class {@code news_top}; empty list when the document is {@code null}
     *         or no such element exists
     */
    public List<String> parserDoc(Document doc) {
        List<String> hrefs = new ArrayList<String>();
        if (doc == null) {
            return hrefs; // download failed upstream — nothing to parse
        }
        Elements topSections = doc.getElementsByClass("news_top");
        if (topSections.isEmpty()) {
            return hrefs; // page layout changed or class renamed — avoid IndexOutOfBoundsException
        }
        Elements anchors = topSections.get(0).getElementsByTag("a");
        for (int i = 0; i < anchors.size(); i++) {
            hrefs.add(anchors.get(i).attr("href"));
        }
        return hrefs;
    }

    /**
     * Parses a single article detail page into a {@link News} bean.
     *
     * @param doc parsed detail-page document (may be {@code null})
     * @return a populated {@code News}; fields are empty strings when the
     *         corresponding element is absent, and the bean is returned empty
     *         when {@code doc} is {@code null}
     */
    public News parserDetail(Document doc) {
        News news = new News();
        if (doc == null) {
            return news; // download failed — return an empty bean instead of throwing NPE
        }
        // NOTE: the original computed keywords.split(":") into an unused local;
        // the raw keywords text is what the bean stores, so the split was dead code.
        news.setKeywords(doc.getElementsByClass("keywords").text());
        news.setTitle(doc.getElementsByClass("main-title").text());
        news.setPublishDate(doc.select(".date-source > .date").text());
        news.setContent(doc.getElementsByClass("article").text());
        news.setAuthor(doc.getElementsByClass("author").text());
        return news;
    }

    /** Crawls the sina.com.cn front page and prints each top-news article. */
    public static void main(String[] args) {
        Spider spider = new Spider();
        Document homePage = spider.loadDocDataByUrl("http://www.sina.com.cn/");
        if (homePage == null) {
            System.err.println("Could not load the home page; aborting.");
            return;
        }
        // Collect the detail-page URLs, then fetch and parse each one.
        List<String> detailUrls = spider.parserDoc(homePage);
        List<News> newsList = new ArrayList<News>();
        for (String url : detailUrls) {
            Document detailDoc = spider.loadDocDataByUrl(url);
            if (detailDoc == null) {
                continue; // skip pages that failed to download
            }
            News news = spider.parserDetail(detailDoc);
            newsList.add(news);
            System.out.println(news);
        }
    }
}