前一段时间运用htmlparser时,获取地址时直接将html页面的相对地址转换成绝对地址,然而今天在运用jsoup,开始的时候发现只能得到相对地址,最后在网上寻找到了解决办法。
htmlparser获得URLs:
/**
 * Walks all nodes of the page held by {@code parser} and persists every
 * absolute http/https/ftp/file URL found in {@code <a>} tags.
 *
 * @param parser   htmlparser Parser already pointed at the target URL
 * @param dataPath directory where the URL database is stored
 * @param dataName name of the database file that receives the URLs
 */
public static void extractURL(final Parser parser, final String dataPath, final String dataName){
try {
NodeVisitor visitor = new NodeVisitor(){
public void visitTag(Tag tag){
if(tag instanceof LinkTag){
LinkTag link = (LinkTag)tag;
// BUG FIX: String is immutable — the original called linkString.trim()
// and discarded the result, so untrimmed URLs reached the regex check.
String linkString = link.getLink().trim();
// Accept only absolute URLs (http/https/ftp/file schemes).
String regex = "^(https?|ftp|file)://[-a-zA-Z0-9+&@#/%?=~_|!:,.;]*[-a-zA-Z0-9+&@#/%=~_|]" ;
boolean isMatch = PatternMatcher.stringMatchRegex(regex, linkString) ;
if(isMatch){
// Store the URL as a key with an empty value; print it only on success.
if(OperatingDB.writerKeyAndValue(dataPath,dataName,linkString,"")){
String linkTxt = link.getText();
System.out.println(linkString + " " + linkTxt);
}
}
}
}
};
parser.visitAllNodesWith(visitor);
} catch (ParserException e) {
// NOTE(review): consider propagating or logging via a Logger instead
// of printing; kept as-is to preserve the original best-effort behavior.
e.printStackTrace();
}
}
Jsoup代码:
// Jsoup version: select every anchor that carries an href attribute.
Element element = doc.body();
Elements links = element.select("a[href]");
// NOTE(review): this writer is opened but never written to or closed in the
// original snippet — either remove it or wrap it in try-with-resources.
FileWriter fileWriter = new FileWriter(file, true);
for(int i = 0; i < links.size(); i++ ){
Element link = links.get(i);
// "abs:href" resolves the href against the document's base URI, yielding
// an absolute address; plain attr("href") would return the relative one.
System.out.println(link.attr("abs:href") + " ");
// BUG FIX: original line had unbalanced parentheses and did not compile.
System.out.println(link.text()); // the anchor's visible link text
}