A previous project called for a web crawler whose downloaded files are watched by Logstash and then written into a document management system. Below are two Java snippets for downloading files from the web.
Method 1:
import java.io.File;
import java.net.URL;
import org.apache.commons.io.FileUtils;

// `content` holds the file URL and `title` the base file name
// (both assumed to be set by the surrounding crawler code).
if (content.isEmpty() || !content.contains("http")) {
    return; // skip blanks and anything that is not an HTTP link
}
URL httpurl = new URL(content);
// Reuse the extension found in the URL; fall back to .html when there is none.
int inx = content.lastIndexOf(".");
String fileName = title;
if (inx > 0) {
    fileName = fileName + content.substring(inx);
} else {
    fileName = fileName + ".html";
}
System.out.println(fileName);
File f = new File("d:/" + fileName);
// Commons IO opens the connection, copies the body, and closes the streams.
FileUtils.copyURLToFile(httpurl, f);
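One caveat with the call above: FileUtils.copyURLToFile(URL, File) uses no timeout, so a stalled server can hang the crawler indefinitely. Commons IO 2.0+ also offers an overload that takes connection and read timeouts in milliseconds; a minimal sketch reusing the httpurl and f variables from above (the 10-second read timeout is an illustrative value, not from the original project):

// Same copy, but give up after 3 s connecting and 10 s reading.
FileUtils.copyURLToFile(httpurl, f, 3 * 1000, 10 * 1000);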
Method 2:
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;

// `fileLink` holds the file URL and `fileName` the target name, as in Method 1.
URL url = new URL(fileLink);
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
// Connection timeout: 3 seconds
conn.setConnectTimeout(3 * 1000);
// Send a browser-like User-Agent so the server does not reject the crawler with a 403
conn.setRequestProperty("User-Agent", "Mozilla/4.0 (compatible; MSIE 5.0; Windows NT; DigExt)");
// Read the whole response body into a byte array, then write it to disk;
// try-with-resources closes both streams even when an exception is thrown.
try (InputStream inputStream = conn.getInputStream();
     FileOutputStream fos = new FileOutputStream(new File("d:/" + fileName))) {
    byte[] getData = readInputStream(inputStream);
    fos.write(getData);
}
// Drain an InputStream into a byte array using a 1 KB buffer.
public static byte[] readInputStream(InputStream inputStream) throws IOException {
    byte[] buffer = new byte[1024];
    int len;
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    while ((len = inputStream.read(buffer)) != -1) {
        bos.write(buffer, 0, len);
    }
    return bos.toByteArray();
}
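Note that readInputStream buffers the entire response in memory before a single byte reaches disk, which is fine for small documents but wasteful for large downloads. As an alternative, the body can be streamed straight into the target file with java.nio.file.Files.copy (Java 7+); a minimal sketch under the same assumptions as Method 2 (fileLink and fileName defined by the caller):

import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;

HttpURLConnection conn = (HttpURLConnection) new URL(fileLink).openConnection();
conn.setConnectTimeout(3 * 1000);
conn.setRequestProperty("User-Agent", "Mozilla/4.0 (compatible; MSIE 5.0; Windows NT; DigExt)");
// Copy the response body to the file chunk by chunk, never holding it all in memory.
try (InputStream in = conn.getInputStream()) {
    Files.copy(in, Paths.get("d:/" + fileName), StandardCopyOption.REPLACE_EXISTING);
}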