老师要求做个爬虫……遇到一些疑惑,求大神们指教,感激不尽。现在有一个包含100,000个pdf下载地址的txt文件,需要从txt里取出地址,然后用多线程将pdf下载到本地电脑上。
下载方法的主代码:
package crawler;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.MalformedURLException;
import java.net.URL;
import java.net.UnknownHostException;
public class DownLoading2 {
/**
* @param args
try {
//建立PDF下载的目的文件夹
// String dir = "d:/pdf/";
String dir = "d:/pdf/";
File file = new File(dir);
if (!file.exists()) {
file.mkdirs();
}
%T-TRANSFORM: none; BACKGROUND-COLOR: rgb(223,223,223); TEXT-INDENT: 0px; FONT: 12px/18px verdana, arial, helvetica; WORD-WRAP: break-word; WHITE-SPACE: normal; LETTER-SPACING: normal; COLOR: rgb(0,0,0); WORD-SPACING: 0px; -webkit-text-stroke-width: 0px" /> System.exit(0);
}
//未访问url出队列,入已访问url队列,在done.txt文件中写出,在输出面板中打印
//此处需互斥量
String pdf = LinkQueue.unVisitedUrlDeQueue().toString();
LinkQueue.addVisitedUrl(pdf);
//写入done.txt,若程序意外关闭,就在10000urls.txt中手动删除done.txt中的url,然后重新执行程序即可
try {
//打开一个写文件器,构造函数中的第二个参数true表示以追加形式写文件
FileWriter writer = new FileWriter( dir + "done.txt", true);
System.out.println("IOException");
}
System.out.println(pdf);
//下载pdf文件,文件名以url中的最后一个名字命名
URL u = new URL(pdf);
try{
InputStream i = u.openStream();
String fileName = pdf.substring(pdf.lastIndexOf("/"));
OutputStream bos = new FileOutputStream(new File(dir + fileName));
while ((len = i.read(b)) != -1) {
bos.write(b, 0, len);
}
bos.flush();
bos.close();
i.close();
} catch(UnknownHostException e){
//e.printStackTrace();
System.out.println("UnknownHostException");
HttpURLConnection urlcon=(HttpURLConnection)u.openConnection();
if(urlcon.getResponseCode()>=400){
System.out.println("服务器响应错误");
System.exit(-1);
}
/*if( ){
LinkQueue.removeVisitedUrl(pdf);
}*/
}catch (MalformedURLException e) {
//e.printStackTrace();
e.printStackTrace();
//System.out.println("IOException");
}finally{
}
}}
然后是调用这个方法的主函数:
public static void main(String[] args)throws Exception {
//TODO Auto-generated method stub
//调用存取100,000个url的txt地址
String filePath = "D:\\My Documents\\大三上\\信息检索\\10000urls.txt";
//把url读进内存,并存进队列
readTxtFile(filePath);
for(int i=0;i<20;i++){
for(int j=0;j<30;j++){
Thread t = new Mythread();
t.start();
}
/ORM: none; BACKGROUND-COLOR: rgb(223,223,223); TEXT-INDENT: 0px; FONT: 12px/18px verdana, arial, helvetica; WORD-WRAP: break-word; WHITE-SPACE: normal; LETTER-SPACING: normal; COLOR: rgb(0,0,0); WORD-SPACING: 0px; -webkit-text-stroke-width: 0px" />}
// Worker thread: each instance performs a single download() call and exits.
// NOTE(review): with one download per thread, 600 threads can fetch at most
// 600 PDFs — looping run() until the queue drains would need a queue-empty
// check on LinkQueue; confirm its API before changing this.
class Mythread extends Thread{
//public int x = 0;
public void run(){
DownLoading2.download();
}
}
我是想在那个循环里就全部下载下来,但是最后只爬到200多个,线程就全部没有反应了...
这是怎么回事......求大神指教可以的话顺便帮我修改下...
下载方法的主代码:
package crawler;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.MalformedURLException;
import java.net.URL;
import java.net.UnknownHostException;
public class DownLoading2 {
/**
* @param args
try {
//建立PDF下载的目的文件夹
// String dir = "d:/pdf/";
String dir = "d:/pdf/";
File file = new File(dir);
if (!file.exists()) {
file.mkdirs();
}
%T-TRANSFORM: none; BACKGROUND-COLOR: rgb(223,223,223); TEXT-INDENT: 0px; FONT: 12px/18px verdana, arial, helvetica; WORD-WRAP: break-word; WHITE-SPACE: normal; LETTER-SPACING: normal; COLOR: rgb(0,0,0); WORD-SPACING: 0px; -webkit-text-stroke-width: 0px" /> System.exit(0);
}
//未访问url出队列,入已访问url队列,在done.txt文件中写出,在输出面板中打印
//此处需互斥量
String pdf = LinkQueue.unVisitedUrlDeQueue().toString();
LinkQueue.addVisitedUrl(pdf);
//写入done.txt,若程序意外关闭,就在10000urls.txt中手动删除done.txt中的url,然后重新执行程序即可
try {
//打开一个写文件器,构造函数中的第二个参数true表示以追加形式写文件
FileWriter writer = new FileWriter( dir + "done.txt", true);
System.out.println("IOException");
}
System.out.println(pdf);
//下载pdf文件,文件名以url中的最后一个名字命名
URL u = new URL(pdf);
try{
InputStream i = u.openStream();
String fileName = pdf.substring(pdf.lastIndexOf("/"));
OutputStream bos = new FileOutputStream(new File(dir + fileName));
while ((len = i.read(b)) != -1) {
bos.write(b, 0, len);
}
bos.flush();
bos.close();
i.close();
} catch(UnknownHostException e){
//e.printStackTrace();
System.out.println("UnknownHostException");
HttpURLConnection urlcon=(HttpURLConnection)u.openConnection();
if(urlcon.getResponseCode()>=400){
System.out.println("服务器响应错误");
System.exit(-1);
}
/*if( ){
LinkQueue.removeVisitedUrl(pdf);
}*/
}catch (MalformedURLException e) {
//e.printStackTrace();
e.printStackTrace();
//System.out.println("IOException");
}finally{
}
}}
然后是调用这个方法的主函数:
public static void main(String[] args)throws Exception {
//TODO Auto-generated method stub
//调用存取100,000个url的txt地址
String filePath = "D:\\My Documents\\大三上\\信息检索\\10000urls.txt";
//把url读进内存,并存进队列
readTxtFile(filePath);
for(int i=0;i<20;i++){
for(int j=0;j<30;j++){
Thread t = new Mythread();
t.start();
}
/ORM: none; BACKGROUND-COLOR: rgb(223,223,223); TEXT-INDENT: 0px; FONT: 12px/18px verdana, arial, helvetica; WORD-WRAP: break-word; WHITE-SPACE: normal; LETTER-SPACING: normal; COLOR: rgb(0,0,0); WORD-SPACING: 0px; -webkit-text-stroke-width: 0px" />}
// Worker thread: each instance performs a single download() call and exits.
// NOTE(review): with one download per thread, 600 threads can fetch at most
// 600 PDFs — looping run() until the queue drains would need a queue-empty
// check on LinkQueue; confirm its API before changing this.
class Mythread extends Thread{
//public int x = 0;
public void run(){
DownLoading2.download();
}
}
我是想在那个循环里就全部下载下来,但是最后只爬到200多个,线程就全部没有反应了...
这是怎么回事......求大神指教可以的话顺便帮我修改下...