最近在抓取的时候发现单线程的curl采集多个网页的时候速度特别慢,尤其是数量超过100的时候,都要几分钟才能获取完毕。于是在网上找了curl多线程的采集方法,网上流传的代码大都是不完善的。最近经过多次调试,最终解决了这个问题:用多线程采集上百个网页也只花费了10多秒。
输入参数为url数组,返回结果为对应的网页源码数组。
代码如下:
/**
 * Fetch multiple URLs concurrently using the curl_multi API.
 *
 * @param array $urls List of URLs to fetch (keys are preserved).
 * @return array|false Response bodies keyed like $urls — entries whose
 *                     transfer reported a curl error are omitted — or
 *                     false when $urls is not a non-empty array.
 */
function curl_multi($urls) {
    if (!is_array($urls) or count($urls) == 0) {
        return false;
    }
    // Use a closure instead of a nested named function: a nested
    // `function createCh() {...}` is declared globally on first call, so a
    // second call to curl_multi() would fatal with "Cannot redeclare createCh()".
    $createHandle = function ($url) {
        $ch = curl_init();
        curl_setopt($ch, CURLOPT_URL, $url);
        curl_setopt($ch, CURLOPT_USERAGENT, 'Mozilla/5.0 (Windows NT 6.1; Trident/7.0; rv:11.0) like Gecko'); // UA header
        curl_setopt($ch, CURLOPT_REFERER, $url);          // set referer to the URL itself
        curl_setopt($ch, CURLOPT_ENCODING, "gzip");       // accept gzip-compressed responses
        curl_setopt($ch, CURLOPT_RETURNTRANSFER, 1);      // return the body instead of echoing it
        curl_setopt($ch, CURLOPT_FOLLOWLOCATION, 1);      // follow 301/302 redirects
        curl_setopt($ch, CURLOPT_MAXREDIRS, 5);           // cap redirect depth
        curl_setopt($ch, CURLOPT_SSL_VERIFYPEER, FALSE);  // NOTE: disables certificate verification (insecure, kept from original)
        curl_setopt($ch, CURLOPT_SSL_VERIFYHOST, FALSE);
        curl_setopt($ch, CURLOPT_TIMEOUT, 20);            // per-transfer timeout in seconds
        curl_setopt($ch, CURLOPT_HEADER, 0);              // body only, no response headers
        return $ch;
    };
    $handles = array();
    $text = array();
    $mh = curl_multi_init();
    foreach ($urls as $k => $url) {
        $handles[$k] = $createHandle($url);
        curl_multi_add_handle($mh, $handles[$k]);
    }
    // Kick off the transfers.
    $active = null;
    do {
        $mrc = curl_multi_exec($mh, $active);
    } while ($mrc == CURLM_CALL_MULTI_PERFORM);
    while ($active && $mrc == CURLM_OK) {
        // Bug fix: the original tested `!= -1`, sleeping exactly when data
        // WAS ready and busy-looping when select failed. Sleep only on the
        // -1 failure case; otherwise proceed to process ready sockets.
        if (curl_multi_select($mh) == -1) {
            usleep(100);
        }
        do {
            $mrc = curl_multi_exec($mh, $active);
        } while ($mrc == CURLM_CALL_MULTI_PERFORM);
    }
    // Collect results; skip handles that reported an error.
    foreach ($handles as $k => $ch) {
        if (curl_error($ch) == "") {
            $text[$k] = (string) curl_multi_getcontent($ch);
        }
        curl_multi_remove_handle($mh, $ch);
        curl_close($ch);
    }
    curl_multi_close($mh);
    return $text;
}
// Demo: fetch the same URL eight times in parallel and dump the results.
$urls = array_fill(0, 8, 'http://www.baidu.com');
$res = curl_multi($urls);
print_r($res);