<?php
// Crawl kugou.com (a Chinese music site) for song-list pages.
error_reporting(E_ALL^E_NOTICE);
header("Content-type:text/html;charset=utf-8");
// It may take a while to crawl a site ...
set_time_limit(10000);
include("libs/PHPCrawler.class.php");
class MyCrawler extends PHPCrawler {

    /**
     * Callback invoked by PHPCrawler for every document received.
     * Pages hosted on www.kugou.com are handed to parseSonglist();
     * everything else is ignored.
     *
     * @param object $DocInfo PHPCrawlerDocumentInfo describing the document
     */
    function handleDocumentInfo($DocInfo) {
        $url = $DocInfo->url;
        // Only parse pages hosted on www.kugou.com.
        $pat = "/http:\/\/www\.kugou\.com/";
        if (preg_match($pat, $url) > 0) {
            $this->parseSonglist($DocInfo);
        }
        // Flush the output buffer so progress is visible while crawling.
        flush();
    }

    /**
     * Extract the song-list title and the individual song titles from a
     * kugou.com song-list page and dump them with print_r().
     *
     * @param object $DocInfo PHPCrawlerDocumentInfo describing the document
     */
    public function parseSonglist($DocInfo) {
        $content = $DocInfo->content;
        $songlistArr = array();
        $songlistArr['raw_url'] = $DocInfo->url;

        // Parse the song-list title ("名称:" means "name:").
        // BUG FIX: the original pattern used "([^(<br)]+)", which is a
        // character class excluding the single characters ( < b r ) — it
        // truncated any title containing the letters 'b' or 'r'.
        // "[^<]+" correctly captures everything up to the next tag.
        $matches = array();
        $pat = "/<span>名称:<\/span>([^<]+)<br/";
        $ret = preg_match($pat, $content, $matches);
        if ($ret > 0) {
            $songlistArr['title'] = $matches[1];
        } else {
            $songlistArr['title'] = '';
        }

        // Parse the individual song titles out of the anchor tags.
        $pat = "/<a title=\"([^\"]+)\" hidefocus=\"/";
        $matches = array();
        preg_match_all($pat, $content, $matches);
        $songlistArr['songs'] = array();
        // Iterate the captured titles directly instead of an index loop
        // that re-evaluated count() on every pass.
        foreach ($matches[1] as $song_title) {
            array_push($songlistArr['songs'], array('title' => $song_title));
        }

        echo "<pre>";
        print_r($songlistArr);
        echo "</pre>";
    }
}
$crawler = new MyCrawler();

// For small sites the in-memory URL cache would be enough:
// $crawler->setUrlCacheType(PHPCrawlerUrlCacheTypes::URLCACHE_MEMORY);
// Activate the SQLite cache to handle large crawls.
$crawler->setUrlCacheType(PHPCrawlerUrlCacheTypes::URLCACHE_SQLITE);

// URL to crawl (PHPCrawler accepts it without an explicit scheme).
$crawler->setURL("www.kugou.com");

// Directory used to store temporary crawl data.
// BUG FIX: the original passed a *file* path ("F:/phpCrawl/url-03.txt") and
// the author's own comment noted it had no effect — setWorkingDirectory()
// expects a directory path (with trailing slash) per the PHPCrawl docs.
$crawler->setWorkingDirectory("F:/phpCrawl/");

// Only receive the content of documents whose content-type is "text/html".
$crawler->addContentTypeReceiveRule("#text/html#");

// Follow links to the song-list detail pages.
$crawler->addURLFollowRule("#http://www\.kugou\.com/yy/special/single/\d+\.html$# i");
// Skip the paginated index pages (filter rules EXCLUDE matching URLs per
// the PHPCrawl docs). The literal dot is now escaped, consistent with the
// follow-rule above.
$crawler->addURLFilterRule("#http://www\.kugou\.com/yy/special/index/\d+-\d+-2\.html$# i");

// Store and send cookie-data like a browser does.
$crawler->enableCookieHandling(true);

// No traffic limit (0 = unlimited; pass e.g. 1000 * 1024 bytes to cap the
// download size during testing).
$crawler->setTrafficLimit(0);

// On Linux the crawl could run in 5 parallel processes instead:
// $crawler->goMultiProcessed(5);
// go() starts the crawl in single-process mode.
$crawler->go();

// After the process is finished, print a short summary report
// (see getProcessReport() for more information).
$report = $crawler->getProcessReport();
if (PHP_SAPI == "cli") $lb = "\n";
else $lb = "<br />";
echo "Summary:".$lb;
echo "Links followed: ".$report->links_followed.$lb;
echo "Documents received: ".$report->files_received.$lb;
echo "Bytes received: ".$report->bytes_received." bytes".$lb;
echo "Process runtime: ".$report->process_runtime." sec".$lb;
?>
phpcrawler example
(Trailing page metadata from the source article: "Latest recommended article published 2024-06-18 09:42:34")