题目地址:
https://leetcode.com/problems/web-crawler-multithreaded/description/
给定一个URL,startUrl
和一个工具类HtmlParser
,实现一个多线程的网络爬虫,从startUrl
开始,将和startUrl
的主机名称相同的网站爬下来。HtmlParser的接口如下:
class HtmlParser {
// Return a list of all urls from a webpage of given url.
// This is a blocking call, that means it will do HTTP request and return when this request is finished.
vector<string> getUrls(string url);
};
这个接口调用getUrls的时候会阻塞。
代码如下:
class Solution {
public:
    // URLs collected so far (discovery order); shared across worker threads,
    // guarded by `mu` together with `vis`.
    vector<string> res;
    // URLs already scheduled for crawling, so no page is fetched twice.
    unordered_set<string> vis;
    mutex mu;

    // Fetch `startUrl`, then spawn one worker thread per newly discovered
    // same-host link and recurse; blocks until all spawned workers finish.
    // `host` is the prefix "http://<hostname>" every accepted URL must start with.
    void crawlUtil(string& startUrl, HtmlParser& parser, string& host) {
        // Blocking HTTP call returning the page's outgoing links.
        vector<string> links = parser.getUrls(startUrl);
        // Owning thread objects (RAII) — the original leaked heap-allocated
        // `new thread` instances that were joined but never deleted.
        vector<std::thread> workers;
        workers.reserve(links.size());
        for (auto& link : links) {
            {
                // Test-and-insert must be atomic so two threads never both
                // claim the same URL.
                std::lock_guard<std::mutex> lk(mu);
                // Skip URLs already seen and URLs not on the same host.
                // compare(0, n, host) == 0  <=>  `link` starts with `host`;
                // clearer and cheaper than the original full-string find().
                if (vis.count(link) || link.compare(0, host.size(), host) != 0)
                    continue;
                vis.insert(link);
                res.push_back(link);
            }
            // Crawl the accepted link concurrently; `link`, `parser` and
            // `host` outlive the worker because we join before returning.
            workers.emplace_back(&Solution::crawlUtil, this,
                                 std::ref(link), std::ref(parser), std::ref(host));
        }
        // Wait for every spawned worker before this frame's locals go away.
        for (auto& w : workers) w.join();
    }

    // Entry point: crawl every page reachable from `startUrl` that shares its
    // hostname. Returns the list of crawled URLs (startUrl included first).
    vector<string> crawl(string startUrl, HtmlParser parser) {
        res.push_back(startUrl);
        vis.insert(startUrl);
        // Host prefix = "http://" + hostname, i.e. everything up to (but not
        // including) the first '/' after the scheme; the whole URL if none.
        const string scheme = "http://";
        const size_t slash = startUrl.find('/', scheme.size());
        string host = (slash == string::npos) ? startUrl : startUrl.substr(0, slash);
        std::thread t(&Solution::crawlUtil, this,
                      std::ref(startUrl), std::ref(parser), std::ref(host));
        t.join();
        return res;
    }
};