Exercise: Web Crawler
In this exercise you’ll use Go’s concurrency features to parallelize a web crawler.
Modify the Crawl function to fetch URLs in parallel without fetching the same URL twice.
Hint: you can keep a cache of the URLs that have been fetched on a map, but maps alone are not safe for concurrent use!
The original code is:
package main

import (
	"fmt"
)

type Fetcher interface {
	// Fetch returns the body of URL and
	// a slice of URLs found on that page.
	Fetch(url string) (body string, urls []string, err error)
}

// Crawl uses fetcher to recursively crawl
// pages starting with url, to a maximum of depth.
func Crawl(url string, depth int, fetcher Fetcher) {
	// TODO: Fetch URLs in parallel.
	// TODO: Don't fetch the same URL twice.
	// This implementation doesn't do either:
	if depth <= 0 {
		return
	}
	body, urls, err := fetcher.Fetch(url)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("found: %s %q\n", url, body)
	for _, u := range urls {
		Crawl(u, depth-1, fetcher)
	}
	return
}

func main() {
	Crawl("https://golang.org/", 4, fetcher)
}

// fakeFetcher is Fetcher that returns canned results.
type fakeFetcher map[string]*fakeResult

type fakeResult struct {
	body string
	urls []string
}

func (f fakeFetcher) Fetch(url string) (string, []string, error) {
	if res, ok := f[url]; ok {
		return res.body, res.urls, nil
	}
	return "", nil, fmt.Errorf("not found: %s", url)
}

// fetcher is a populated fakeFetcher.
var fetcher = fakeFetcher{
	"https://golang.org/": &fakeResult{
		"The Go Programming Language",
		[]string{
			"https://golang.org/pkg/",
			"https://golang.org/cmd/",
		},
	},
	"https://golang.org/pkg/": &fakeResult{
		"Packages",
		[]string{
			"https://golang.org/",
			"https://golang.org/cmd/",
			"https://golang.org/pkg/fmt/",
			"https://golang.org/pkg/os/",
		},
	},
	"https://golang.org/pkg/fmt/": &fakeResult{
		"Package fmt",
		[]string{
			"https://golang.org/",
			"https://golang.org/pkg/",
		},
	},
	"https://golang.org/pkg/os/": &fakeResult{
		"Package os",
		[]string{
			"https://golang.org/",
			"https://golang.org/pkg/",
		},
	},
}
As you can see, the original Crawl function is already written. What we need to do is:
1. Parallelize the URL fetching inside Crawl.
2. Avoid fetching the same URL twice.
1. Parallelizing the URL fetches

To accomplish the first point, the simple approach is to use goroutines directly, changing

for _, u := range urls {
	Crawl(u, depth-1, fetcher)
}

into

for _, u := range urls {
	go Crawl(u, depth-1, fetcher)
}
But doing it this way has a problem: every recursive Crawl now runs in its own goroutine, so the main program never blocks and exits immediately, before the crawl has finished. To make the main program wait for all the goroutines to finish, we use a channel done to signal completion: each goroutine sends one value on the channel when it is done, and the caller receives once per goroutine. The loop becomes:

done := make(chan bool, len(urls))
for _, u := range urls {
	go func(url string) {
		Crawl(url, depth-1, fetcher)
		done <- true
	}(u)
}
for range urls {
	<-done
}
2. Not fetching the same URL twice

To accomplish the second point, keep a map that records which URLs have already been fetched. Maps are not safe for concurrent use, however, so we pair the map with a mutex to get a concurrency-safe map. The struct looks like this:

type SafeMap struct {
	mp map[string]bool
	mu sync.Mutex
}

var sm = SafeMap{mp: make(map[string]bool)}
In the Crawl function, checking whether the url is in the map and marking it as fetched must both happen inside the critical section, so the lock is taken before the check and released after the map has been updated. If the url was already in the map, ok is true, meaning it has been fetched before, and we return immediately. Note that the depth check comes first: if it came after the marking step, a url first reached with depth <= 0 would be recorded as fetched without ever actually being crawled.

if depth <= 0 {
	return
}
sm.mu.Lock()
_, ok := sm.mp[url]
sm.mp[url] = true
sm.mu.Unlock()
if ok {
	return
}
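
The lock/check/mark/unlock sequence can also be wrapped in a method on SafeMap, so that callers cannot forget to take the lock. A hypothetical helper (Visit is not part of the original solution):

// Visit marks url as visited and reports whether it
// had already been visited before this call.
func (s *SafeMap) Visit(url string) bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	visited := s.mp[url] // false if url was never marked
	s.mp[url] = true
	return visited
}

With this helper the check in Crawl collapses to if sm.Visit(url) { return }.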
Finally, putting all of the above together:
package main

import (
	"fmt"
	"sync"
)

type Fetcher interface {
	// Fetch returns the body of URL and
	// a slice of URLs found on that page.
	Fetch(url string) (body string, urls []string, err error)
}

// SafeMap is a map guarded by a mutex so that it is
// safe to use from multiple goroutines.
type SafeMap struct {
	mp map[string]bool
	mu sync.Mutex
}

var sm = SafeMap{mp: make(map[string]bool)}

// Crawl uses fetcher to recursively crawl
// pages starting with url, to a maximum of depth.
// It fetches URLs in parallel and never fetches
// the same URL twice.
func Crawl(url string, depth int, fetcher Fetcher) {
	if depth <= 0 {
		return
	}
	// Check and mark the URL inside the critical section.
	sm.mu.Lock()
	_, ok := sm.mp[url]
	sm.mp[url] = true
	sm.mu.Unlock()
	if ok {
		return
	}
	body, urls, err := fetcher.Fetch(url)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("found: %s %q\n", url, body)
	// Crawl the children in parallel, then wait for all
	// of them to finish before returning.
	done := make(chan bool, len(urls))
	for _, u := range urls {
		go func(url string) {
			Crawl(url, depth-1, fetcher)
			done <- true
		}(u)
	}
	for range urls {
		<-done
	}
}

func main() {
	Crawl("https://golang.org/", 4, fetcher)
}
// fakeFetcher is Fetcher that returns canned results.
type fakeFetcher map[string]*fakeResult

type fakeResult struct {
	body string
	urls []string
}

func (f fakeFetcher) Fetch(url string) (string, []string, error) {
	if res, ok := f[url]; ok {
		return res.body, res.urls, nil
	}
	return "", nil, fmt.Errorf("not found: %s", url)
}

// fetcher is a populated fakeFetcher.
var fetcher = fakeFetcher{
	"https://golang.org/": &fakeResult{
		"The Go Programming Language",
		[]string{
			"https://golang.org/pkg/",
			"https://golang.org/cmd/",
		},
	},
	"https://golang.org/pkg/": &fakeResult{
		"Packages",
		[]string{
			"https://golang.org/",
			"https://golang.org/cmd/",
			"https://golang.org/pkg/fmt/",
			"https://golang.org/pkg/os/",
		},
	},
	"https://golang.org/pkg/fmt/": &fakeResult{
		"Package fmt",
		[]string{
			"https://golang.org/",
			"https://golang.org/pkg/",
		},
	},
	"https://golang.org/pkg/os/": &fakeResult{
		"Package os",
		[]string{
			"https://golang.org/",
			"https://golang.org/pkg/",
		},
	},
}
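
Because the child pages are crawled concurrently, the order of the found: lines varies from run to run; only the set of lines printed is deterministic. To gain confidence that the map accesses are properly synchronized, the program can be run under Go's race detector (go run -race), which reports data races it detects at runtime.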