下面是 A Tour of Go 中最后一个练习(Web Crawler)的解法。本文使用真正的网络 IO 来运行爬虫;由于没有对并发连接数做限制,当 depth 为 3 时可能因同时打开的连接过多而出错。
package main
import (
"fmt"
"log"
"strings"
"sync"
"github.com/PuerkitoBio/goquery"
)
// Fetcher abstracts page retrieval so Crawl can run against any
// backend (a real network fetcher here; a canned one in the original
// Tour exercise).
type Fetcher interface {
// Fetch returns the body of URL and
// a slice of URLs found on that page.
// A non-nil err means the page could not be retrieved.
Fetch(url string) (body string, urls []string, err error)
}
// Crawl uses fetcher to recursively crawl pages starting with url,
// to a maximum of depth. Every discovered link is crawled in its own
// goroutine; the package-level WaitGroup tracks them so main can wait
// for the whole crawl to finish. Duplicate suppression is delegated
// to the fetcher's cache.
func Crawl(url string, depth int, fetcher Fetcher) {
	if depth <= 0 {
		return
	}
	_, links, err := fetcher.Fetch(url)
	if err != nil {
		fmt.Println(err)
		return
	}
	wg.Add(len(links))
	for _, link := range links {
		// Pass link as an argument so every goroutine gets its own
		// copy (pre-Go 1.22 loop variables are shared across iterations).
		go func(target string) {
			defer wg.Done()
			Crawl(target, depth-1, fetcher)
		}(link)
	}
}
// main initializes the fetcher's result cache, starts the crawl at a
// fixed depth of 2, and blocks until every crawler goroutine is done.
func main() {
	fetcher.result = make(map[string]*realResult)
	Crawl("http://golang.org/", 2, fetcher)
	wg.Wait()
}
// realFetcher is a Fetcher that retrieves pages over the network and
// memoizes results so each URL is downloaded and parsed at most once.
type realFetcher struct {
	result map[string]*realResult // cache keyed by URL
	mux    sync.Mutex             // guards result
}

// realResult holds the body text and outgoing links of one fetched page.
type realResult struct {
	body string
	urls []string
}

// Fetch downloads url (or serves it from the cache), extracts the page
// body text, and normalizes every <a href> into an absolute URL.
//
// The receiver must be a pointer: realFetcher embeds a sync.Mutex, and
// a value receiver would copy the mutex on every call, so the lock
// would no longer synchronize access to the shared result map.
func (f *realFetcher) Fetch(url string) (string, []string, error) {
	// Fast path: answer from the cache.
	f.mux.Lock()
	if res, ok := f.result[url]; ok {
		f.mux.Unlock()
		fmt.Printf("exist:%s------\n", url)
		return res.body, res.urls, nil
	}
	f.mux.Unlock()

	fmt.Printf("fetching:%s------\n", url)
	doc, err := goquery.NewDocument(url)
	if err != nil {
		// Report the failure to the caller instead of log.Fatal, which
		// would terminate the whole program because of one bad page.
		log.Printf("fetch %s: %v", url, err)
		return "", nil, fmt.Errorf("Process error:%s", url)
	}

	body := doc.Find("body").Text()

	// Collect and normalize the outgoing links locally first, then
	// publish the complete entry under one critical section. The
	// original stored a nil-urls entry before link extraction, so a
	// concurrent fetch of the same URL could observe an incomplete
	// cache record.
	var links []string
	doc.Find("a").Each(func(i int, s *goquery.Selection) {
		href, _ := s.Attr("href")
		switch {
		case strings.HasPrefix(href, "//"):
			// Protocol-relative link.
			href = "http://" + href[2:]
		case strings.HasPrefix(href, "/"):
			// Site-absolute path; assumes url ends in "/" — TODO
			// confirm for start URLs other than "http://golang.org/".
			href = url + href[1:]
		case len(href) == 0 || strings.HasPrefix(href, "#"):
			// Empty href or same-page anchor: not a crawlable link.
			href = ""
		case !strings.HasPrefix(href, "http"):
			// Relative path.
			href = url + href
		}
		if href != "" {
			links = append(links, href)
		}
	})

	f.mux.Lock()
	f.result[url] = &realResult{body, links}
	f.mux.Unlock()
	return body, links, nil
}

// fetcher is the shared fetcher instance. It is a pointer so that the
// pointer-receiver Fetch method satisfies the Fetcher interface.
var fetcher = &realFetcher{}

// wg tracks every crawler goroutine spawned by Crawl.
var wg sync.WaitGroup