Steps to implement the crawler:
- Decide on the target URL and the goal of the crawl
- Look for patterns in the URL and in the page's source code
- Fetch the page source with HTTP programming
- Filter the fetched source with regular expressions to extract the useful information (the program below stops after saving the raw pages; a regex sketch follows at the end)
package main

import (
	"fmt"
	"net/http"
	"os"
	"strconv"
)
// HttpGet fetches one Tieba list page and saves it to ./file/<page>.html.
func HttpGet(page int, ch chan int) (result string, err error) {
	// Report back no matter what happens; without this, a failed page
	// would leave DoWork blocked on the channel forever.
	defer func() { ch <- page }()
	// Tieba paginates with pn = (page-1)*50: 50 threads per list page.
	url := "https://tieba.baidu.com/f?kw=%E7%BB%9D%E5%9C%B0%E6%B1%82%E7%94%9F&ie=utf-8&pn=" + strconv.Itoa((page-1)*50)
	fmt.Printf("Crawling page %d...\n", page)
	fmt.Println("url:", url)
	response, err := http.Get(url)
	if err != nil {
		fmt.Println("http.Get failed:", err)
		return
	}
	defer response.Body.Close()
	// Read the body in 4 KB chunks, keeping any bytes read before EOF.
	buf := make([]byte, 4*1024)
	for {
		n, readErr := response.Body.Read(buf)
		if n > 0 {
			result += string(buf[:n])
		}
		if readErr != nil { // io.EOF marks the end of the body
			break
		}
	}
	fileName := "./file/" + strconv.Itoa(page) + ".html"
	file, err := os.Create(fileName)
	if err != nil {
		fmt.Println("create failed, err =", err)
		return
	}
	defer file.Close()
	_, err = file.WriteString(result)
	return
}
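The chunked read loop keeps the original's shape; assuming Go 1.16 or newer, the same result is usually obtained in one call with io.ReadAll (add "io" to the imports):

	data, err := io.ReadAll(response.Body)
	if err != nil {
		return
	}
	result = string(data)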
// DoWork crawls pages start..end concurrently, one goroutine per page.
func DoWork(start, end int) {
	fmt.Printf("Crawling pages %d -> %d\n", start, end)
	// Make sure the output directory exists before any goroutine writes to it.
	if err := os.MkdirAll("./file", 0755); err != nil {
		fmt.Println("mkdir failed, err =", err)
		return
	}
	page := make(chan int)
	// Fan out: launch every fetch concurrently.
	for i := start; i <= end; i++ {
		go HttpGet(i, page)
	}
	// Fan in: wait until each page reports completion.
	for i := start; i <= end; i++ {
		fmt.Printf("page %d done!\n", <-page)
	}
}
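Because page is unbuffered, the fan-in loop blocks until every goroutine has sent its page number, so each HttpGet must send exactly once; the deferred send at the top of HttpGet guarantees that even when a fetch fails. A sync.WaitGroup could express the same join, but the channel also carries the page number used in the progress message.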
func main() {
	var start, end int
	fmt.Println("Enter the start page (>= 1):")
	fmt.Scan(&start)
	fmt.Println("Enter the end page (>= start page):")
	fmt.Scan(&end)
	// The prompts state the constraints; enforce them as well.
	if start < 1 || end < start {
		fmt.Println("invalid page range")
		return
	}
	DoWork(start, end)
}
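Step 4, regex filtering, works on the files the crawler just saved. Below is a minimal standalone sketch, assuming Go 1.16+ for os.ReadFile and assuming, hypothetically, that each thread link in the saved list page carries a title="..." attribute; the actual pattern must be adapted to the real markup:

package main

import (
	"fmt"
	"os"
	"regexp"
)

func main() {
	// Read one of the pages saved by the crawler above.
	data, err := os.ReadFile("./file/1.html")
	if err != nil {
		fmt.Println("read failed:", err)
		return
	}
	// Hypothetical pattern: capture the value of every title="..." attribute.
	re := regexp.MustCompile(`title="([^"]+)"`)
	for _, m := range re.FindAllStringSubmatch(string(data), -1) {
		fmt.Println(m[1]) // m[0] is the whole match, m[1] the capture group
	}
}

Go's regexp package implements RE2, so this runs in time linear in the input; for anything beyond quick extraction, an HTML parser such as golang.org/x/net/html is the more robust choice.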