Taking a simple Baidu Tieba crawler as an example, the steps are as follows:
1. Determine the URL(s) to crawl
2. Fetch the page content
3. Process the data
4. Save the information to a file
Reference code:
package main

import (
    "fmt"
    "io"
    "net/http"
    "os"
    "strconv"
)
func HttpGet(url string) (result string, err error) {
    resp, err1 := http.Get(url)
    if err1 != nil {
        err = err1
        return
    }
    defer resp.Body.Close()
    // Read the page content in 4 KB chunks.
    buf := make([]byte, 1024*4)
    for {
        n, err2 := resp.Body.Read(buf)
        // Append what was read before checking the error:
        // Read may return n > 0 together with io.EOF.
        result += string(buf[:n])
        if err2 != nil {
            if err2 == io.EOF {
                // Finished reading.
                break
            }
            fmt.Println("resp.Body.Read err =", err2)
            err = err2
            return
        }
    }
    return
}
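As a side note, the manual read loop above can be collapsed into a single call with io.ReadAll (available since Go 1.16; older code used the equivalent ioutil.ReadAll). A minimal sketch; HttpGetSimple is a hypothetical name, not part of the reference code:

func HttpGetSimple(url string) (string, error) {
    resp, err := http.Get(url)
    if err != nil {
        return "", err
    }
    defer resp.Body.Close()
    // io.ReadAll reads the body until EOF and returns it in one call.
    data, err := io.ReadAll(resp.Body)
    if err != nil {
        return "", err
    }
    return string(data), nil
}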
func DoWork(start, end int) {
    fmt.Println("Crawling pages", start, "to", end)
    // Build the URL for each page in the requested range.
    for i := start; i <= end; i++ {
        // Each Tieba result page lists 50 posts, so pn advances by 50 per page.
        url := "https://tieba.baidu.com/f?kw=%E7%BB%9D%E5%9C%B0%E6%B1%82%E7%94%9F&ie=utf-8&pn=" +
            strconv.Itoa((i-1)*50)
        fmt.Println(url)
        // Fetch the page content.
        result, err := HttpGet(url)
        if err != nil {
            fmt.Println("HttpGet err =", err)
            continue
        }
        // Write the content to a file named after the page number.
        fileName := strconv.Itoa(i) + ".html"
        f, err1 := os.Create(fileName)
        if err1 != nil {
            fmt.Println("os.Create err =", err1)
            continue
        }
        if _, err2 := f.WriteString(result); err2 != nil {
            fmt.Println("f.WriteString err =", err2)
        }
        f.Close()
    }
}
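DoWork fetches the pages one at a time. Since each page is written to its own file, the per-page work is independent and could run concurrently. The sketch below is an illustrative variant only, not part of the reference code: DoWorkConcurrent is a hypothetical name, it assumes "sync" is added to the imports, and os.WriteFile requires Go 1.16+.

func DoWorkConcurrent(start, end int) {
    var wg sync.WaitGroup
    for i := start; i <= end; i++ {
        wg.Add(1)
        go func(page int) {
            defer wg.Done()
            url := "https://tieba.baidu.com/f?kw=%E7%BB%9D%E5%9C%B0%E6%B1%82%E7%94%9F&ie=utf-8&pn=" +
                strconv.Itoa((page-1)*50)
            result, err := HttpGet(url)
            if err != nil {
                fmt.Println("HttpGet err =", err)
                return
            }
            // Each goroutine writes its own file, so no mutex is needed here.
            if err := os.WriteFile(strconv.Itoa(page)+".html", []byte(result), 0644); err != nil {
                fmt.Println("os.WriteFile err =", err)
            }
        }(i)
    }
    // Block until every page fetch has finished.
    wg.Wait()
}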
func main() {
    var start, end int
    fmt.Println("Enter the start page (>= 1):")
    fmt.Scan(&start)
    fmt.Println("Enter the end page (>= start page):")
    fmt.Scan(&end)
    DoWork(start, end)
}
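To try the program, save the code as main.go and run it with the Go toolchain. A session for pages 1 through 3 would look roughly like this, producing 1.html, 2.html, and 3.html in the current directory:

$ go run main.go
Enter the start page (>= 1):
1
Enter the end page (>= start page):
3
Crawling pages 1 to 3
https://tieba.baidu.com/f?kw=%E7%BB%9D%E5%9C%B0%E6%B1%82%E7%94%9F&ie=utf-8&pn=0
https://tieba.baidu.com/f?kw=%E7%BB%9D%E5%9C%B0%E6%B1%82%E7%94%9F&ie=utf-8&pn=50
https://tieba.baidu.com/f?kw=%E7%BB%9D%E5%9C%B0%E6%B1%82%E7%94%9F&ie=utf-8&pn=100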
GitHub source: https://github.com/YTCfight/Baidu-Tieba-Simple-Crawler