1. Crawler approach:
1) Define the target (know which site, or which range of pages, you are going to search).
2) Fetch (download the content of every target page).
3) Extract (strip out the data that is of no use to us).
4) Process the data (store and use it in the form we want).
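In Go these four steps map almost one-to-one onto the standard library. Below is a minimal sketch under those four headings; the URL is only a placeholder, and io.ReadAll/os.WriteFile require Go 1.16 or newer. The sections that follow flesh each step out against real targets.

package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
)

func main() {
	// 1) Target: a placeholder URL; pick the real site or page range first.
	resp, err := http.Get("https://example.com")
	if err != nil {
		fmt.Println("http.Get err=", err)
		return
	}
	defer resp.Body.Close()
	// 2) Fetch: download the whole page body.
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		fmt.Println("io.ReadAll err=", err)
		return
	}
	// 3) Extract: filtering the useful parts (e.g. with regexp) would happen here.
	// 4) Process/store: write the data the way we want to use it.
	if err := os.WriteFile("page.html", body, 0644); err != nil {
		fmt.Println("os.WriteFile err=", err)
	}
}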
2. Baidu Tieba crawler
package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
	"strconv"
)

// HttpGet fetches the page at url and returns its body as a string.
func HttpGet(url string) (result string, err error) {
	resp, err1 := http.Get(url)
	if err1 != nil {
		err = err1 // propagate the error to the caller
		return
	}
	defer resp.Body.Close()
	// Read the response body in 4 KB chunks.
	buf := make([]byte, 4*1024)
	for {
		n, readErr := resp.Body.Read(buf) // separate name so the named return err isn't shadowed
		if n > 0 {
			result += string(buf[:n])
		}
		if readErr != nil {
			if readErr != io.EOF { // io.EOF only means the body has been fully read
				err = readErr
			}
			break
		}
	}
	return
}

func DoWork(start, end int) {
	fmt.Printf("Crawling pages %d to %d\n", start, end)
	// Define the target (know which site or range to search).
	// Each following page advances pn by 50.
	for i := start; i <= end; i++ {
		url := "https://tieba.baidu.com/f?kw=%E7%BB%9D%E5%9C%B0%E6%B1%82%E7%94%9F&ie=utf-8&pn=" + strconv.Itoa((i-1)*50)
		// Fetch (download the whole page).
		result, err := HttpGet(url)
		if err != nil {
			fmt.Println("HttpGet err=", err)
			continue
		}
		// Write the fetched content to a file named after the page number.
		fileName := strconv.Itoa(i) + ".html"
		f, err1 := os.Create(fileName)
		if err1 != nil {
			fmt.Println("os.Create err=", err1)
			continue
		}
		f.WriteString(result)
		f.Close() // close the file
	}
}

func main() {
	var start, end int
	fmt.Printf("Enter the start page (>= 1): ")
	fmt.Scan(&start)
	fmt.Printf("Enter the end page (>= start page): ")
	fmt.Scan(&end)
	DoWork(start, end)
}
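One practical caveat: some sites respond differently to Go's default HTTP client than to a browser. If that turns out to matter for a given target, a variant of HttpGet that sets a User-Agent header could look like the sketch below; HttpGetUA and the header value are illustrative assumptions, not part of the program above.

package main

import (
	"fmt"
	"io"
	"net/http"
)

// HttpGetUA is a hypothetical variant of HttpGet that sends a browser-like
// User-Agent header; some sites serve different content to Go's default client.
func HttpGetUA(url string) (result string, err error) {
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return
	}
	req.Header.Set("User-Agent", "Mozilla/5.0") // assumed value; adjust as needed
	resp, err2 := http.DefaultClient.Do(req)
	if err2 != nil {
		err = err2
		return
	}
	defer resp.Body.Close()
	body, err3 := io.ReadAll(resp.Body) // io.ReadAll needs Go 1.16+
	if err3 != nil {
		err = err3
		return
	}
	result = string(body)
	return
}

func main() {
	page, err := HttpGetUA("https://tieba.baidu.com/")
	if err != nil {
		fmt.Println("HttpGetUA err=", err)
		return
	}
	fmt.Println(len(page), "bytes fetched")
}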
3. Concurrent Baidu Tieba crawler
package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
	"strconv"
)

// HttpGet fetches the page at url and returns its body as a string.
func HttpGet(url string) (result string, err error) {
	resp, err1 := http.Get(url)
	if err1 != nil {
		err = err1
		return
	}
	defer resp.Body.Close()
	// Read the response body in 4 KB chunks.
	buf := make([]byte, 4*1024)
	for {
		n, readErr := resp.Body.Read(buf)
		if n > 0 {
			result += string(buf[:n])
		}
		if readErr != nil {
			if readErr != io.EOF {
				err = readErr
			}
			break
		}
	}
	return
}

// SpiderPage crawls one page and reports its number on the channel.
func SpiderPage(i int, page chan int) {
	// Always report back, even on error, so DoWork never blocks forever.
	defer func() { page <- i }()
	url := "https://tieba.baidu.com/f?kw=%E7%BB%9D%E5%9C%B0%E6%B1%82%E7%94%9F&ie=utf-8&pn=" + strconv.Itoa((i-1)*50)
	// Fetch (download the whole page).
	result, err := HttpGet(url)
	if err != nil {
		fmt.Println("HttpGet err=", err)
		return
	}
	// Write the content to a file named after the page number.
	fileName := strconv.Itoa(i) + ".html"
	f, err1 := os.Create(fileName)
	if err1 != nil {
		fmt.Println("os.Create err=", err1)
		return
	}
	f.WriteString(result)
	f.Close() // close the file
}

func DoWork(start, end int) {
	fmt.Printf("Crawling pages %d to %d\n", start, end)
	page := make(chan int) // a declared channel must still be allocated with make
	// Define the target; each following page advances pn by 50.
	for i := start; i <= end; i++ {
		go SpiderPage(i, page)
	}
	for i := start; i <= end; i++ {
		fmt.Printf("Page %d finished\n", <-page)
	}
}

func main() {
	var start, end int
	fmt.Printf("Enter the start page (>= 1): ")
	fmt.Scan(&start)
	fmt.Printf("Enter the end page (>= start page): ")
	fmt.Scan(&end)
	DoWork(start, end)
}
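The page channel here doubles as a completion counter: DoWork receives exactly one value per goroutine. The same fan-out-then-wait shape is often written with sync.WaitGroup instead. A minimal self-contained sketch, where crawlPage is just a stand-in for the body of SpiderPage:

package main

import (
	"fmt"
	"sync"
)

// crawlPage stands in for the fetch-and-save work done by SpiderPage above.
func crawlPage(i int) {
	// ... fetch page i and write i.html, as in SpiderPage ...
	fmt.Printf("Page %d finished\n", i)
}

func main() {
	var wg sync.WaitGroup
	for i := 1; i <= 10; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done() // counted as done even if crawlPage fails
			crawlPage(i)
		}(i)
	}
	wg.Wait() // block until every page has been handled
}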
4. Concurrent crawler that follows in-page links
package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
	"regexp"
	"strconv"
	"strings"
)

// HttpGet sends a GET request and returns the response body as a string.
func HttpGet(url string) (result string, err error) {
	resp, err1 := http.Get(url) // send the GET request
	if err1 != nil {
		err = err1
		return
	}
	defer resp.Body.Close()
	// Read the response body in 4 KB chunks.
	buf := make([]byte, 4*1024)
	for {
		n, readErr := resp.Body.Read(buf)
		if n > 0 {
			result += string(buf[:n])
		}
		if readErr != nil {
			if readErr != io.EOF {
				err = readErr
			}
			break
		}
	}
	return
}

// SpiderOneJoy crawls a single joke page and extracts its title and content.
func SpiderOneJoy(url string) (title, content string, err error) {
	result, err1 := HttpGet(url)
	if err1 != nil {
		err = err1
		return
	}
	// Extract the title: <h1>title</h1>.
	// regexp.MustCompile panics on an invalid pattern, so no nil check is needed.
	re := regexp.MustCompile(`<h1>(?s:(.*?))</h1>`)
	tmpTitle := re.FindAllStringSubmatch(result, 1)
	for _, data := range tmpTitle {
		title = strings.Replace(data[1], "\t", "", -1)
		break
	}
	// Extract the content between the content div and the "prev" link.
	re = regexp.MustCompile(`<div class="content-txt pt10">(?s:(.*?))<a id="prev" href="`)
	tmpContent := re.FindAllStringSubmatch(result, -1)
	for _, data := range tmpContent {
		content = data[1]
		// Strip whitespace and <br> tags.
		for _, junk := range []string{"\r\n", "\n", "\r", " ", "\t", "<br/>", "<br />"} {
			content = strings.Replace(content, junk, "", -1)
		}
		break
	}
	return
}

// StoreJoyToFile writes the titles and contents gathered from page i to i.txt.
func StoreJoyToFile(i int, fileTitle, fileContent []string) {
	// Create the output file.
	fileName := strconv.Itoa(i) + ".txt"
	f, err := os.Create(fileName)
	if err != nil {
		fmt.Println("os.Create err=", err)
		return
	}
	defer f.Close()
	n := len(fileTitle) // avoid shadowing the builtin len
	// Write each title/content pair to the file.
	for i := 0; i < n; i++ {
		f.WriteString(fileTitle[i] + "\n")
		f.WriteString(fileContent[i] + "\n")
		f.WriteString("---------\n")
	}
}

// SpiderPage crawls one listing page, then every joke page it links to.
func SpiderPage(i int, page chan int) {
	// Always report back, even on error, so DoWork never blocks forever.
	defer func() { page <- i }()
	// The target URL for listing page i.
	url := "https://www.pengfu.com/xiaohua_" + strconv.Itoa(i) + ".html"
	fmt.Printf("Crawling page %d: %s\n", i, url)
	// Fetch the listing page.
	result, err := HttpGet(url)
	if err != nil {
		fmt.Println("HttpGet err=", err)
		return
	}
	// Extract the joke URLs: <h1 class="dp-b"><a href="joke url".
	re := regexp.MustCompile(`<h1 class="dp-b"><a href="(?s:(.*?))"`)
	joyUrls := re.FindAllStringSubmatch(result, -1)
	fileTitle := make([]string, 0)
	fileContent := make([]string, 0)
	for _, data := range joyUrls {
		url := data[1]
		// Crawl the linked joke page.
		title, content, err := SpiderOneJoy(url)
		if err != nil {
			fmt.Println("SpiderOneJoy err=", err)
			continue
		}
		fileTitle = append(fileTitle, title)       // collect the title
		fileContent = append(fileContent, content) // collect the content
	}
	StoreJoyToFile(i, fileTitle, fileContent)
}

func DoWork(start, end int) {
	fmt.Printf("Preparing to crawl pages %d to %d\n", start, end)
	page := make(chan int)
	for i := start; i <= end; i++ {
		// One goroutine per listing page.
		go SpiderPage(i, page)
	}
	for i := start; i <= end; i++ {
		fmt.Printf("Page %d finished\n", <-page)
	}
}

func main() {
	var start, end int
	fmt.Printf("Enter the start page (>= 1): ")
	fmt.Scan(&start)
	fmt.Printf("Enter the end page (>= start page): ")
	fmt.Scan(&end)
	DoWork(start, end) // the worker function
}
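Spawning one goroutine per page is fine for small ranges, but a large range would open that many connections at once. A common stdlib-only way to bound the fan-out is a buffered channel used as a semaphore. In this sketch, crawl stands in for SpiderPage's body and maxWorkers is an assumed tuning value:

package main

import (
	"fmt"
	"sync"
)

// crawl stands in for the per-page work done by SpiderPage above.
func crawl(i int) {
	fmt.Printf("Page %d finished\n", i)
}

func main() {
	const maxWorkers = 5                   // assumed limit; tune per target site
	sem := make(chan struct{}, maxWorkers) // at most maxWorkers slots
	var wg sync.WaitGroup
	for i := 1; i <= 100; i++ {
		wg.Add(1)
		sem <- struct{}{} // acquire a slot (blocks when all are in use)
		go func(i int) {
			defer wg.Done()
			defer func() { <-sem }() // release the slot when done
			crawl(i)
		}(i)
	}
	wg.Wait() // block until every page has been handled
}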