Go Language Study - 5 - Pengfu Crawler Exercise 2
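
This exercise crawls the mobile joke pages of Pengfu (m.pengfue.com): for each list page it extracts the link to every joke with a regular expression, fetches each joke's title and body, and saves the results of each page into its own text file.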

package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
	"regexp"
	"strconv"
	"strings"
)

func HttpGet(url string) (result string, err error) {
	resp, err := http.Get(url)
	if err != nil {
		return
	}
	defer resp.Body.Close()
	// Read the response body in 4 KB chunks.
	buf := make([]byte, 4*1024)
	for {
		n, err2 := resp.Body.Read(buf)
		if n > 0 {
			result += string(buf[:n])
		}
		if err2 == io.EOF {
			// io.EOF marks the normal end of the body.
			break
		}
		if err2 != nil {
			err = err2
			return
		}
	}
	return
}
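// A more compact alternative to the chunked read loop above (a sketch,
// assuming Go 1.16+ where io.ReadAll is available):
//
//	body, err := io.ReadAll(resp.Body)
//	if err != nil {
//		return "", err
//	}
//	return string(body), nil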
func SpiderOneJoy(url string) (title, content string, err error) {
	result, err := HttpGet(url)
	if err != nil {
		return
	}
	// Extract the title: the contents of the <title> tag. Note that
	// regexp.MustCompile panics on an invalid pattern rather than
	// returning nil, so no nil check is needed.
	re1 := regexp.MustCompile(`<title>(?s:(.*?))</title>`)
	if m := re1.FindStringSubmatch(result); m != nil {
		title = strings.ReplaceAll(m[1], "\t", "")
	}
	// Extract the body: the text inside the <div class="con-txt"> element.
	re2 := regexp.MustCompile(`<div class="con-txt">(?s:(.*?))<`)
	if m := re2.FindStringSubmatch(result); m != nil {
		content = strings.ReplaceAll(m[1], "\t", "")
		content = strings.ReplaceAll(content, "\n", "")
		content = strings.ReplaceAll(content, "\r", "")
	}
	return
}
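// For reference, FindStringSubmatch returns the first match as a
// []string in which index 0 is the whole match and index 1 the first
// capture group. A minimal illustration (hypothetical input, not taken
// from the real site):
//
//	re := regexp.MustCompile(`<title>(?s:(.*?))</title>`)
//	m := re.FindStringSubmatch("<title>hello</title>")
//	fmt.Println(m[1]) // prints "hello"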
func StoreJoyFile(page int, fileTitle, fileContent []string) {
	// Create one text file per page, named after the page number.
	f, err := os.Create(strconv.Itoa(page) + ".txt")
	if err != nil {
		fmt.Println("os.Create err = ", err)
		return
	}
	defer f.Close()

	// Write each title/content pair, separated by a dashed rule.
	for i := 0; i < len(fileTitle); i++ {
		f.WriteString(fileTitle[i] + "\n")
		f.WriteString(fileContent[i])
		f.WriteString("\n----------------------------------------------------\n")
	}
}
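// The WriteString errors above are ignored for brevity; sturdier code
// would check them, e.g. (a sketch):
//
//	if _, err := f.WriteString(fileTitle[i] + "\n"); err != nil {
//		fmt.Println("f.WriteString err = ", err)
//		return
//	}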
func SpiderPage(i int) {
	url := "https://m.pengfue.com/xiaohua_" + strconv.Itoa(i) + ".html"
	fmt.Printf("Crawling page %d: %s\n", i, url)
	// Fetch the list page.
	result, err := HttpGet(url)
	if err != nil {
		fmt.Println("HttpGet err = ", err)
		return
	}
	// Extract the URL of every joke on the list page.
	re := regexp.MustCompile(`<h1 class="f18"><a href="(?s:(.*?))"`)
	joyUrls := re.FindAllStringSubmatch(result, -1) // -1 means all matches

	fileTitle := make([]string, 0)
	fileContent := make([]string, 0)
	// Crawl each joke page and collect its title and body.
	for _, data := range joyUrls {
		title, content, err := SpiderOneJoy(data[1])
		if err != nil {
			fmt.Println("SpiderOneJoy err = ", err)
			continue
		}
		fileTitle = append(fileTitle, title)
		fileContent = append(fileContent, content)
	}
	StoreJoyFile(i, fileTitle, fileContent)
}
func DoWork(start, end int) {
	fmt.Printf("About to crawl pages %d through %d\n", start, end)
	for i := start; i <= end; i++ {
		SpiderPage(i)
	}
}
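// DoWork crawls the pages one at a time. Each page could instead be
// fetched in its own goroutine (a sketch, not part of this sequential
// exercise; it would also need "sync" in the imports):
//
//	var wg sync.WaitGroup
//	for i := start; i <= end; i++ {
//		wg.Add(1)
//		go func(page int) {
//			defer wg.Done()
//			SpiderPage(page)
//		}(i)
//	}
//	wg.Wait()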
func main() {
	var start, end int
	fmt.Printf("Enter the start page (>= 1): ")
	fmt.Scan(&start)
	fmt.Printf("Enter the end page (>= start page): ")
	fmt.Scan(&end)
	DoWork(start, end) // driver function
}
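
Running the program and entering, for example, 1 and 2 at the prompts should leave 1.txt and 2.txt in the working directory, each holding the scraped titles and joke bodies separated by dashed lines. This assumes m.pengfue.com is still reachable and that its markup still matches the regular expressions above.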