2022-11-19 Day 12 homework: full-site scrape of 顶点小说, 3 pages of 京东 data, 3 pages of 震坤行 data

一、Full-site scrape of 顶点小说 (the code only needs to be able to keep crawling if left running; it does not have to finish)

First, get all the category links.

import requests
import re
from lxml import etree
import pymysql

headers = {
'Host': 'www.23us.co',
'Referer': 'https://www.baidu.com/link?url=Kf8hqBe68nV-DipI3bExcurGlRpGxvq2j0kBbVOrk37&wd=&eqid=91dc6e33001b84210000000263787966',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36'
}

db = pymysql.connect(user='root', password='123456', db='顶点小说', charset='utf8mb4')  # connect to MySQL; utf8mb4 so Chinese text is stored correctly
cursor = db.cursor()  # cursor


def get_max_page(urls):
    # read a category's first listing page and return the page number shown on the "last page" link
    source = requests.get(urls, headers=headers).content.decode('gbk')
    max_page = etree.HTML(source).xpath('//a[@class="last"]/text()')
    return int(max_page[0])

def save_chapter_list(**kwargs):
    book_name = kwargs.get('book_name', '未取到')
    book_id = kwargs.get('book_id', '未取到')
    status = kwargs.get('status', 0)
    chapter_list_url = kwargs.get('chapter_list_url', '未取到')
    # build and run the INSERT statement
    sql = 'insert into book(book_name,book_id,status,chapter_list_url) values ("{}","{}","{}","{}")'.format(book_name, book_id, status, chapter_list_url)
    cursor.execute(sql)
    db.commit()

def save_book(**kwargs):
    chapter_name = kwargs.get('chapter_name', '未取到')
    bid = kwargs.get('bid', '未取到')
    status = kwargs.get('status', 0)
    chapter_contents = kwargs.get('chapter_contents', '未取到')
    chapter_url = kwargs.get('chapter_url', '未取到')
    sql = 'insert into chapters(chapter_name,bid,status,chapter_contents,chapter_url) values ("{}","{}","{}","{}","{}")'.format(chapter_name, bid, status, chapter_contents, chapter_url)
    # print(sql)
    cursor.execute(sql)
    db.commit()
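
# The two INSERT statements above assume the 顶点小说 database already has a `book`
# table and a `chapters` table. The assignment does not give the schema, so the
# following is only a minimal sketch that would accept those INSERTs: column names
# come from the SQL above, the types and lengths are my own assumptions.
cursor.execute('''
create table if not exists book(
    id int primary key auto_increment,
    book_name varchar(255),
    book_id varchar(32),
    status int,
    chapter_list_url varchar(255)
)''')
cursor.execute('''
create table if not exists chapters(
    id int primary key auto_increment,
    chapter_name varchar(255),
    bid varchar(32),
    status int,
    chapter_contents longtext,
    chapter_url varchar(255)
)''')
db.commit()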

# Step 3: read one category listing page and yield the chapter-list URL of every book on it
def get_book_chapter_list(urls):
    source = requests.get(urls, headers=headers).content.decode('gbk')
    book_links = etree.HTML(source).xpath('//tr/td[1]/a')  # the <a> node of every book on this listing page

    # turn an entry on a listing page such as 'http://www.23us.co/class/3_1.html' into its
    # chapter-list URL, e.g. http://www.23us.co/html/3/3553/
    for i in book_links:
        href = i.xpath('@href')[0]  # the book's own link
        book_name = i.xpath('text()')[0]  # book title
        book_id = href.split('/')[-1]  # split on '/' and take the last part: the numeric book id
        # the directory in the chapter-list URL is the book id with its last three digits dropped
        # (3553 -> 3, 75084 -> 75), i.e. book_id // 1000
        chapter_list_url = 'http://23us.co/html/{}/{}/'.format(int(book_id) // 1000, book_id)  # chapter-list page URL
        # the keyword arguments here must match the columns of the book table
        save_chapter_list(book_name=book_name, book_id=book_id, status=0, chapter_list_url=chapter_list_url)
        yield chapter_list_url


def get_contents(urls):
    # regex out the book id from a chapter URL such as 'http://23us.co/html/75/75084/29219318.html'
    source = requests.get(urls, headers=headers).content.decode('gbk')

    demo = re.compile(r'http://23us\.co/html/\d+/(\d+)/\d+\.html')
    book_id = demo.findall(source)[0]  # book id
    title = etree.HTML(source).xpath('//h1/text()')[0]  # chapter title
    contents = ''.join(etree.HTML(source).xpath('//dd[@id="contents"]/text()'))  # chapter text
    return save_book(bid=book_id, chapter_name=title, chapter_contents=contents, chapter_url=urls)


def get_contents_title(urls):
    source = requests.get(urls, headers=headers).content.decode('gbk')
    hrefs = etree.HTML(source).xpath('//tr/td/a/@href')
    if len(hrefs) > 0:  # guard against empty chapter lists; each href is a bare file name such as 29219318.html
        for href in hrefs:
            chapter_url = urls + href  # full chapter URL (the chapter-list URL already ends with '/')
            get_contents(chapter_url)


for category in range(1, 11):  # loop over the 10 category ids
    first_page_url = 'http://23us.co/class/{}_1.html'.format(category)  # first listing page of this category
    max_page = get_max_page(first_page_url)  # number of listing pages in this category
    for page in range(1, max_page + 1):  # loop from the first page to the last page
        # 'http://23us.co/class/{}_1.html': split('_')[0] keeps the part before '_',
        # then re-append '_<page>.html' to build the URL of every listing page
        every_page_url = first_page_url.split('_')[0] + '_' + str(page) + '.html'  # URL of this listing page
        chapter_list_url = get_book_chapter_list(every_page_url)
        for urls in chapter_list_url:
            get_contents_title(urls)
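
Since the goal is only that the crawler can keep running, a single timeout or connection reset would currently stop the whole loop. A minimal sketch of a retry wrapper that the requests.get calls above could be routed through (the fetch_gbk name, retry count, and sleep time are my own choices, not part of the assignment):

import time
import requests

def fetch_gbk(url, retries=3, delay=2):
    # try the request a few times before giving up, so one bad response
    # does not kill the whole crawl; uses the headers dict defined above
    for attempt in range(retries):
        try:
            resp = requests.get(url, headers=headers, timeout=10)
            return resp.content.decode('gbk', errors='ignore')
        except requests.RequestException:
            time.sleep(delay)
    return ''  # callers should check for an empty page before parsing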

二、Scraping three pages of data from 京东商城 (JD.com)

# Start by finding the ajax request s_new.php?keyword=%E8%8B%B9%E6%9E%9C&qrst=1&sugges
# Search landing page:
# https://search.jd.com/Search?keyword=%E8%8B%B9%E6%9E%9C&enc=utf-8&suggest=1.his.0.0&wq=&pvid=652f33ee3c6940a8a8a1fbb376df10f3
# Upper half of display page 1:
# https://search.jd.com/s_new.php?keyword=%E8%8B%B9%E6%9E%9C&qrst=1&suggest=1.his.0.0&wq=%E8%8B%B9%E6%9E%9C&stock=1&pvid=652f33ee3c6940a8a8a1fbb376df10f3&page=1&click=0
# Lower half of display page 1 (show_items carries the sku ids already rendered in the upper half):
# https://search.jd.com/s_new.php?keyword=%E8%8B%B9%E6%9E%9C&qrst=1&suggest=1.his.0.0&wq=%E8%8B%B9%E6%9E%9C&stock=1&pvid=652f33ee3c6940a8a8a1fbb376df10f3&page=2&s=26&scrolling=y&log_id=1669279334193.4075&tpl=1_M&isList=0&show_items=100026667858,100026667872,10060060919711,100026667910,100027743464,100026667880,10053064404367,100038005189,100038004339,100038004353,100014352501,100015852622,10059999976386,100008348508,100027743410,100038004397,100034710036,100008348542,10060552162473,100008348530,100038089871,100038004367,100038089809,100026809200,10051966429575,100038089887,100038325457,10058881289310,10037139655204,100038089855
# Upper and lower halves of display page 2:
# https://search.jd.com/s_new.php?keyword=%E8%8B%B9%E6%9E%9C&qrst=1&suggest=1.his.0.0&wq=%E8%8B%B9%E6%9E%9C&stock=1&pvid=652f33ee3c6940a8a8a1fbb376df10f3&page=3&s=56&click=0
# https://search.jd.com/s_new.php?keyword=%E8%8B%B9%E6%9E%9C&qrst=1&suggest=1.his.0.0&wq=%E8%8B%B9%E6%9E%9C&stock=1&pvid=652f33ee3c6940a8a8a1fbb376df10f3&page=4&s=86&scrolling=y&log_id=1669279373020.5179&tpl=1_M&isList=0&show_items=10049638316236,100030101538,100044025921,10058908310957,100013068007,10047839401820,10050485280145,55503948289,100014352527,10065644760300,10049727109533,10033629792612,10044392193508,10020180486779,10057405983433,72247401303,3877149,10062570130862,10045630151903,100030101516,10055840647627,10056531996648,10059507231925,10060422239266,66753551547,10057799807780,100001660806,10060339498310,10057405983435,10037452592874
# First attempt (a single display page, no loop), kept for reference:
# import requests
# from lxml import etree
# headers = {
# 'referer': 'https://search.jd.com/',
# 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36 Edg/107.0.1418.56'
# }
# # source1 = requests.get('https://search.jd.com/s_new.php?keyword=%E8%8B%B9%E6%9E%9C&qrst=1&suggest=1.his.0.0&wq=%E8%8B%B9%E6%9E%9C&stock=1&pvid=652f33ee3c6940a8a8a1fbb376df10f3&page=1click=0',headers=headers).text
# source2 = requests.get('https://search.jd.com/s_new.php?keyword=%E8%8B%B9%E6%9E%9C&qrst=1&suggest=1.his.0.0&wq=%E8%8B%B9%E6%9E%9C&stock=1&pvid=652f33ee3c6940a8a8a1fbb376df10f3&page=2&s=26&scrolling=y&log_id=1669279334193.4075&tpl=1_M&isList=0&show_items=100026667858,100026667872,10060060919711,100026667910,100027743464,100026667880,10053064404367,100038005189,100038004339,100038004353,100014352501,100015852622,10059999976386,100008348508,100027743410,100038004397,100034710036,100008348542,10060552162473,100008348530,100038089871,100038004367,100038089809,100026809200,10051966429575,100038089887,100038325457,10058881289310,10037139655204,100038089855',headers=headers).text
# # print(source1,source2)
# divs = etree.HTML(source2).xpath('//div[@class="gl-i-wrap"]')
# # content = etree.HTML(source).xpath('//div[@class="gl-i-wrap"]/div[3]/a/em//text()')
# print(len(divs))
# for div in divs:
#     price = div.xpath("div[2]/strong/i/text()")[0]
#     content = ''.join(div.xpath("div[3]/a/em//text()"))
#     print(price,content)



import requests
from lxml import etree
import re
headers = {
'referer': 'https://search.jd.com/',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36 Edg/107.0.1418.56'
}

# Loop over the ajax pages to pull 3 display pages of results
p = 1  # display-page counter
for page in range(1, 7, 2):  # ajax pages 1, 3, 5 = upper halves of display pages 1, 2, 3
    # upper half of the display page
    source1 = requests.get('https://search.jd.com/s_new.php?keyword=%E8%8B%B9%E6%9E%9C&qrst=1&suggest=1.his.0.0&wq=%E8%8B%B9%E6%9E%9C&stock=1&pvid=652f33ee3c6940a8a8a1fbb376df10f3&page={}&click=0'.format(page), headers=headers).text
    # the upper-half response embeds the sku ids of its items as wids:'...';
    # the lower-half request has to send them back in show_items
    demo = re.compile(r"wids:'(.*?)'")
    num = demo.findall(source1)[0]
    print(num)
    # lower half of the display page (ajax page + 1)
    source2 = requests.get('https://search.jd.com/s_new.php?keyword=%E8%8B%B9%E6%9E%9C&qrst=1&suggest=1.his.0.0&wq=%E8%8B%B9%E6%9E%9C&stock=1&pvid=652f33ee3c6940a8a8a1fbb376df10f3&page={}&s=26&scrolling=y&log_id=1669279334193.4075&tpl=1_M&isList=0&show_items={}'.format(page + 1, num), headers=headers).text
    # print(source1, source2)
    divs1 = etree.HTML(source1).xpath('//div[@class="gl-i-wrap"]')
    divs2 = etree.HTML(source2).xpath('//div[@class="gl-i-wrap"]')
    divs = divs1 + divs2
    print(len(divs))
    for div in divs:
        price = div.xpath("div[2]/strong/i/text()")[0]
        content = ''.join(div.xpath("div[3]/a/em//text()"))
        print(price, content)

    print('Finished display page {}'.format(p))
    p += 1
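
Printing is enough for the assignment, but if you want to keep the three pages of results, a minimal sketch that collects the (price, title) pairs and writes them to a CSV file (the jd_apple.csv file name and the column headers are my own choice):

import csv

rows = []  # fill this inside the `for div in divs:` loop above, e.g. rows.append((price, content))

with open('jd_apple.csv', 'w', newline='', encoding='utf-8-sig') as f:
    writer = csv.writer(f)
    writer.writerow(['price', 'title'])
    writer.writerows(rows)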

三、Scraping three pages of data from 震坤行 (zkh.com)

# Found the pc endpoint that carries the 口罩 (mask) search results; it is a POST request, and the important part is the JSON payload it submits
# https://www.zkh.com/servezkhApi/search/product/pc?traceId=339496351669356422414
# {"catalogueId":"","cityCode":510100,"clp":true,"suggestModelDTO":null,"extraFilter":{"showIndustryFeatured":false,"inStock":false},"from":0,"fz":false,"keyword":"口罩","productFilter":{"brandIds":[],"properties":{}},"searchType":{"notNeedCorrect":false},"size":20,"suggestPriceOnly":false,"searchScene":1,"showType":"pic","brandId":"","rangeFilter":[{"min":"","max":"","rangeType":"price"}],"sort":0,"section":2,"spuSize":6,"needClassifiedBrand":true,"searchSceneNo":""}
# https://www.zkh.com/servezkhApi/search/product/pc?traceId=672217471669356556959
# {"catalogueId":"","cityCode":510100,"clp":true,"suggestModelDTO":null,"extraFilter":{"showIndustryFeatured":false,"inStock":false},"from":0,"fz":false,"keyword":"口罩","productFilter":{"brandIds":[],"properties":{}},"searchType":{"notNeedCorrect":false},"size":20,"suggestPriceOnly":false,"searchScene":1,"showType":"pic","brandId":"","rangeFilter":[{"min":"","max":"","rangeType":"price"}],"sort":0,"section":3,"spuSize":6,"needClassifiedBrand":true,"searchSceneNo":""}


headers={
'origin': 'https://www.zkh.com',
'referer': 'https://www.zkh.com/search.html?keywords=%E5%8F%A3%E7%BD%A9&hasLinkWord=1',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36 Edg/107.0.1418.56',
'cookie': 'AGL_USER_ID=f5353657-0d49-42b9-a71b-4befb2d69bca; _bl_uid=h8lqhag8p5R0aswX0nX4tqgxjjqv; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%2218493e308593d1-0fdf9f5bef6ccb-7d5d5475-921600-18493e3085a716%22%2C%22first_id%22%3A%22%22%2C%22props%22%3A%7B%22%24latest_utm_medium%22%3A%22sem1%22%2C%22%24latest_utm_campaign%22%3A%22C-%E5%93%81%E7%89%8C%22%2C%22%24latest_utm_content%22%3A%22%E6%A0%B8%E5%BF%83%22%7D%2C%22identities%22%3A%22eyIkaWRlbnRpdHlfY29va2llX2lkIjoiMTg0OTNlMzA4NTkzZDEtMGZkZjlmNWJlZjZjY2ItN2Q1ZDU0NzUtOTIxNjAwLTE4NDkzZTMwODVhNzE2In0%3D%22%2C%22history_login_id%22%3A%7B%22name%22%3A%22%22%2C%22value%22%3A%22%22%7D%2C%22%24device_id%22%3A%2218493e308593d1-0fdf9f5bef6ccb-7d5d5475-921600-18493e3085a716%22%7D; anonymous_id=18493e308593d1-0fdf9f5bef6ccb-7d5d5475-921600-18493e3085a716; webSource=https%3A%2F%2Fwww.zkh.com%2F%3Futm_source%3Dbaidu%26utm_medium%3Dsem1%26utm_ter%3D%25E9%259C%2587%25E5%259D%25A4%25E8%25A1%258Cmro%26utm_content%3D%25E6%25A0%25B8%25E5%25BF%2583%26utm_campaign%3DC-%25E5%2593%2581%25E7%2589%258C%26sdclkid%3DAL2D152ibrDiAOqpALe_%26bd_vid%3D11204147592075616689; sensorsdata2015session=%7B%7D; citycode=%7B%22provinceName%22%3A%22%E5%9B%9B%E5%B7%9D%E7%9C%81%22%2C%22cityName%22%3A%22%E6%88%90%E9%83%BD%E5%B8%82%22%2C%22provinceCode%22%3A510000%2C%22cityCode%22%3A510100%7D; Hm_lvt_c9156633fc15595028b4d81a3571a23f=1668928445,1669356182; utmStore=%7B%22flow_type%22%3A%22%E5%85%8D%E8%B4%B9%22%2C%22%24utm_source%22%3A%22baidu%22%2C%22%24utm_medium%22%3A%22sem1%22%2C%22%24utm_content%22%3A%22%E6%A0%B8%E5%BF%83%22%2C%22%24utm_campaign%22%3A%22C-%E5%93%81%E7%89%8C%22%7D; p_pub_key=MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC36XGQaO8cG2ifwZNixxe7HVyqlzELwo2DC+LwgvE0Q8rjwLXxSucPAJrYnA3C3c8/moKiVHEs9U4rciZv4jW2FyG6ivXRnHouHpSVjl83LfYbL2QwXyDurSfGSelPDgC5QCs11TgF26N3FEa4f/kvypcEfNIgkK0MHBBK7Gp4cwIDAQAB; p_pub_gr=1669356018056; zaf_ukey=d368446d79fd4ebdb66ed5c753f50530; Hm_lpvt_c9156633fc15595028b4d81a3571a23f=1669357141; JSESSIONID=E307A9B5A975766DC4EA2527E66B7FD3'
}
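
The notes above stop at the captured headers; the request itself still has to be sent. The two captured payloads are identical except for "section" (2 vs 3), so "section" appears to be the page index. The following is only a minimal sketch under that assumption: it re-sends the captured payload for sections 1 to 3 and prints the raw JSON, because the response schema still has to be inspected before the product names and prices can be picked out.

import json
import requests

url = 'https://www.zkh.com/servezkhApi/search/product/pc?traceId=339496351669356422414'
# payload copied from the captured request; only "section" is changed per page (assumption)
payload = {"catalogueId": "", "cityCode": 510100, "clp": True, "suggestModelDTO": None,
           "extraFilter": {"showIndustryFeatured": False, "inStock": False}, "from": 0, "fz": False,
           "keyword": "口罩", "productFilter": {"brandIds": [], "properties": {}},
           "searchType": {"notNeedCorrect": False}, "size": 20, "suggestPriceOnly": False,
           "searchScene": 1, "showType": "pic", "brandId": "",
           "rangeFilter": [{"min": "", "max": "", "rangeType": "price"}], "sort": 0,
           "section": 1, "spuSize": 6, "needClassifiedBrand": True, "searchSceneNo": ""}

for section in range(1, 4):  # three pages
    payload['section'] = section
    resp = requests.post(url, headers=headers, json=payload)  # uses the headers dict captured above
    print(section, resp.status_code)
    # dump the start of the raw response to see where the product list and prices live
    print(json.dumps(resp.json(), ensure_ascii=False)[:500])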