# 第一个爬虫 (my first crawler)
# -*- coding: utf-8 -*-
# @Time : 2021/1/20/0020 22:20
# @Author : WXH
# @File : dangdang.py
# @Software : PyCharm
# Disabled first version: Dangdang five-star bestseller crawler.
# Kept as reference inside a triple-quoted string so it never runs;
# it scraped 25 list pages with a regex and appended JSON lines to book.txt.
'''
import json
import requests
import re
def main(page):
    url = 'http://bang.dangdang.com/books/fivestars/01.00.00.00.00.00-recent30-0-0-1-' + str(page)
    html = request_dandan(url)
    items = parse_result(html)
    for item in items:
        write_item_to_file(item)

def request_dandan(url):
    try:
        response = requests.get(url)
        if response.status_code == 200:
            return response.text
    except requests.RequestException:
        return None

def parse_result(html):
    pattern = re.compile('<li>.*?list_num.*?(\d+).</div>.*?<img src="(.*?)".*?class="name".*?title="(.*?)">.*?class="star">.*?class="tuijian">(.*?)</span>.*?class="publisher_info">.*?target="_blank">(.*?)</a>.*?class="biaosheng">.*?<span>(.*?)</span></div>.*?<p><span\sclass="price_n">&yen;(.*?)</span>.*?</li>',re.S)
    items = re.findall(pattern, html)
    for item in items:
        yield {
            'range': item[0],
            'iamge': item[1],
            'title': item[2],
            'recommend': item[3],
            'author': item[4],
            'times': item[5],
            'price': item[6]
        }

def write_item_to_file(item):
    print('正在写入-->' + str(item))
    with open('book.txt', 'a', encoding='utf-8') as f:
        f.write(json.dumps(item, ensure_ascii=False) + '\n')
        f.close()

if __name__ == "__main__":
   for i in range(1,26):
       main(i)
'''
# Disabled second version: Weibo hot-search crawler.
# Also kept as dead reference inside a string literal; it fetched the
# hot-search summary page and wrote each topic title to hot.txt.
'''
import json
import requests
import re
from bs4 import BeautifulSoup
def main(page):
    url = 'https://s.weibo.com/top/summary?Refer=top_hot&topnav=1&wvr=6'
    html = request_dandan(url)
    soap = BeautifulSoup(html, 'lxml')
    items = soap.find_all('td', class_='td-02')
    items = parse_result(html)
    for item in items:
        write_item_to_file(item)

def request_dandan(url):
    try:
        response = requests.get(url)
        if response.status_code == 200:
            return response.text
    except requests.RequestException:
        return None

def parse_result(html):
    pattern = re.compile('<td class="td-01 ranktop">(\d+)</td>.*?<a href=.*?target="_blank">(.*?)</a>',re.S)

    items = re.findall(pattern, html)
    for item in items:
        yield {
            'rank': item[0],
            'text': item[1],
        }

def write_item_to_file(item):
    print('正在写入-->' + item.a.text)
    with open('hot.txt', 'a', encoding='utf-8') as f:
        #f.write(json.dumps(item, ensure_ascii=False) + '\n')
        f.write(item.a.text + '\n')
        f.close()

if __name__ == "__main__":
    main(0)
'''


import requests
import re
from bs4 import BeautifulSoup
import xlwt
def main(page):
    """Scrape one page (25 movies) of the Douban Top250 list and write the
    rows into the module-level ``sheet`` worksheet.

    page -- zero-based page index; page p covers ranks p*25+1 .. p*25+25.

    Relies on the sibling ``request`` helper for the HTTP GET and on the
    global ``sheet`` created in the ``__main__`` guard.
    """
    url = 'https://movie.douban.com/top250?start=' + str(page * 25) + '&filter='
    html = request(url)
    soup = BeautifulSoup(html, 'lxml')
    items = soup.find(class_='grid_view').find_all('li')
    # The header occupies row 0, so the first movie of page 0 belongs in
    # row 1.  (The original pre-incremented the row counter before the
    # first write, leaving row 1 blank and shifting every movie down by
    # one row.)
    row = 1 + page * 25
    for item in items:
        name = item.find(class_='title').text
        index = item.find('em').text                           # rank as shown on the page
        cast = item.find('p').text                             # director / cast blurb
        score = item.find('span', class_='rating_num').text
        print('writing-' + str(row))
        sheet.write(row, 0, index)
        sheet.write(row, 1, name)
        sheet.write(row, 2, score)
        sheet.write(row, 3, cast)
        row += 1


def request(url):
    """GET *url* with a browser User-Agent and return the response body.

    Returns None on any network error, timeout, or non-200 status —
    matching the contract of the earlier ``request_dandan`` versions in
    this file, which also checked ``status_code == 200``.
    """
    headers = {
        # Pretend to be a browser; Douban rejects the default
        # python-requests User-Agent.
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36',
    }
    try:
        # timeout keeps a stalled connection from hanging the whole run.
        response = requests.get(url, headers=headers, timeout=10)
        if response.status_code == 200:
            return response.text
        return None
    except requests.RequestException:
        return None


def write_item_to_file(item):
    # NOTE(review): dead code — the only call site (inside main) is commented
    # out.  It prints the item's rank and then builds an xlwt Workbook that is
    # immediately discarded; presumably an abandoned earlier design.
    print('正在写入-->' + item.find('em').text)
    book = xlwt.Workbook(encoding='utf-8', style_compression=0)



if __name__ == "__main__":
    # One workbook with a single sheet; header labels go in row 0.
    book = xlwt.Workbook(encoding='utf-8', style_compression=0)
    sheet = book.add_sheet('豆瓣电影Top250', cell_overwrite_ok=True)
    sheet.write(0, 0, '排名')
    sheet.write(0, 1, '名称')
    sheet.write(0, 2, '评分')
    # NOTE(review): this column actually holds the director/cast blurb,
    # not a book author — label kept as-is to preserve the output file.
    sheet.write(0, 3, '作者')
    # Douban Top250 spans exactly 10 pages of 25 movies (start=0..225).
    # The original range(0, 25) ran 15 pages past the end, where
    # soup.find(class_='grid_view') returns None and main() crashes.
    for page in range(0, 10):
        main(page)
    book.save(u'豆瓣最受欢迎的250部电影.xls')

# (Trailing CSDN page boilerplate — like/favorite counters, comment widget,
#  and red-packet payment text from the scraped blog page — removed; it was
#  not part of the source code.)