# 爬虫学习，爬取网站数据一 (Web-scraping practice: scraping site data, part 1)

# -*- coding: utf-8 -*-
import json
import os

import requests
from requests.exceptions import RequestException
import re
from bs4 import BeautifulSoup
import random
import urllib
import urllib.request

from multiprocessing import Pool


def get_context_from_url(url):
    """Fetch *url* and return the response body decoded as UTF-8.

    A User-Agent is picked at random from a small pool of real browser
    strings so the target site is less likely to block the scraper.

    :param url: absolute URL of the page to download.
    :return: page content as a ``str`` (UTF-8 decoded).
    :raises urllib.error.URLError: on network / HTTP failure.
    """
    user_agents = [
        "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36",
        "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:30.0) Gecko/20100101 Firefox/30.0",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/537.75.14",
        "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; Win64; x64; Trident/6.0)"
    ]
    random_header = random.choice(user_agents)
    req = urllib.request.Request(url)
    req.add_header("User-Agent", random_header)
    # NOTE: the original also did req.add_header("GET", url), which sent a
    # bogus header literally named "GET"; Request already performs a GET,
    # so that line was removed.
    # Close the response deterministically instead of relying on GC.
    with urllib.request.urlopen(req) as response:
        context = response.read().decode('utf-8')
    return context


def parse_html_by_re(content):
    """Parse a Maoyan board page and yield one dict per movie entry.

    Each ``<dd>...</dd>`` entry yields a dict with keys:
    ``index`` (board rank), ``image`` (poster URL), ``title``,
    ``actor`` ("主演：" prefix stripped), ``time`` ("上映时间："
    prefix stripped) and ``score`` (integer + fraction part joined).

    :param content: raw HTML of one board page.
    :return: generator of dicts, one per movie.
    """
    # BUGFIX: the original pattern used ([\d+]) — a one-character class
    # matching a single digit or '+' — so rank "10" was captured as a
    # single character.  (\d+) captures the whole number.
    pattern = re.compile('<dd>.*?board-index.*?>(\\d+)</i>.*?data-src="(.*?)".*?name"><a.*?>(.*?)</a>.*?star">(.*?)</p>'
                         '.*?releasetime">(.*?)</p>.*?integer">(.*?)</i>.*?fraction">(.*?)</i>.*?</dd>', re.S)
    items = re.findall(pattern, content)
    for item in items:
        yield {
            'index': item[0],
            'image': item[1],
            'title': item[2],
            'actor': item[3].strip()[3:],   # drop the "主演：" prefix (3 chars)
            'time': item[4].strip()[5:],    # drop the "上映时间：" prefix (5 chars)
            'score': item[5] + item[6]      # e.g. "9." + "5" -> "9.5"
        }




def write_html_context_to_file(content):
    """Append one record to the output file as a single JSON line.

    :param content: a JSON-serializable object (one parsed movie dict).
    """
    # Raw string avoids invalid-escape warnings on the Windows path;
    # the path value itself is unchanged.
    with open(r'd:\python\movie.txt', 'a', encoding='utf-8') as fp:
        # ensure_ascii=False keeps the Chinese titles human-readable.
        fp.write(json.dumps(content, ensure_ascii=False) + '\n')
        # NOTE: the redundant fp.close() was removed — the `with`
        # statement already closes the file on exit.

def main(offset):
    """Scrape one page of the Maoyan Top-100 board and persist every entry.

    :param offset: pagination offset passed to the board URL
                   (0, 10, 20, ... selects successive pages of ten).
    """
    page_url = 'https://maoyan.com/board/4?offset=' + str(offset)
    page_html = get_context_from_url(page_url)
    for record in parse_html_by_re(page_html):
        write_html_context_to_file(record)



if __name__ == '__main__':
    # Start from a clean output file so repeated runs don't append
    # duplicate records (raw string: same path, no escape warnings).
    if os.access(r'd:\python\movie.txt', os.F_OK):
        os.remove(r'd:\python\movie.txt')

    # Scrape the 10 board pages (offsets 0, 10, ..., 90) in parallel
    # worker processes.  Using Pool as a context manager terminates the
    # pool on exit — the original never called close()/join() and
    # leaked the worker processes.  map() blocks until all pages done.
    with Pool() as pool:
        pool.map(main, [i * 10 for i in range(10)])
  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值