# -*- coding:utf-8 -*-
#获取要爬取的html
import re
from multiprocessing.pool import Pool
import requests
def get_html_page(url):
    """Fetch *url* and return the response body as text.

    Returns None when the request fails at the network level or the
    server answers with anything other than HTTP 200.
    """
    # Send a browser-like User-Agent so the site does not reject us as a bot.
    headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11"}
    try:
        # timeout keeps a dead/slow server from hanging this worker forever
        response = requests.get(url, headers=headers, timeout=10)
        if response.status_code == 200:
            return response.text
        return None
    except requests.RequestException as e:
        # Network-level failure: report it and signal "no page" to the caller.
        print(e)
        return None
def parse_html_page(html):
    """Yield one dict per movie entry found in a Maoyan top-100 board page.

    Each dict carries the rank index, image URL, title, actors, release
    time and score. Yields nothing when *html* is falsy (e.g. when the
    download failed and get_html_page returned None).
    """
    if not html:
        # get_html_page() returns None on failure; treat that as an empty page.
        return
    # Raw strings keep the regex escapes (\d) valid under modern Python.
    pattern = re.compile(
        r'<dd>.*?board-index.*?>(\d+)</i>.*?data-src="(.*?)".*?name"><a'
        r'.*?>(.*?)</a>.*?star">(.*?)</p>.*?releasetime">(.*?)</p>'
        r'.*?integer">(.*?)</i>.*?fraction">(.*?)</i>.*?</dd>', re.S)
    for item in pattern.findall(html):
        yield {
            'index': item[0],
            'image': item[1],
            'title': item[2],
            # drop the 3-char "主演:" prefix
            'actor': item[3].strip()[3:],
            # drop the 5-char "上映时间:" prefix
            'time': item[4].strip()[5:],
            # integer part + fraction part, e.g. "9." + "5" -> "9.5"
            'score': item[5] + item[6]
        }
def main(offset):
    """Download one board page at the given *offset* and print each entry."""
    url = 'http://maoyan.com/board/4?offset=' + str(offset)
    html = get_html_page(url)
    if html is None:
        # Download failed; nothing to parse for this page.
        return
    for item in parse_html_page(html):
        # Print the result; could instead be written to a file or a
        # database depending on the need.
        print(item)
        # write_to_file(item)
# Script entry point: scrape pages in parallel worker processes.
if __name__ == '__main__':
    pool = Pool()
    # The board paginates in steps of 10; offsets 0, 10, ..., 90.
    pool.map(main, [i * 10 for i in range(10)])
    pool.close()
    pool.join()  # wait for the workers to finish before exiting
#获取要爬取的html
import re
from multiprocessing.pool import Pool
import requests
def get_html_page(url):
    """Download *url* and return its body text, or None on any failure.

    A non-200 status or a network error both map to None so callers can
    treat the page as simply unavailable.
    """
    # Masquerade as a desktop browser; the site blocks default clients.
    headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11"}
    try:
        # A bounded timeout prevents an unresponsive host from stalling us.
        response = requests.get(url, headers=headers, timeout=10)
        if response.status_code == 200:
            return response.text
        return None
    except requests.RequestException as e:
        # Catch only requests' own errors, log, and report "no page".
        print(e)
        return None
def parse_html_page(html):
    """Generator over the movie entries embedded in one board page.

    Produces dicts with index/image/title/actor/time/score keys.
    A falsy *html* (failed download) yields nothing instead of raising.
    """
    if not html:
        # Downstream of a failed fetch: behave like an empty page.
        return
    # Raw string literals so the \d escape stays a valid regex token.
    pattern = re.compile(
        r'<dd>.*?board-index.*?>(\d+)</i>.*?data-src="(.*?)".*?name"><a'
        r'.*?>(.*?)</a>.*?star">(.*?)</p>.*?releasetime">(.*?)</p>'
        r'.*?integer">(.*?)</i>.*?fraction">(.*?)</i>.*?</dd>', re.S)
    for item in pattern.findall(html):
        yield {
            'index': item[0],
            'image': item[1],
            'title': item[2],
            # slice off the "主演:" label (3 characters)
            'actor': item[3].strip()[3:],
            # slice off the "上映时间:" label (5 characters)
            'time': item[4].strip()[5:],
            # score is split into integer and fraction halves in the markup
            'score': item[5] + item[6]
        }
def main(offset):
    """Fetch the board page at *offset* and print every parsed entry."""
    url = 'http://maoyan.com/board/4?offset=' + str(offset)
    html = get_html_page(url)
    if html is None:
        # Fetch failed — skip this page rather than crash the worker.
        return
    for item in parse_html_page(html):
        # Output to stdout; swap in file/database persistence as needed.
        print(item)
        # write_to_file(item)
# Entry point: fan the ten page offsets out across a process pool.
if __name__ == '__main__':
    offsets = [step * 10 for step in range(10)]
    pool = Pool()
    pool.map(main, offsets)
    pool.close()
    pool.join()
# 很高兴再次将自己学习的新东西写入博客,希望大家多多支持,一起学习