崔庆才 Python3爬虫入门到精通课程视频练习
利用多进程和re 爬取猫眼电影TOP100的电影数据
爬虫与数据分析:Github
代码如下:
# -*- coding:utf-8 -*-
import requests
import re
import json
from multiprocessing import Pool
def get_one_page(url):
    """Download one Maoyan board page.

    Args:
        url: full board-page URL (e.g. .../board/4?offset=0).

    Returns:
        The HTML text on HTTP 200, or None on any failure
        (non-200 status, connection error, or timeout).
    """
    headers = {
        # browser UA — presumably Maoyan rejects the default requests UA; keep it
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.117 Safari/537.36'
    }
    try:
        # timeout keeps a pool worker from hanging forever on a stalled socket
        response = requests.get(url, headers=headers, timeout=10)
    except requests.RequestException:
        # network error: signal failure the same way a bad status does
        return None
    if response.status_code == 200:
        return response.text
    return None
import pandas as pd
def parse_one_page(html):
    """Extract the movie entries from one board page.

    Args:
        html: page HTML as returned by get_one_page; None/empty is tolerated.

    Returns:
        A list of dicts with keys index, image, title, actor, time, score.
        Empty list when html is falsy (e.g. the download failed).
    """
    if not html:
        # get_one_page returns None on failure; re.findall(None) would raise
        return []
    pattern = re.compile(
        '<dd>.*?board-index.*?>(.*?)</i>.*?data-src="(.*?)".*?name.*?a.*?>(.*?)</a>'
        '.*?star.*?>(.*?)</p>.*?releasetime.*?>(.*?)</p>'
        '.*?integer.*?>(.*?)</i>.*?fraction.*?>(.*?)</i>.*?</dd>',
        re.S)
    content = []
    for item in pattern.findall(html):
        content.append({
            'index': item[0],
            'image': item[1],
            'title': item[2].strip(),
            # drop the 3-char "主演:" prefix from the actor field
            'actor': item[3].strip()[3:] if len(item[3]) > 3 else '',
            # drop the 5-char "上映时间:" prefix from the release-time field
            'time': item[4].strip()[5:] if len(item[4]) > 5 else '',
            # score is split in the page markup: integer part + fraction part
            'score': item[5].strip() + item[6].strip(),
        })
    return content
def write_to_file(content):
    """Append parsed movie dicts to maoyan.csv.

    The header row is written only when the file does not exist yet, so
    repeated appends (one call per page) no longer interleave duplicate
    header lines into the data — the original mode='a+' wrote a header
    on every call.

    NOTE(review): the exists-check is not atomic across the pool's worker
    processes; with concurrent writers a duplicate header is still
    possible — confirm whether per-process output files are needed.
    """
    import os
    df = pd.DataFrame(content)
    # utf-8-sig so spreadsheet tools detect the encoding of Chinese titles
    df.to_csv('maoyan.csv', index=False, mode='a',
              header=not os.path.exists('maoyan.csv'),
              encoding='utf-8-sig')
def main(offset):
    """Fetch, parse and persist one page of the TOP100 board.

    Args:
        offset: board paging offset (0, 10, ..., 90).
    """
    url = 'http://maoyan.com/board/4?offset=' + str(offset)
    html = get_one_page(url)
    if html is None:
        # download failed (non-200 / network error): skip this page
        return
    write_to_file(parse_one_page(html))
import time
if __name__ == '__main__':
    start = time.time()
    # One worker process per CPU core; offsets 0,10,...,90 cover all ten
    # pages of the TOP100 board. The context manager tears the pool down
    # (the original never closed/joined it).
    with Pool() as pool:
        pool.map(main, [i * 10 for i in range(10)])
    print('花费时间:', time.time() - start)
多线程
一般通过两种方式实现多线程,
第一种方式是把一个函数传入并创建实例,然后调用start方法执行;
第二种方式是直接从threading.Thread继承并创建线程类,然后重写__init__方法和run方法。
import time
import random
import threading
def t_run(urls):
    """Thread body: announce start, visit each url with a random pause, announce end.

    Args:
        urls: iterable of url strings to "process" (printed, then slept on).
    """
    # threading.current_thread() returns the Thread object for the calling
    # thread; threads not created through the threading module get a
    # limited dummy object instead.
    me = threading.current_thread().name
    print('Current %s is running...' % me)
    for url in urls:
        print(' threading %s -----> %s ' % (me, url))
        time.sleep(random.random())
    print('%s ended.' % me)
if __name__ == '__main__':
    # Two worker threads, each with its own batch of urls.
    workers = [
        threading.Thread(target=t_run, name='Thread_1', args=(['url1', 'url2'],)),
        threading.Thread(target=t_run, name='Thread_2', args=(['url3', 'url4'],)),
    ]
    # Start both, then wait for both — same order as starting/joining
    # each thread individually.
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    print('%s ended.' % threading.current_thread().name)
第二种方式用threading.Thread继承创建线程类
# -*- coding: utf-8 -*-
import time
import random
import threading
class MyThread(threading.Thread):
    """Thread subclass that walks a list of urls, pausing randomly between them."""

    def __init__(self, name, urls):
        """Store the url batch and pass the thread name to the base class.

        Args:
            name: thread name shown in the log output.
            urls: iterable of url strings this thread will process.
        """
        super().__init__(name=name)
        self.urls = urls

    def run(self):
        """Thread entry point: log start, visit every url, log completion."""
        thread_name = threading.current_thread().name
        print('Current %s is running...' % thread_name)
        for url in self.urls:
            print('Thread %s ------> %s' % (thread_name, url))
            time.sleep(random.random())
        print('%s ended.' % thread_name)
if __name__ == '__main__':
    print('%s is running...' % threading.current_thread().name)
    # Same two-thread demo as above, via the Thread subclass.
    workers = [
        MyThread(name='Thread_1', urls=['url1', 'url2']),
        MyThread(name='Thread_2', urls=['url3', 'url4']),
    ]
    for w in workers:
        w.start()
    for w in workers:
        w.join()
    print('%s ended.' % threading.current_thread().name)
from multiprocessing.dummy import Pool as ThreadPool
if __name__ == '__main__':
    # Thread pool (multiprocessing API, thread-backed). With no argument the
    # pool size defaults to os.cpu_count() — the original comment claimed a
    # fixed default of 5, which is not what the constructor does.
    pool = ThreadPool()
    # run main concurrently for offsets 0, 10, ..., 90
    pool.map(main,[i*10 for i in range(10)])
    pool.close()
    pool.join()
from queue import Queue
from threading import Thread

# Queues for a producer/consumer pipeline.
# NOTE(review): neither queue is read or written in this snippet — they
# appear to be scaffolding for a later exercise; confirm before removing.
queue_url = Queue(maxsize=10)  # bounded url queue, capacity 10
result_queue = Queue()

threads = []
for i in range(10):
    # Fix: target was misspelled "mian" (NameError at thread start).
    # Offsets step by 10 so the ten workers cover all ten board pages,
    # consistent with the Pool examples above.
    t = Thread(target=main, args=(i * 10,))
    t.start()
    threads.append(t)
# Wait for every worker so the final message really marks the end of the run
# (the original printed it while the threads were still working).
for t in threads:
    t.join()
print('main thread stop')