# Queue basics: FIFO demonstration.
# Enqueue the numbers 0..99, then dequeue and print them until the
# queue is empty — items come back in the order they were inserted.
from queue import Queue

q = Queue()
for value in range(100):
    q.put(value)  # enqueue

while not q.empty():
    print(q.get())  # dequeue — FIFO order
#版本1.0
import time
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import os
def save_to_html(html_str, filename):
    """Write an HTML string to *filename* (UTF-8), creating parent dirs.

    Args:
        html_str: page source text to persist.
        filename: destination path; missing parent directories are created.
    """
    dirname = os.path.dirname(filename)
    # makedirs(exist_ok=True) handles nested paths and avoids the
    # exists()/mkdir() race; the original os.mkdir failed on nested
    # directories and raised when dirname was '' (bare filename).
    if dirname:
        os.makedirs(dirname, exist_ok=True)
    with open(filename, 'w', encoding='utf-8') as fp:
        fp.write(html_str)
    print('下载完成!', filename)
def get_content_by_selenium(url):
    """Fetch *url* in a headless browser and return the rendered page source.

    Waits up to 20 seconds for the recruit list element to appear so the
    JS-rendered content is present in the returned HTML.

    NOTE(review): webdriver.PhantomJS is deprecated/removed in modern
    Selenium — consider headless Chrome/Firefox when upgrading.
    """
    driver = webdriver.PhantomJS()
    try:
        wait = WebDriverWait(driver, 20)
        # Issue the request.
        driver.get(url)
        # Wait until the job-list container has been rendered.
        wait.until(EC.presence_of_all_elements_located(
            (By.XPATH, '//div[@class="recruit-list"]')))
        return driver.page_source
    finally:
        # Always release the browser; the original leaked one PhantomJS
        # process per call.
        driver.quit()
def main():
    """Crawl search-result pages 1-19 sequentially and save each to disk."""
    base_url = 'https://careers.tencent.com/search.html?index=%s'
    page = 1
    while page < 20:
        page_source = get_content_by_selenium(base_url % page)
        save_to_html(page_source, './tencent/{}.html'.format(page))
        page += 1
if __name__ == '__main__':
    # Time the sequential (single-threaded) crawl.
    started_at = time.time()
    main()
    print(time.time() - started_at)  # previously measured: 64.02966213226318 s
#版本1.1
import time
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import os
import threading
def save_to_html(html_str, filename):
    """Write an HTML string to *filename* (UTF-8), creating parent dirs.

    Args:
        html_str: page source text to persist.
        filename: destination path; missing parent directories are created.
    """
    dirname = os.path.dirname(filename)
    # makedirs(exist_ok=True) handles nested paths and is safe when
    # several download threads race to create the directory — the
    # original exists()/mkdir() pair could raise FileExistsError.
    if dirname:
        os.makedirs(dirname, exist_ok=True)
    with open(filename, 'w', encoding='utf-8') as fp:
        fp.write(html_str)
    print('下载完成!', filename)
def get_content_by_selenium(url):
    """Fetch *url* in a headless browser and return the rendered page source.

    Waits up to 20 seconds for the recruit list element to appear so the
    JS-rendered content is present in the returned HTML.

    NOTE(review): webdriver.PhantomJS is deprecated/removed in modern
    Selenium — consider headless Chrome/Firefox when upgrading.
    """
    driver = webdriver.PhantomJS()
    try:
        wait = WebDriverWait(driver, 20)
        # Issue the request.
        driver.get(url)
        # Wait until the job-list container has been rendered.
        wait.until(EC.presence_of_all_elements_located(
            (By.XPATH, '//div[@class="recruit-list"]')))
        return driver.page_source
    finally:
        # Always release the browser; the original leaked one PhantomJS
        # process per call — one per thread in this version.
        driver.quit()
def download(i):
    """Fetch search-result page *i* and persist it as ./tencent/<i>.html."""
    base_url = 'https://careers.tencent.com/search.html?index=%s'
    page_source = get_content_by_selenium(base_url % i)
    save_to_html(page_source, './tencent/{}.html'.format(i))
if __name__ == '__main__':
    started_at = time.time()
    # Thread-per-task approach. Drawbacks: the thread count equals the
    # task count (does not scale to large workloads) and tasks are not
    # executed in any guaranteed order.
    workers = []
    for page in range(1, 20):
        worker = threading.Thread(target=download, args=(page,))
        worker.start()
        workers.append(worker)
    # Block the main thread until every download thread has finished,
    # so the elapsed-time report below is accurate.
    for worker in workers:
        worker.join()
    print(time.time() - started_at)  # previously measured: 21.61223602294922 s
#版本1.11
import time
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import os
import threading
class Tencent(threading.Thread):
    """Worker thread that downloads one Tencent careers search page.

    Each instance is bound to a single page number and fetches/saves it
    when started.
    """

    def __init__(self, page):
        super().__init__()
        self.page = page  # page number this worker is responsible for

    def save_to_html(self, html_str, filename):
        """Write *html_str* to *filename* (UTF-8), creating parent dirs."""
        dirname = os.path.dirname(filename)
        # makedirs(exist_ok=True) is safe when several worker threads
        # race to create ./tencent at the same time; the original
        # exists()/mkdir() pair could raise FileExistsError, and mkdir
        # also failed for nested paths.
        if dirname:
            os.makedirs(dirname, exist_ok=True)
        with open(filename, 'w', encoding='utf-8') as fp:
            fp.write(html_str)
        print('下载完成!', filename)

    def get_content_by_selenium(self, url):
        """Fetch *url* and return the page source once the list renders.

        NOTE(review): webdriver.PhantomJS is deprecated/removed in modern
        Selenium — consider headless Chrome/Firefox when upgrading.
        """
        driver = webdriver.PhantomJS()
        try:
            wait = WebDriverWait(driver, 20)
            driver.get(url)
            # Wait until the job-list container has been rendered.
            wait.until(EC.presence_of_all_elements_located(
                (By.XPATH, '//div[@class="recruit-list"]')))
            return driver.page_source
        finally:
            # Always release the browser; the original leaked one
            # PhantomJS process per thread.
            driver.quit()

    def download(self, i):
        """Fetch search-result page *i* and save it under ./tencent/."""
        base_url = 'https://careers.tencent.com/search.html?index=%s'
        html_str = self.get_content_by_selenium(base_url % i)
        self.save_to_html(html_str, './tencent/{}.html'.format(i))

    def run(self):
        # Thread entry point: process the single page assigned to us.
        self.download(self.page)
if __name__ == '__main__':
    started_at = time.time()
    # Thread-subclass variant of the thread-per-task approach. Same
    # drawbacks as before: thread count equals task count and there is
    # no ordering guarantee.
    workers = []
    for page in range(1, 20):
        worker = Tencent(page)
        worker.start()
        workers.append(worker)
    # Wait for every worker before reporting the elapsed time.
    for worker in workers:
        worker.join()
    print(time.time() - started_at)  # previously measured: 21.61223602294922 s
#版本1.12
import time
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import os
import threading
from queue import Queue
class Tencent(threading.Thread):
    """Worker thread that drains a shared queue of page numbers.

    A fixed pool of these workers pulls page numbers from one Queue and
    downloads the corresponding Tencent careers search pages, so the
    thread count stays bounded regardless of the task count.
    """

    def __init__(self, q, name):
        super().__init__()
        self.q = q        # shared Queue of page numbers still to crawl
        self.name = name  # thread label used in progress logging

    def save_to_html(self, html_str, filename):
        """Write *html_str* to *filename* (UTF-8), creating parent dirs."""
        dirname = os.path.dirname(filename)
        # makedirs(exist_ok=True) is safe when several workers race to
        # create ./tencent concurrently; the original exists()/mkdir()
        # pair could raise FileExistsError, and mkdir also failed for
        # nested paths.
        if dirname:
            os.makedirs(dirname, exist_ok=True)
        with open(filename, 'w', encoding='utf-8') as fp:
            fp.write(html_str)
        print('下载完成!', filename)

    def get_content_by_selenium(self, url):
        """Fetch *url* and return the page source once the list renders.

        NOTE(review): webdriver.PhantomJS is deprecated/removed in modern
        Selenium — consider headless Chrome/Firefox when upgrading.
        """
        driver = webdriver.PhantomJS()
        try:
            wait = WebDriverWait(driver, 20)
            driver.get(url)
            # Wait until the job-list container has been rendered.
            wait.until(EC.presence_of_all_elements_located(
                (By.XPATH, '//div[@class="recruit-list"]')))
            return driver.page_source
        finally:
            # Always release the browser; the original leaked one
            # PhantomJS process per downloaded page.
            driver.quit()

    def download(self, i):
        """Fetch search-result page *i* and save it under ./tencent/."""
        base_url = 'https://careers.tencent.com/search.html?index=%s'
        html_str = self.get_content_by_selenium(base_url % i)
        self.save_to_html(html_str, './tencent/{}.html'.format(i))

    def run(self):
        """Consume page numbers until the shared queue is exhausted."""
        from queue import Empty  # local import keeps the block self-contained
        while True:
            # get_nowait()/Empty closes the empty()->get() race in the
            # original: with several workers the queue can drain between
            # the two calls, leaving a blocking get() hung forever.
            try:
                i = self.q.get_nowait()
            except Empty:
                break
            print('============第{}页==================@{}'.format(i, self.name))
            self.download(i)
if __name__ == '__main__':
    started_at = time.time()
    # Producer/consumer setup:
    #   1. fill a task queue with all page numbers;
    #   2. start a fixed pool of worker threads that drain it.
    # Benefits over the thread-per-task versions: the thread count is
    # bounded by the pool size, and tasks are picked up in queue order.
    task_queue = Queue()
    for page in range(1, 20):
        task_queue.put(page)  # enqueue page numbers as tasks
    # The length of this list is the number of worker threads.
    worker_names = ['aa', 'bb', 'cc', 'dd']
    workers = []
    for label in worker_names:
        worker = Tencent(task_queue, label)
        worker.start()
        workers.append(worker)
    # Block the main thread until every worker has drained the queue.
    for worker in workers:
        worker.join()
    print(time.time() - started_at)  # previously measured: 24.451398611068726 s