"""Scrape Douban movie subject URLs.

Reads one category-listing URL per line from f://ty.txt, loads each page
in Chrome, scrolls to the bottom enough times to trigger lazy loading of
every entry (20 entries per scroll/page), then extracts all
movie.douban.com/subject/<id>/ links from the page source and appends
them to f://jq.txt.
"""
from bs4 import BeautifulSoup  # only used by the commented-out category-extraction code below
from selenium import webdriver
import re
import time

# NOTE(review): executable_path is the Selenium 3 style; Selenium 4 would
# use Service(...) instead — confirm the installed version.
driver = webdriver.Chrome(executable_path=r'E:\rj\Chrome\chromedriver.exe')

# One category URL per line.
with open('f://ty.txt', 'r') as f:
    category_urls = f.read().strip().split('\n')

# Compile once instead of on every loop iteration.
COUNT_RE = re.compile(r'<span class="operator-count">\((\d+)\)</span>')
SUBJECT_RE = re.compile(r'https://movie.douban.com/subject/\d+?/')

# driver.execute_script("window.scrollTo(0,70000)")
for url in category_urls:
    print('00000000')  # progress marker: starting a new category page
    try:
        driver.get(url)
        time.sleep(4)  # give the page time to render
    except Exception:
        # Skip pages that fail to load instead of aborting the whole run.
        # (Narrowed from a bare `except:` so Ctrl-C still works.)
        continue
    page = driver.page_source
    match = COUNT_RE.search(str(page))
    if match is None:
        # Unexpected page layout: no operator-count span — nothing to scrape.
        continue
    # 20 entries load per scroll, so this many scrolls reveal everything.
    scrolls = int(match.group(1)) // 20 + 1
    print(scrolls)
    for _ in range(scrolls):
        driver.execute_script("window.scrollTo(0, document.body.scrollHeight)")
        time.sleep(4)  # wait for the next batch of entries to lazy-load
        print('11111')  # progress marker: one scroll completed
    time.sleep(3)
    page = driver.page_source
    # Deduplicate: each subject link appears multiple times in the markup.
    movie_urls = set(SUBJECT_RE.findall(str(page)))
    # Open the output file once per category instead of once per URL.
    with open('f://jq.txt', 'a') as out:
        for movie_url in movie_urls:
            out.write(movie_url + '\n')
# bs = BeautifulSoup(page, 'lxml')
# ty = bs.find("div", "types").find_all("a")
# tyurl = ['https://movie.douban.com' + i.get('href') for i in ty]
# for i in tyurl:
#     with open('f://ty.txt', 'a') as f:
#         f.write(i + '\n')
## Extract the category (ty) links and write them to a file
# NOTE(review): everything below is a verbatim duplicate of the script
# above — it launches a second Chrome instance and re-scrapes the same
# category list, appending to the same output file. Kept to preserve the
# original behavior; consider deleting one copy.
from selenium import webdriver
import re
import time

driver = webdriver.Chrome(executable_path=r'E:\rj\Chrome\chromedriver.exe')

# One category URL per line.
with open('f://ty.txt', 'r') as f:
    category_urls = f.read().strip().split('\n')

# Compile once instead of on every loop iteration.
COUNT_RE = re.compile(r'<span class="operator-count">\((\d+)\)</span>')
SUBJECT_RE = re.compile(r'https://movie.douban.com/subject/\d+?/')

# driver.execute_script("window.scrollTo(0,70000)")
for url in category_urls:
    print('00000000')  # progress marker: starting a new category page
    try:
        driver.get(url)
        time.sleep(4)  # give the page time to render
    except Exception:
        # Skip pages that fail to load instead of aborting the whole run.
        continue
    page = driver.page_source
    match = COUNT_RE.search(str(page))
    if match is None:
        # Unexpected page layout: no operator-count span — nothing to scrape.
        continue
    # 20 entries load per scroll, so this many scrolls reveal everything.
    scrolls = int(match.group(1)) // 20 + 1
    print(scrolls)
    for _ in range(scrolls):
        driver.execute_script("window.scrollTo(0, document.body.scrollHeight)")
        time.sleep(4)  # wait for the next batch of entries to lazy-load
        print('11111')  # progress marker: one scroll completed
    time.sleep(3)
    page = driver.page_source
    # Deduplicate: each subject link appears multiple times in the markup.
    movie_urls = set(SUBJECT_RE.findall(str(page)))
    # Open the output file once per category instead of once per URL.
    with open('f://jq.txt', 'a') as out:
        for movie_url in movie_urls:
            out.write(movie_url + '\n')
# bs = BeautifulSoup(page, 'lxml')
# ty = bs.find("div", "types").find_all("a")
# tyurl = ['https://movie.douban.com' + i.get('href') for i in ty]
# for i in tyurl:
#     with open('f://ty.txt', 'a') as f:
#         f.write(i + '\n')
## Extract the category (ty) links and write them to a file