#!/usr/bin/env python
# (the bare word "python" here was a leftover markdown code-fence language tag)

import re
import time

from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.by import By
def url_open(driver, content):
    """Type *content* into the Taobao search box, submit, and return the
    resulting page source.

    driver:  a selenium WebDriver already positioned on the Taobao home page.
    content: the search query string.
    Returns the raw HTML of the first result page.
    """
    search_box = driver.find_element(By.CSS_SELECTOR, '#q')
    search_box.send_keys(content)
    submit = driver.find_element(
        By.CSS_SELECTOR, '#J_TSearchForm > div.search-button > button')
    submit.click()
    return driver.page_source
def url_xi(html):
    """Parse an HTML string into a BeautifulSoup tree using the lxml parser."""
    return BeautifulSoup(html, 'lxml')
def url_page(soup):
    """Page through every result page after the first.

    Reads the total page count from the ``div.total`` element of the first
    result page, then drives the pager's page-number input to load pages
    2..N, parsing and printing each with url_jie.

    NOTE(review): uses the module-level ``driver`` created in the
    ``__main__`` block — confirm it exists before calling this.
    """
    total = soup.find('div', class_='total')
    if total is None:  # no pager on the page (e.g. zero results)
        return
    for count in re.findall(r'\d+', total.get_text()):
        for page_no in range(2, int(count) + 1):
            time.sleep(2)  # let the page settle before paging on
            page_input = driver.find_element(
                By.CSS_SELECTOR,
                '#mainsrp-pager > div > div > div > div.form > input')
            page_input.clear()
            page_input.send_keys(page_no)
            confirm = driver.find_element(
                By.CSS_SELECTOR,
                '#mainsrp-pager > div > div > div > div.form > span.btn.J_Submit')
            confirm.click()
            url_jie(url_xi(driver.page_source))

def url_jie(soup):
    """Extract item name, sales count, price, location and shop name from one
    Taobao result page and print one dict per item.

    soup: BeautifulSoup tree of a search-result page.
    """
    # The original regex patterns lost their '*' characters to markdown
    # italics when pasted to a blog; they are reconstructed here as
    # non-greedy captures — verify against the live page HTML.
    title_re = re.compile(r'<img alt="(.*?)" .*?/>', re.S)  # name from the alt= attribute
    sale_re = re.compile(r'>(.*?)<', re.S)                  # text inside the deal-cnt tag

    titles = soup.find_all('img', class_='J_ItemPic img')          # item names
    prices = soup.find_all('div', class_='price g_price g_price-highlight')
    shops = soup.find_all('a', class_='shopname J_MouseEneterLeave J_ShopInfo')
    addresses = soup.find_all('div', class_='location')
    sales = soup.find_all('div', class_='deal-cnt')                # sales counts

    # Flatten each tag to single-line text (strip embedded newlines).
    price_texts = [p.get_text().replace('\n', '') for p in prices]
    address_texts = [a.get_text().replace('\n', '') for a in addresses]
    shop_texts = [s.get_text().replace('\n', '') for s in shops]

    for title1, sale1, list1, address1, store1 in zip(
            titles, sales, price_texts, address_texts, shop_texts):
        data = {
            'title1': re.findall(title_re, str(title1)),
            'sale1': re.findall(sale_re, str(sale1)),
            'list1': list1,
            'address1': address1,
            'store1': store1,
        }
        print(data)

if __name__ == '__main__':
    # Open Taobao in Firefox, run the search the user asks for, then parse
    # the first result page and page through the rest.
    url = 'https://www.taobao.com/'
    content = input("请输入查询内容:")
    driver = webdriver.Firefox()
    driver.get(url)
    html = url_open(driver, content)
    soup = url_xi(html)
    url_jie(soup)
    url_page(soup)
from selenium import webdriver
import re
from bs4 import BeautifulSoup
import time
# Duplicate paste of the script follows (blog artifact); redefinitions below
# simply shadow the identical definitions above.
def url_open(driver, content):
    """Type *content* into the Taobao search box, submit, and return the
    resulting page source.

    driver:  a selenium WebDriver already positioned on the Taobao home page.
    content: the search query string.
    Returns the raw HTML of the first result page.
    """
    search_box = driver.find_element(By.CSS_SELECTOR, '#q')
    search_box.send_keys(content)
    submit = driver.find_element(
        By.CSS_SELECTOR, '#J_TSearchForm > div.search-button > button')
    submit.click()
    return driver.page_source
def url_xi(html):
    """Parse an HTML string into a BeautifulSoup tree using the lxml parser."""
    return BeautifulSoup(html, 'lxml')
def url_page(soup):
    """Page through every result page after the first.

    Reads the total page count from the ``div.total`` element of the first
    result page, then drives the pager's page-number input to load pages
    2..N, parsing and printing each with url_jie.

    NOTE(review): uses the module-level ``driver`` created in the
    ``__main__`` block — confirm it exists before calling this.
    """
    total = soup.find('div', class_='total')
    if total is None:  # no pager on the page (e.g. zero results)
        return
    for count in re.findall(r'\d+', total.get_text()):
        for page_no in range(2, int(count) + 1):
            time.sleep(2)  # let the page settle before paging on
            page_input = driver.find_element(
                By.CSS_SELECTOR,
                '#mainsrp-pager > div > div > div > div.form > input')
            page_input.clear()
            page_input.send_keys(page_no)
            confirm = driver.find_element(
                By.CSS_SELECTOR,
                '#mainsrp-pager > div > div > div > div.form > span.btn.J_Submit')
            confirm.click()
            url_jie(url_xi(driver.page_source))

def url_jie(soup):
    """Extract item name, sales count, price, location and shop name from one
    Taobao result page and print one dict per item.

    soup: BeautifulSoup tree of a search-result page.
    """
    # The original regex patterns lost their '*' characters to markdown
    # italics when pasted to a blog; they are reconstructed here as
    # non-greedy captures — verify against the live page HTML.
    title_re = re.compile(r'<img alt="(.*?)" .*?/>', re.S)  # name from the alt= attribute
    sale_re = re.compile(r'>(.*?)<', re.S)                  # text inside the deal-cnt tag

    titles = soup.find_all('img', class_='J_ItemPic img')          # item names
    prices = soup.find_all('div', class_='price g_price g_price-highlight')
    shops = soup.find_all('a', class_='shopname J_MouseEneterLeave J_ShopInfo')
    addresses = soup.find_all('div', class_='location')
    sales = soup.find_all('div', class_='deal-cnt')                # sales counts

    # Flatten each tag to single-line text (strip embedded newlines).
    price_texts = [p.get_text().replace('\n', '') for p in prices]
    address_texts = [a.get_text().replace('\n', '') for a in addresses]
    shop_texts = [s.get_text().replace('\n', '') for s in shops]

    for title1, sale1, list1, address1, store1 in zip(
            titles, sales, price_texts, address_texts, shop_texts):
        data = {
            'title1': re.findall(title_re, str(title1)),
            'sale1': re.findall(sale_re, str(sale1)),
            'list1': list1,
            'address1': address1,
            'store1': store1,
        }
        print(data)

if __name__ == '__main__':
    # Open Taobao in Firefox, run the search the user asks for, then parse
    # the first result page and page through the rest.
    url = 'https://www.taobao.com/'
    content = input("请输入查询内容:")
    driver = webdriver.Firefox()
    driver.get(url)
    html = url_open(driver, content)
    soup = url_xi(html)
    url_jie(soup)
    url_page(soup)

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值