这里写自定义目录标题
前言
这学期学了python爬虫,所以用python写了一个爬取京东商品关注信息的程序,由于这是我的第一篇CSDN,所以可能格式都不是很好看,希望见谅。
代码
下面就直接上代码好了
其中selenium对应的安装参考这里说的很详细了
selenium对应三大浏览器(谷歌、火狐、IE)驱动安装.
requests 和 BeautifulSoup需要用管理员权限进入cmd使用pip install XXXX来安装
其它直接在pycharm里面直接导入就可以了
# -*- coding: utf-8 -*-
import requests
from lxml import etree
import csv
from bs4 import BeautifulSoup
from selenium import webdriver
import re
def getHTMLText(goods):
    """Fetch the JD search-result page HTML for the given keyword.

    Args:
        goods: the search keyword (appended raw to the query string).

    Returns:
        The decoded page text, or "" if the request fails for any
        network/HTTP reason.
    """
    url = 'https://search.jd.com/Search?keyword=' + goods + '&enc=utf-8'
    # Headers copied from a real browser session so JD serves the full page.
    head = {
        'authority': 'search.jd.com',
        'method': 'GET',
        'path': '/s_new.php?keyword=%E6%89%8B%E6%9C%BA&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&wq=%E6%89%8B%E6%9C%BA&cid2=653&cid3=655&page=4&s=84&scrolling=y&log_id=1529828108.22071&tpl=3_M&show_items=7651927,7367120,7056868,7419252,6001239,5934182,4554969,3893501,7421462,6577495,26480543553,7345757,4483120,6176077,6932795,7336429,5963066,5283387,25722468892,7425622,4768461',
        'scheme': 'https',
        'referer': 'https://search.jd.com/Search?keyword=%E6%89%8B%E6%9C%BA&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&wq=%E6%89%8B%E6%9C%BA&cid2=653&cid3=655&page=3&s=58&click=0',
        'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36',
        'x-requested-with': 'XMLHttpRequest',
        'Cookie': 'qrsc=3; pinId=RAGa4xMoVrs; xtest=1210.cf6b6759; ipLocation=%u5E7F%u4E1C; _jrda=5; TrackID=1aUdbc9HHS2MdEzabuYEyED1iDJaLWwBAfGBfyIHJZCLWKfWaB_KHKIMX9Vj9_2wUakxuSLAO9AFtB2U0SsAD-mXIh5rIfuDiSHSNhZcsJvg; shshshfpa=17943c91-d534-104f-a035-6e1719740bb6-1525571955; shshshfpb=2f200f7c5265e4af999b95b20d90e6618559f7251020a80ea1aee61500; cn=0; 3AB9D23F7A4B3C9B=QFOFIDQSIC7TZDQ7U4RPNYNFQN7S26SFCQQGTC3YU5UZQJZUBNPEXMX7O3R7SIRBTTJ72AXC4S3IJ46ESBLTNHD37U; ipLoc-djd=19-1607-3638-3638.608841570; __jdu=930036140; user-key=31a7628c-a9b2-44b0-8147-f10a9e597d6f; areaId=19; __jdv=122270672|direct|-|none|-|1529893590075; PCSYCityID=25; mt_xid=V2_52007VwsQU1xaVVoaSClUA2YLEAdbWk5YSk9MQAA0BBZOVQ0ADwNLGlUAZwQXVQpaAlkvShhcDHsCFU5eXENaGkIZWg5nAyJQbVhiWR9BGlUNZwoWYl1dVF0%3D; __jdc=122270672; shshshfp=72ec41b59960ea9a26956307465948f6; rkv=V0700; __jda=122270672.930036140.-.1529979524.1529984840.85; __jdb=122270672.1.930036140|85.1529984840; shshshsID=f797fbad20f4e576e9c30d1c381ecbb1_1_1529984840145'
    }
    try:
        r = requests.get(url, headers=head, timeout=30)
        r.raise_for_status()
        # The page may mis-declare its charset; trust the content sniffer.
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException:
        # Narrowed from a bare `except:` so that KeyboardInterrupt and
        # programming errors are not silently swallowed.
        return ""
def searchGoods(brand):
    """Extract the brand filter list from a JD search page, prompt the
    user to choose one, and return the chosen brand's absolute URL.

    Args:
        brand: the search-result HTML text (output of getHTMLText).

    Returns:
        The absolute "https://search.jd.com/..." URL for the brand the
        user selected.

    Raises:
        ValueError: if the brand list <ul> cannot be found in the page
            (e.g. the request was blocked or the page layout changed).
    """
    soup = BeautifulSoup(brand, 'html.parser')
    brand_list = soup.find('ul', {"class": "J_valueList v-fixed"})
    if brand_list is None:
        # Fail with a clear message instead of an AttributeError on
        # .find_all(None) when JD blocks the request or changes markup.
        raise ValueError("brand list not found in page HTML")

    Goods_name = []
    Goods_href = []
    for li in brand_list.find_all('li'):
        Goods_name.append(li.a.attrs['title'])
        Goods_href.append(li.a.attrs['href'])

    count = len(Goods_name)
    for idx, name in enumerate(Goods_name, start=1):
        print("<<<{}.".format(idx), "品牌 :" + name)

    # Keep prompting until the user enters an integer in [1, count].
    while True:
        Goods_num = input("请输入品牌对应序号:")
        if not Goods_num.isdigit():
            print("您的输入有误,请输入数字:")
            continue
        choice = int(Goods_num)
        if choice > count:
            print("输入序号过大,请重新输入:")
        elif choice < 1:
            print("输入序号过小,请重新输入:")
        else:
            break

    print("选择的品牌是: " + Goods_name[choice - 1])
    brand_url = "https://search.jd.com/" + Goods_href[choice - 1]
    return brand_url
def orderBy(brand_url):
judge = 1
while(judge):
kind = input("按照:综合 / 销量 / 评论数 / 新品 / 价格 进行排序(默认综合)")
strinfo