抓取贴吧图片的示例脚本(按关键字搜索贴吧,遍历指定页码范围的帖子并下载帖内图片)

 
 
import urllib.request
from urllib import parse
from lxml import etree
import re

class Tieba():
    """Scrape inline post images from Baidu Tieba threads.

    Workflow: ``sendRequest`` walks forum listing pages, extracts thread
    links, hands each thread to ``URLlHandle``, which collects image URLs
    and passes them to ``writeImage`` to save in the current directory.
    """

    def __init__(self):
        # Browser-like User-Agent so Tieba serves the normal desktop pages.
        self.headers = {"User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:60.0) Gecko/20100101 Firefox/60.0"}

    def sendRequest(self, url, begin, end):
        """Fetch listing pages ``begin``..``end`` (inclusive, 1-based) of the
        forum search URL ``url`` and process every thread link found.

        Pagination is appended as ``&pn=<offset>`` with 50 threads per page.
        """
        for value in range(begin, end + 1):
            index = (value - 1) * 50  # Tieba paginates in steps of 50
            fullurl = url + "&pn=" + str(index)
            request = urllib.request.Request(fullurl, headers=self.headers)
            response = urllib.request.urlopen(request).read()
            # Thread links appear as href="/p/<id>" in the listing HTML.
            pattern = re.compile(r'href="/p(.*?)"')
            line_list = pattern.findall(response.decode())
            for line in line_list:
                # BUG FIX: the original assigned this to `url`, clobbering
                # the listing-URL parameter so every later page of the outer
                # loop paginated the wrong base URL. Use a fresh name.
                thread_url = "https://tieba.baidu.com/p" + line
                self.URLlHandle(thread_url)

    def URLlHandle(self, urll):
        """Download one thread page and save every inline post image."""
        request = urllib.request.Request(urll, headers=self.headers)
        html = urllib.request.urlopen(request).read()
        content = etree.HTML(html)
        # class="BDE_Image" marks images embedded in posts (not avatars/ads).
        line_list = content.xpath('//img[@class="BDE_Image"]/@src')
        for line in line_list:
            self.writeImage(line)

    def writeImage(self, url):
        """Fetch a single image URL and write it to the current directory."""
        request = urllib.request.Request(url, headers=self.headers)
        image = urllib.request.urlopen(request).read()
        # BUG FIX: the original used url[-10:], which can contain '/' and
        # make open() fail. Use the last path component (still capped at
        # 10 chars to keep the original short-name behavior).
        filename = url.rsplit('/', 1)[-1][-10:]
        print("下载%s中" % (filename))
        with open(filename, 'wb') as f:
            f.write(image)
if __name__ == "__main__":
    # Build the forum search URL from the user's keyword, then crawl the
    # requested page range.
    base_url = "https://tieba.baidu.com/f?"
    keyword = input("请输入搜寻的关键字 >>")
    beginPage = int(input("请输入起始页 >>"))
    endPage = int(input("请输入结束页 >>"))
    query = parse.urlencode({"kw": keyword})
    spider = Tieba()
    spider.sendRequest(base_url + query, beginPage, endPage)
 

转载于:https://www.cnblogs.com/angle90/p/9157065.html

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值