# First crawler: Dangdang's five-star book bestseller list (kept for reference, commented out).
'''
import json
import requests
import re
def main(page):
    url = 'http://bang.dangdang.com/books/fivestars/01.00.00.00.00.00-recent30-0-0-1-' + str(page)
    html = request_dandan(url)
    if html is None:  # request failed; skip this page
        return
    items = parse_result(html)
    for item in items:
        write_item_to_file(item)
def request_dandan(url):
    try:
        response = requests.get(url)
        if response.status_code == 200:
            return response.text
    except requests.RequestException:
        return None
def parse_result(html):
    # One regex over the whole list page; the groups capture, in order:
    # rank, cover image URL, title, recommend rate, author, times, price.
    pattern = re.compile(
        r'<li>.*?list_num.*?(\d+).</div>.*?<img src="(.*?)".*?class="name".*?title="(.*?)">'
        r'.*?class="star">.*?class="tuijian">(.*?)</span>.*?class="publisher_info">.*?target="_blank">(.*?)</a>'
        r'.*?class="biaosheng">.*?<span>(.*?)</span></div>.*?<p><span\sclass="price_n">¥(.*?)</span>.*?</li>',
        re.S)
    items = re.findall(pattern, html)
    for item in items:
        yield {
            'rank': item[0],
            'image': item[1],
            'title': item[2],
            'recommend': item[3],
            'author': item[4],
            'times': item[5],
            'price': item[6]
        }
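# A hedged alternative sketch: pull the same fields with BeautifulSoup instead
# of one long regex. The class names (list_num, name, tuijian, publisher_info,
# biaosheng, price_n) are inferred from the regex above, not verified against
# Dangdang's live markup; parse_result_bs4 is a name introduced here.
def parse_result_bs4(html):
    from bs4 import BeautifulSoup  # not imported at the top of this block
    soup = BeautifulSoup(html, 'lxml')
    for li in soup.find_all('li'):
        num = li.find('div', class_='list_num')
        if num is None:
            continue  # skip <li> elements that are not book entries
        yield {
            'rank': num.text.rstrip('.'),
            'image': li.find('img').get('src'),
            'title': li.find('div', class_='name').a.get('title'),
            'recommend': li.find('span', class_='tuijian').text,
            'author': li.find('div', class_='publisher_info').a.text,
            'times': li.find('div', class_='biaosheng').span.text,
            'price': li.find('span', class_='price_n').text.lstrip('¥'),
        }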
def write_item_to_file(item):
    print('Writing --> ' + str(item))
    # 'with' closes the file automatically; one JSON object per line.
    with open('book.txt', 'a', encoding='utf-8') as f:
        f.write(json.dumps(item, ensure_ascii=False) + '\n')
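# A hedged companion sketch: read the JSON-lines file back, one dict per line,
# mirroring the format write_item_to_file produces. load_items is a name
# introduced here, not part of the original script.
def load_items(path='book.txt'):
    with open(path, encoding='utf-8') as f:
        return [json.loads(line) for line in f]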
if __name__ == "__main__":
    for i in range(1, 26):
        main(i)
'''
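# Second crawler: Weibo's realtime hot-search list (kept for reference, commented out).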
'''
import json
import requests
import re
from bs4 import BeautifulSoup
def main(page):
    url = 'https://s.weibo.com/top/summary?Refer=top_hot&topnav=1&wvr=6'
    html = request_weibo(url)
    if html is None:  # request failed
        return
    soup = BeautifulSoup(html, 'lxml')
    # Each hot-search entry sits in a <td class="td-02">; keep the bs4 tags,
    # since write_item_to_file reads item.a.text. (An earlier call to
    # parse_result() here overwrote these tags with dicts and broke the write.)
    items = soup.find_all('td', class_='td-02')
    for item in items:
        write_item_to_file(item)
def request_weibo(url):
    # Renamed from request_dandan, which was copied from the first crawler.
    # Note: s.weibo.com may nowadays require a login cookie in the headers.
    try:
        response = requests.get(url)
        if response.status_code == 200:
            return response.text
    except requests.RequestException:
        return None
def parse_result(html):
    # Unused leftover: the earlier regex-based approach, superseded by the
    # BeautifulSoup parsing in main() above.
    pattern = re.compile(r'<td class="td-01 ranktop">(\d+)</td>.*?<a href=.*?target="_blank">(.*?)</a>', re.S)
    items = re.findall(pattern, html)
    for item in items:
        yield {
            'rank': item[0],
            'text': item[1],
        }
def write_item_to_file(item):
    print('Writing --> ' + item.a.text)
    with open('hot.txt', 'a', encoding='utf-8') as f:
        # item is a bs4 tag; write just the hot-search text, one entry per line.
        f.write(item.a.text + '\n')
if __name__ == "__main__":
    main(0)
'''
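# Third crawler (active): scrape Douban's Top 250 movies and save them to an Excel sheet with xlwt.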
import requests
import re
from bs4 import BeautifulSoup
import xlwt
def main(page):
    url = 'https://movie.douban.com/top250?start=' + str(page * 25) + '&filter='
    html = request(url)
    if html is None:  # request failed; skip this page
        return
    soup = BeautifulSoup(html, 'lxml')
    items = soup.find(class_='grid_view').find_all('li')
    # Row 0 holds the header written in __main__, so movies go in rows 1..250.
    n = 1 + page * 25
    for item in items:
        name = item.find(class_='title').text
        img = item.find('a').find('img').get('src')  # cover URL (collected but not written)
        index = item.find('em').text
        cast = item.find('p').text.strip()
        score = item.find('span', class_='rating_num').text
        print('writing-' + str(n))
        sheet.write(n, 0, index)
        sheet.write(n, 1, name)
        sheet.write(n, 2, score)
        sheet.write(n, 3, cast)
        # Increment after writing, so the first movie of page 0 lands in row 1
        # (the original incremented first and left row 1 blank).
        n = n + 1
def request(url):
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36',
    }
    try:
        response = requests.get(url, headers=headers)
        return response.text
    except requests.RequestException:
        return None
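# A hedged sketch of a more defensive fetch: the same idea as request() above,
# but with a timeout and automatic retries. Retry and HTTPAdapter come from
# requests' bundled urllib3; request_with_retry is a name introduced here,
# not part of the original script.
def request_with_retry(url, attempts=3):
    from requests.adapters import HTTPAdapter
    from urllib3.util.retry import Retry
    session = requests.Session()
    retry = Retry(total=attempts, backoff_factor=0.5,
                  status_forcelist=[429, 500, 502, 503, 504])
    session.mount('https://', HTTPAdapter(max_retries=retry))
    try:
        response = session.get(url, headers={'User-Agent': 'Mozilla/5.0'}, timeout=10)
        response.raise_for_status()  # treat non-2xx responses as failures
        return response.text
    except requests.RequestException:
        return None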
if __name__ == "__main__":
    book = xlwt.Workbook(encoding='utf-8', style_compression=0)
    sheet = book.add_sheet('Douban Movies Top250', cell_overwrite_ok=True)
    sheet.write(0, 0, 'Rank')
    sheet.write(0, 1, 'Title')
    sheet.write(0, 2, 'Score')
    sheet.write(0, 3, 'Cast')
    # 250 movies at 25 per page -> 10 pages (the original range(0, 25) requested
    # pages past the end of the list).
    for page in range(0, 10):
        main(page)
    book.save('Douban_Top250_Movies.xls')
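'''
# A hedged alternative sketch (inactive, following this file's convention of
# keeping unused variants in triple-quoted blocks): write the same rows to a
# CSV file with the standard library instead of xlwt, which only produces the
# legacy .xls format. write_row_to_csv is a name introduced here.
import csv

def write_row_to_csv(row, path='douban_top250.csv'):
    # row is a list like [index, name, score, cast]; one movie per CSV line.
    # utf-8-sig adds a BOM so Excel detects the encoding of Chinese text.
    with open(path, 'a', newline='', encoding='utf-8-sig') as f:
        csv.writer(f).writerow(row)
'''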