python 个人爬虫实战

本文详细介绍了使用Python进行个人网络爬虫的实践过程,包括设置HTTP请求头、处理反爬策略、数据解析及存储等方面,旨在帮助初学者掌握爬虫基本技能。
摘要由CSDN通过智能技术生成
import json
import time
from datetime import datetime, timezone
from urllib import request

import pandas as pd
from bs4 import BeautifulSoup

head={}
head['User-Agent']='Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'

#如需使用cookie登录,将cookie地址传入head即可
#如:
"""header = {"Content-Type":"application/json", 
          "Host":"ts.21cn.com",
          "Connection": "keep-alive",
          "Accept":" */*",
            "Accept-Encoding": "gzip, deflate, br",
            "Accept-Language": "zh-CN,zh;q=0.9",
          "Referer": "https://ts.21cn.com/merchant/show/id/7037",
            "User-Agent":'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',
            "Cookie":"你的cookie地址,按F12在network》XHR》cookie"  }
"""

jts_url_list=[]
for i in range(1,11):
    jts_url_list.append("https://ts.21cn.com/front/api/ranking/merchantPostList.do?pageNo="+str(i)+"&merchantId=7037&listType=1&offset=b911ff99c1c81c99")

output=pd.DataFrame()
for jts_url in jts_url_list:
    jts_req=request.Request(url=jts_url,headers=head)
    jts_respon=request.urlopen(jts_req)
    jts_html=jts_respon.read().decode('utf-8','ignore')
    jts_dict=eval(jts_html)
    jts_ctime_list=[]
    jts_id_list=[]
    jts_picture_list=[]
    jts_title_list=[]
    jts_shortTopic_list=[]
    jts_shuqiu_list=[]
    jts_tail_url_list=[]
    output2=pd.DataFrame()
    for jts_url_num in range(10):
        jts_ctime_list.append(jts_dict.get('postList')[jts_url_num].get('ctime'))
        jts_id_list.append(jts_dict.get('postList')[jts_url_num].get('id'))
        jts_picture_list.append(jts_dict.get('postList')[jts_url_num].get('picture'))
        jts_title_list.append(jts_dict.get('postList')[jts_url_num].get('title'))
        jts_shortTopic_list.append(jts_dict.get('postList')[jts_url_num].get('shortTopic'))
        jts_shuqiu_list.append(jts_dict.get('postList')[jts_url_num].get('shuqiu'))
        jts_tail_url_list.append("https://ts.21cn.com/tousu/show/id/"+str(jts_dict.get('postList')[jts_url_num].get('id')))
        output2 = pd.DataFrame({'ctime':jts_ctime_list,
                               'id':jts_id_list,
                               'picture':jts_picture_list,
                               'title':jts_title_list,
                               'shortTopic':jts_shortTopic_list,
                               'shuqiu':jts_shuqiu_list,
                               'tail_url':jts_tail_url_list})
    output=pd.concat([output,output2],ignore_index=True)
    time.sleep(2)
    print(str(jts_url)+"完成")

def stamp_to_datetime(stamp):
    """
    将时间戳(1539100800)转换为 datetime2018-10-09 16:00:00格式并返回
    :param stamp:
    :return:
    """
    time_stamp_array = datetime.utcfromtimestamp(stamp)
    date_time = time_stamp_array.strftime("%Y-%m-%d %H:%M:%S")
    # 如果直接返回 date_time则为字符串格式2018-10-09 16:00:00
    date = datetime.strptime(date_time,"%Y-%m-%d %H:%M:%S")
    return date

output['ctime']=output.ctime.apply(lambda txt:stamp_to_datetime(txt))

output.to_excel(r'爬虫结果.xlsx',index=False)

 

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值