Python Crawler Examples

Crawling address information of a supermarket chain from Baidu Maps

import requests
import pymysql
import random
import time
def get_content(pn,nn,supermarket):
    try:
        url = 'https://map.baidu.com/'
        params = {
            'newmap': 1,
            'reqflag': 'pcmap',
            'biz': 1,
            'from': 'webmap',
            'da_par': 'baidu',
            'pcevaname': 'pc4.1',
            'qt': 'con',
            'c': 257,
            'wd': supermarket,
            'pn': pn,
            'nn': nn,
            'db': 0,
            'sug': 0,
            'addr': 0,
            'da_src': 'shareurl',
            'on_gel': 1,
            'src': 7,
            'gr': 3,
            'l': 13,
            'device_ratio': 1,
            'tn': 'B_NORMAL_MAP',
            'ie': 'utf-8',
            'newfrom': 'zhuzhan_webmap'
        }
        header = [
            "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36",
            # ... more User-Agent strings
        ]
        header = random.choice(header)  # randomly pick a User-Agent string
        headers = {
            'User-Agent':header
        }
        resp = requests.get(url=url, headers=headers, params=params)
        content = resp.json()  # the response body is JSON
        # print(content)
        return content
    except Exception as e:
        print("Request/response error:", e)

def get_parse(content, supermarket):  # extract fields from the JSON response
    try:
        datas = []
        if 'content' in content:
            content = content['content'][:10]
            for store in content:
                province = store['admin_info']['province_name']
                city = store['admin_info']['city_name']
                area = store['admin_info']['area_name']
                town = store['admin_info']['town_name']
                store_name = store['name']
                addr = store['addr']
                datas.append((province, city, area, town, supermarket, store_name, addr))
            return datas  # return the extracted records
        else:
            print("No data on this page!")
            return datas
    except Exception as e:
        print("Parsing error:", e)
        return []

def get_mysqlconnect():  # create the MySQL connection
    db = pymysql.connect(host='192.168.xx.xx',
                         user='root',
                         password='root',
                         port=3306,
                         database='demo')
    cur = db.cursor()  # create a cursor
    return db, cur  # return the connection object and the cursor

def close_mysqlconnect(db, cur):  # close the cursor, then the connection
    cur.close()
    db.close()

def main(pn):
    try:
        total_datas = []
        supermarket = 'xxx'  # name of the supermarket chain to crawl
        for pn in range(1, pn):  # crawl pages 1 .. pn-1
            nn = (pn - 1) * 10
            content = get_content(pn, nn, supermarket)
            datas = get_parse(content, supermarket)
            total_datas.extend(datas)  # merge this page's records
            t = random.randint(1, 4)
            time.sleep(t)  # sleep 1-4 seconds between pages
            print('Page ' + str(pn) + ' done, sleeping ' + str(t) + ' seconds')
        if total_datas:  # only insert when something was collected
            total_datas = str(total_datas)[1:-1]  # turn the list of tuples into a VALUES string
            db, cur = get_mysqlconnect()
            sql = """
            insert into supermarket_info (省,市,区县,乡镇,超市,超市分店,详细地址) values
            """
            sql = sql + total_datas
            cur.execute(sql)  # execute the INSERT statement
            db.commit()  # commit the transaction
            close_mysqlconnect(db, cur)  # close the connection
    except Exception as e:
        print("Insert error:", e)
    finally:
        print("*" * 30 + "Crawl finished" + "*" * 30)

if __name__ == '__main__' :
    pn = 11
    main(pn)
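
The INSERT above is built by string concatenation (str(total_datas)[1:-1]), which only works while every value is a plain string and breaks on quotes inside addresses. A safer alternative is a parameterized executemany so that pymysql does the quoting itself. The sketch below is a substitute technique, not part of the original script; it assumes the same supermarket_info table and column order, and insert_datas is a hypothetical helper name.

import pymysql

def insert_datas(datas):
    # hypothetical helper; datas is a list of 7-tuples
    # (province, city, area, town, supermarket, store_name, addr)
    db = pymysql.connect(host='192.168.xx.xx', user='root', password='root',
                         port=3306, database='demo')
    try:
        with db.cursor() as cur:
            sql = ("insert into supermarket_info "
                   "(省,市,区县,乡镇,超市,超市分店,详细地址) "
                   "values (%s,%s,%s,%s,%s,%s,%s)")
            cur.executemany(sql, datas)  # pymysql escapes each value
        db.commit()
    finally:
        db.close()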

Crawling product information from JD

import requests
from bs4 import BeautifulSoup
import pandas as pd
import time
import random

def get_headers():
    ua = [
        "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36",
        # ... more User-Agent strings
    ]
    useragent = random.choice(ua)
    return useragent

def get_content(page, s):
    url = 'https://search.jd.com/Search'
    useragent = get_headers()
    headers = {
        'Cookie': 'xxx',  # logged-in cookie string
        'User-Agent': useragent
    }
    params = {
        'keyword': 'xxx',  # product to search for
        'qrst': 1,
        'suggest': '1.his.0.0',
        'wq': 'xxx',  # product to search for
        'stock': 1,
        'ev': 'exbrand_xxx(HAOYIKANG) ^',
        'pvid': '5059b011112a4068831c4dd4555e7791',
        'page': page,
        's': s,
        'click': 0
    }
    response = requests.get(url=url, headers=headers, params=params)  # send the request and get the response
    content = response.text  # response body as text
    return content

def get_analysis():
    datas = []
    for page in range(1, 86):
        s = 1 + (page - 1) * 30
        content = get_content(page, s)
        soup = BeautifulSoup(content, 'lxml')  # parse the HTML
        products = soup.find_all('div', class_='gl-i-wrap')
        # extract fields from each product card
        for product in products:
            price = product.find('div', class_='p-price').find('i').get_text()
            prod_info = product.find('div', class_='p-name p-name-type-2').find('em').get_text()
            storename = product.find('span', class_='J_im_icon').find('a').get_text()
            datas.append((storename, prod_info, price))
        t = random.randint(1, 4)
        time.sleep(t)  # sleep between pages
        print('Crawling page ' + str(page) + ', sleeping ' + str(t) + ' seconds')
    return datas

def save_datas(datas):  # save the results to an Excel file
    columns = ['店铺名称', '商品信息', '商品价格']
    pd.DataFrame(datas, columns=columns).to_excel('./data/xxx_京东.xlsx', index=False)

def main():
    datas = get_analysis()
    save_datas(datas)

if __name__ == '__main__':
    main()
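
The chained find() calls in get_analysis assume every product card has a price, a title and a shop link; a card missing any of them raises AttributeError and aborts the whole page. A minimal defensive sketch follows; safe_text and parse_product are hypothetical helper names, not part of the original script, and they operate on the same BeautifulSoup div.gl-i-wrap nodes.

def safe_text(tag):
    # return the tag's text, or an empty string if the tag is missing
    return tag.get_text(strip=True) if tag is not None else ''

def parse_product(product):
    # product is one div.gl-i-wrap node from the search results page
    price_div = product.find('div', class_='p-price')
    name_div = product.find('div', class_='p-name p-name-type-2')
    shop_span = product.find('span', class_='J_im_icon')
    price = safe_text(price_div.find('i') if price_div else None)
    prod_info = safe_text(name_div.find('em') if name_div else None)
    storename = safe_text(shop_span.find('a') if shop_span else None)
    return storename, prod_info, price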

Crawling product information from Taobao

import requests
import re
import pandas as pd
import random
import time
# function that fetches the raw page text
def get_content(page):
    url = r'https://s.taobao.com/search'
    params = {
        'q': 'xxx',  # product name to search for
        'suggest': 'history_4',
        'commend': 'all',
        'ssid': 's5-e',
        'search_type': 'item',
        'sourceId': 'tb.index',
        'spm': 'a21bo.jianhua.201856-taobao-item.2',
        'ie': 'utf8',
        'initiative_id': 'tbindexz_20170306',
        '_input_charset': 'utf-8',
        'wq': '',
        'suggest_query': '',
        'source': 'suggest',
        'bcoffset': 2,
        'ntoffset': 2,
        'p4ppushleft': '2,47',
        's': page  # result offset for the requested page
    }
    # build the request headers
    headers = {
        'Cookie': 'xxx',  # cookie string, used to bypass the login wall
        'Host':'s.taobao.com',
        'Referer':'https://www.taobao.com/', 
        'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36'
    }

    response = requests.get(url=url, headers=headers, params=params)  # send the request and get the response
    content = response.text  # response body as text
    # print(content)
    return content  # return the raw page text
# function that parses the page text
def parser_content(content):
    data = []
    title = re.findall(r'"raw_title":"([\s\S]*?)"', content)  # regex extraction; [\s\S] matches any character
    shopName = re.findall(r'"shopName":"([\s\S]*?)"', content)
    price = re.findall(r'"view_price":"([\s\S]*?)"', content)
    itemLoc = re.findall(r'"item_loc":"([\s\S]*?)"', content)
    for i in range(len(title)): 
        data.append((title[i],shopName[i],price[i],itemLoc[i]))
    return data

if __name__ == '__main__':
    datas = []
    tag = ['商品信息','店铺名称','商品价格','商品地址']
    for page in range(0,100,10):
        content = get_content(page)
        data = parser_content(content)
        for x in data:
            datas.append(x)
        t = random.randint(1,3)
        time.sleep(t)
        print('Offset ' + str(page) + ' done, sleeping ' + str(t) + ' seconds')
    pd.DataFrame(datas,columns=tag).to_excel('./data/xxx.xlsx',index=False)
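
parser_content indexes the four regex result lists in lockstep, so it assumes they all have the same length; if one field is missing for some item, the tuples shift or an IndexError is raised. A hedged alternative is to pair the lists with zip, which truncates to the shortest list. parser_content_zip below is a hypothetical name, not the original author's code.

import re

def parser_content_zip(content):
    # same four fields, paired with zip so a length mismatch
    # truncates to the shortest list instead of raising IndexError
    titles = re.findall(r'"raw_title":"([\s\S]*?)"', content)
    shops = re.findall(r'"shopName":"([\s\S]*?)"', content)
    prices = re.findall(r'"view_price":"([\s\S]*?)"', content)
    locs = re.findall(r'"item_loc":"([\s\S]*?)"', content)
    return list(zip(titles, shops, prices, locs))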

More examples to come…
