Web Scraping Examples

1. Scraping the Maoyan Movies TOP 100
2. Scraping all flower species from Magic Garden (魔法花园)
3. Scraping the Magic Garden magic house (魔法屋)
4. Scraping Pixabay images
5. Scraping Ele.me merchant data for a single area
6. Scraping Taobao models (淘女郎)
7. Scraping FIND ICONS
8. Scraping images from Doutula (斗图网)
9. To be continued...
 
1. Scraping the Maoyan Movies TOP 100
from multiprocessing import Pool
import requests
from bs4 import BeautifulSoup
import re
import os
from random import choice
 
 
def get_page_url():
    pass  # placeholder, not used
 
 
def parse_page_url(url):
    print('Downloading:',url)
    headers = [
        'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)',
        'Mozilla/5.0 (Windows NT 6.2; rv:16.0) Gecko/20100101 Firefox/16.0'
        ]
    header = {'user-agent': choice(headers)}
 
    try:
        r = requests.get(url,headers=header)
        if r.status_code == 200:
            print('Request succeeded')
            return r.text
        else:
            print('Status code is not 200, please check the code')
            return None
    except requests.exceptions.ConnectionError:
        print('Error occurred')
        return None
    except Exception as result:
        print('Caught another exception')
        print(result)
        return None
 
 
def get_text(text):
    # Extract the ranking index, movie id, poster url, title, starring line and release time from each <dd> block.
    soup = BeautifulSoup(text, 'lxml')
    pattern = re.compile(
        '<dd>.*?board-index.*?>(.*?)</i>' + '.*?data-val="{movieId:(.*?)}" href='
        '.*?src="(.*?)"/>\n<img.*?name.*?>(.*?)</a></p>.*?class="star">(.*?)</p>' +
        '.*?"releasetime">(.*?)</p>.*?</dd>', re.S
        )
    
    items = re.findall(pattern, str(soup))
    for item in items:
        yield {
            'name': item[3].split('>')[1],
            'id':item[1],
            'action': item[4].split(':')[1].split(' ')[0],
            'time': item[5].split(':')[1],
            #'img':'http:' + item[2]
            }
 
 
def write_text(content):
    print('Saving: %s, please wait...' % content['name'])
    with open('c://moayanmove.txt', 'a', encoding='utf-8') as f:
        f.write(str(content) + '\n')
        print('Saved')
 
 
def main(Groups):
    urls = 'http://maoyan.com/board/4?offset=' + str(Groups)
    texts = parse_page_url(urls)
    for item in get_text(texts):
        write_text(item)
 
 
if __name__ == '__main__':
    pool = Pool()
    Groups = ([i*10 for i in range(10)])
    pool.map(main, Groups)
    pool.close()
    pool.join()
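
A note on parsing: since each page is already loaded into BeautifulSoup before the regex runs, the same fields can also be pulled out with CSS selectors. A minimal sketch, assuming only the class names (board-index, name, star, releasetime) that the regex above already relies on:

def get_text_bs(text):
    # Alternative to get_text(): read the fields from each <dd> block with selectors instead of a regex.
    soup = BeautifulSoup(text, 'lxml')
    for dd in soup.select('dd'):
        yield {
            'index': dd.select_one('i.board-index').get_text(strip=True),
            'name': dd.select_one('p.name a').get_text(strip=True),
            'star': dd.select_one('p.star').get_text(strip=True),
            'time': dd.select_one('p.releasetime').get_text(strip=True),
        }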
 
 
2. Scraping all flower species from Magic Garden (魔法花园)
import requests
import re
from bs4 import BeautifulSoup
import time
import random
import xlwt
url_half = 'http://h5.pinpinhu.com/gd-graph/index.action?uid=3640469&sid=7ddb5d624f398d043b74d1fa21af3c8f4ec83ad1&pageNo={}'
url_list = [url_half.format(i) for i in range(24)]
 
 
# print(url_list)
def get_html(url):
    res = requests.get(url, headers=headers)
    r_text = res.text.replace(u'\u266d', u'').replace(u'\xa9', u'')
    pattern = r'<img src="(.*?)"/>\n<a href=\'(.*?)\'>(.*?)</a>'
 
    t = re.findall(pattern, r_text, re.S)
    flower_list = []
    for item in t:
        time.sleep(random.random())
        flower_dict = {}
        flower_img = 'http://h5.pinpinhu.com' + item[0]
        flower_id = item[1].rsplit(';', maxsplit=1)[1].rsplit('=', maxsplit=1)[1]
        flower_name = item[2]
        _s = item[1].split(';')
        valid_url = 'http://h5.pinpinhu.com' + _s[0] + _s[1].split('amp')[0] + _s[2].split('amp')[0] + _s[3]
        pattern = '(.*?)HOME_SESSION.*?jvm1(.*)'
        url_half = re.findall(pattern, valid_url, re.S)[0]
        flower_url = url_half[0] + url_half[1]
        flower_dict['flower_name'] = flower_name
        flower_dict['flower_id'] = flower_id
        flower_dict['flower_url'] = flower_url
 
        res_2 = requests.get(flower_url, headers=headers)
        a_2 = res_2.text.replace(u'\u266d', u'').replace(u'\xa9', u'')
        li_2 = re.findall(r'花之图谱(.*?)返回花之图谱', a_2, re.S)[0]
        pattern_level = r'.*花种等级:(.*?)<br/>.*'
        pattern_price = r'.*种子价格:(.*?)家币<br/>.*'
        pattern_max = r'.*(?:预计成花|成花数量):(.*?)<br/>.*保底数量.*'
        pattern_min = r'.*保底数量:(.*?)<br/>.*'
 
        pattern_time = r'.*预计时间:(.*?)<br/>.*'
        pattern_word = r'.*(?:鲜花花语|鲜花话语):(.*?)<br/>.*'
        try:
            s_level = re.findall(pattern_level, li_2, re.S)[0]
        except:
            s_level = ''
        try:
            s_price = re.findall(pattern_price, li_2, re.S)[0]
        except:
            s_price = ''
        try:
            s_max = re.findall(pattern_max, li_2, re.S)[0]
        except:
            s_max = re.findall(pattern_max, li_2, re.S)
        try:
            s_min = re.findall(pattern_min, li_2, re.S)[0]
        except:
            s_min = re.findall(pattern_min, li_2, re.S)
        try:
            s_time = re.findall(pattern_time, li_2, re.S)[0]
        except:
            s_time = re.findall(pattern_time, li_2, re.S)
        try:
            s_word = re.findall(pattern_word, li_2, re.S)[0]
        except:
            s_word = re.findall(pattern_word, li_2, re.S)
        if s_price:
            pattern_other = '商城购买'
        else:
            pattern_other = r'.*(?:鲜花花语|鲜花话语):.*?<br/>(.*?)<br/>'
        try:
            s_other = re.findall(pattern_other, li_2, re.S)[0]
        except:
            s_other = re.findall(pattern_other, li_2, re.S)
        
        flower_dict['level'] = s_level
        flower_dict['price'] = s_price
        flower_dict['max'] = s_max
        flower_dict['min'] = s_min
        flower_dict['time'] = s_time
        flower_dict['word'] = s_word
        flower_dict['other'] = s_other
        flower_dict['flower_img'] = flower_img
        flower_list.append(flower_dict)
    return flower_list
 
 
headers = {
'user-agent':'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36',
 
}
flower_list = []
for i in range(1,24):
    # Normal:    http://h5.pinpinhu.com/gd-graph/index.action?uid=3640469&sid=7ddb5d624f398d043b74d1fa21af3c8f4ec83ad1&pageNo=23
    # Unique:    http://h5.pinpinhu.com/gd-graph/index.action?uid=3640469&sid=7ddb5d624f398d043b74d1fa21af3c8f4ec83ad1&type=1&pageNo=6
    # Rare:      http://h5.pinpinhu.com/gd-graph/index.action?uid=3640469&sid=7ddb5d624f398d043b74d1fa21af3c8f4ec83ad1&type=2&pageNo=6
    # Top-grade: http://h5.pinpinhu.com/gd-graph/index.action?uid=3640469&sid=7ddb5d624f398d043b74d1fa21af3c8f4ec83ad1&type=3&pageNo=7
 
    url = 'http://h5.pinpinhu.com/gd-graph/index.action?uid=3981003&sid=3dd8cff09524f7af218b2af16ef6d26a5b69c6f2&type=0&pageNo={}'.format(i)
    print('------------ Page {} of 23 ------------'.format(i))
    a = get_html(url)
    flower_list += a
    time.sleep(random.randint(1,5))
 
print(flower_list)
time.sleep(1)
workbook = xlwt.Workbook(encoding='utf-8', style_compression=0)
sheet = workbook.add_sheet('普通花朵', cell_overwrite_ok=True)
x = 1
for item in flower_list:
        time.sleep(0.1)
 
        sheet.write(x, 0, item['flower_name'])
        sheet.write(x, 1, item['flower_id'])
        sheet.write(x, 2, item['flower_url'])
        sheet.write(x, 3, item['level'])
        sheet.write(x, 4, item['price'])
        sheet.write(x, 5, item['max'])
        sheet.write(x, 6, item['min'])
        sheet.write(x, 7, item['time'])
        sheet.write(x, 8, item['word'])
        sheet.write(x, 9, item['other'])
        sheet.write(x, 10, item['flower_img'])
        x += 1
 
workbook.save(r'普通.xls')
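
Note on the regex patterns above: a square-bracket character class such as [预计成花|成花数量] would match any single character from that set, whereas the non-capturing group (?:预计成花|成花数量) matches either whole phrase, which is what the field patterns intend. A minimal illustration:

import re

good = '成花数量:3<br/>'
bad = '其他花:5<br/>'                                         # an unrelated field
print(re.findall(r'[预计成花|成花数量]:(.*?)<br/>', bad))      # ['5'] -- false positive: the class matches the single character 花
print(re.findall(r'(?:预计成花|成花数量):(.*?)<br/>', bad))    # []
print(re.findall(r'(?:预计成花|成花数量):(.*?)<br/>', good))   # ['3']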
 
 
3. Scraping the Magic Garden magic house (魔法屋)
import requests
import re
import time
import random
 
 
def get_html(url):
    headers = {
    'user-agent':'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36',
    }
    res = requests.get(url=url, headers=headers)
    s = res.text.replace(u'\u266d', u'').replace(u'\xa9', u'')
    pattern = r'.*极品(.*?)返回花园首页.*'
    a = re.findall(pattern, s, re.S)[0]
    pattern = r'.*?<img src="(.*?)"/><a href=\'(.*?)\'>(.*?)</a>.*?'
    b = re.findall(pattern, a, re.S)
    flower_list = []
    for item in b:
        time.sleep(random.random())
        flower_dict = {}
        flower_img = 'http://h5.pinpinhu.com' + item[0]
        flower_id = item[1].rsplit('id=', maxsplit=1)[1].split('&')[0]
        flower_name = item[2]
        _s = item[1].split(';')
        valid_url = 'http://h5.pinpinhu.com' + _s[0] + _s[1].split('amp')[0] + _s[2].split('amp')[0] + _s[3]
        pattern = '(.*?)HOME_SESSION.*?jvm1(.*)'
        url_half = re.findall(pattern, valid_url, re.S)[0]
        flower_url = url_half[0] + url_half[1]
 
        res_2 = requests.get(flower_url, headers=headers)
        a_2 = res_2.text.replace(u'\u266d', u'').replace(u'\xa9', u'')
        li_2 = re.findall(r'魔法屋(.*?)返回花园首页', a_2, re.S)[0]
        pattern_level = r'.*种子等级:(.*?)<br/>.*'
        pattern_compound = r'.*合成需消耗:(.*?)所需花朵不足.*'
        level = re.findall(pattern_level, li_2, re.S)[0]
        compound = re.findall(pattern_compound, li_2, re.S)[0]
        flower_dict['name'] = flower_name
        flower_dict['level'] = level
        flower_dict['flower_id'] = flower_id
 
        # The recipe string appears to look like "名称(数量/0)名称(数量/0)..."; turn it into [[名称, 数量], ...]
        _a = compound.replace(r'<br/>', '').replace('\n', '')
        li_half = [i.rsplit('(', maxsplit=1) for i in _a.replace('/0)', '|').split('|')]
        detail_list = [i for i in li_half if not i == ['']]
        flower_dict['detail_list'] = detail_list
        flower_dict['flower_img'] = flower_img
        flower_dict['flower_url'] = flower_url
        flower_list.append(flower_dict)
    return flower_list
 
flower_list = []
for i in range(1,2):
    url = 'http://h5.pinpinhu.com/gd-synthesis/index.action?uid=3981003&sid=0a05853e3cc8ea83eeb896806280c894be5aa59b&type=0&subType=0'
    print('---------%s-------'%i)
    a = get_html(url)
    flower_list += a
    time.sleep(3)
 
import xlwt
time.sleep(1)
workbook = xlwt.Workbook(encoding='utf-8', style_compression=0)
sheet = workbook.add_sheet('普通花朵合成', cell_overwrite_ok=True)
x = 1
for item in flower_list:
    time.sleep(0.1)
    sheet.write(x, 0, item['name'])
    sheet.write(x, 1, int(item['flower_id']))
    sheet.write(x, 2, str(item['level']))
    sheet.write(x, 3, str(item['detail_list']))
    sheet.write(x, 4, item['flower_img'])
    sheet.write(x, 5, item['flower_url'])
    x += 1
 
workbook.save(r'1.xls')
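
A small illustration of the recipe parsing above, using a made-up compound string in the shape the code appears to expect (each ingredient written as 名称(数量/0)):

_a = '玫瑰(3/0)<br/>\n百合(2/0)<br/>'.replace(r'<br/>', '').replace('\n', '')
li_half = [i.rsplit('(', maxsplit=1) for i in _a.replace('/0)', '|').split('|')]
print([i for i in li_half if not i == ['']])   # [['玫瑰', '3'], ['百合', '2']]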
 
 
4. Scraping Pixabay images
#Pixabay image downloader
#Source site: https://pixabay.com/
import requests
from bs4 import BeautifulSoup
import os
root = 'F:\\图片\\'  # trailing separator, since file names are appended directly below
url_s = []
pixabay_day = []
base_url = 'https://pixabay.com/zh/photos/?order=ec&pagi='
for i in range(1,2):
    urls = base_url + str(i)
    url_s.append(urls)
for urls in url_s:
    request = requests.get(urls)
    content = request.content
    soup = BeautifulSoup(content,'lxml')
    img_list = soup('img')
    for img in img_list:
        url =img['src']
        url = url.split('/static')[0]
        pixabay_day.append(url)
        path = root + url.split('/')[-1]
        try:
            if not os.path.exists(root):
                os.mkdir(root)
            if not os.path.exists(path):
                r = requests.get(url)
                with open(path,'wb') as f:
                    f.write(r.content)
                print('File saved')
            else:
                print('File already exists')
        except:
            print('Download failed')
 
5. Scraping Ele.me merchant data for a single area
import urllib.request
import os
import time
import json
from  openpyxl  import Workbook
from  openpyxl  import load_workbook
 
keywordExcel="keyword.xlsx"  #关键字检索外卖地点保存路径
keywords=["郑州","商丘"]  #关键字集合
targetDir ="test"  #文件保存路径
 
 
def reqsetting_keyword():# build the request headers for the keyword (POI) search; the url keeps only the base path for now
    weburl = "https://mainsite-restapi.ele.me/v2/pois?"
    #extra1="extras%5B%5D=count&geohash=wx4g0bmjetr7&keyword=%E6%9C%9D%E9%98%B3&limit=20&type=nearby"
    webheaders={
    "Accept":"application/json, text/plain, */*",
    "Accept-Language":"zh-CN,zh;q=0.8",
    "Connection":"keep-alive",
    "Cookie":"ubt_ssid=plds7ye19rj2rghg3oaar8hkt89yy7f1_2017-02-07; _utrace=ac9073c509bedb74b28a1482bd95a9d8_2017-02-07",
    "Host":"mainsite-restapi.ele.me",
    "Origin":"https://www.ele.me",
    "Referer":"https://www.ele.me/place/wx4g4h5shqf",
    "User-Agent":"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.75 Safari/537.36"
        }
    req=urllib.request.Request(url=weburl,headers=webheaders)
    return req
 
 
def write2Excel_keyword(jsondata,title):# write the keyword-search results into keywordExcel, one sheet per keyword
    fileName=keywordExcel
    if(os.path.exists(fileName)):
        wb=load_workbook(fileName)
    else:
        wb=Workbook()
    ws=wb.create_sheet(title)    
    ws.column_dimensions["A"].width =10.0
    ws.append(["ID","城市","geohash","名称","地址","商家总数","经度","纬度","request_id","short_address"])
    ws.column_dimensions["A"].width =30.0
    ws.column_dimensions["B"].width =10.0
    ws.column_dimensions["C"].width =18.0
    ws.column_dimensions["D"].width =20.0
    ws.column_dimensions["E"].width =50.0
    ws.column_dimensions["F"].width =10.0
    ws.column_dimensions["G"].width =10.0
    ws.column_dimensions["H"].width =10.0
    ws.column_dimensions["I"].width =25.0
    ws.column_dimensions["J"].width =40.0
    for i in range(len(jsondata)):
        row=jsondata[i]
        ws.append([row["id"],row["city"],row["geohash"],row["name"],row["address"],row["count"],
                   row["longitude"],row["latitude"],row["request_id"],row["short_address"]])
    wb.save(fileName)
    
 
def excelName():# build the output file name from today's date
    if not os.path.isdir(targetDir):  
        os.mkdir(targetDir)
    excelName=str(time.strftime ("%Y-%m-%d")+".xlsx")
    completePath=targetDir+"\\"+excelName
    return completePath
 
 
def reqsetting():# build the restaurant-list request; the extras query string and the Referer header are filled in per request
    weburl = "https://mainsite-restapi.ele.me/shopping/restaurants?"
    extra1="extras%5B%5D=activities&geohash=wx4g56v1d2m&latitude=39.91771&limit=24&longitude=116.51698&offset=0&terminal=web"
    webheaders={
    "Accept":"application/json, text/plain, */*",
    "Accept-Language":"zh-CN,zh;q=0.8",
    "Connection":"keep-alive",
    "Cookie":"ubt_ssid=plds7ye19rj2rghg3oaar8hkt89yy7f1_2017-02-07; _utrace=ac9073c509bedb74b28a1482bd95a9d8_2017-02-07",
    "Host":"mainsite-restapi.ele.me",
    "Origin":"https://www.ele.me",
    #"Referer":"https://www.ele.me/place/wx4g56v1d2m",
    "User-Agent":"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.75 Safari/537.36"
        }
    req=urllib.request.Request(url=weburl,headers=webheaders)
    return req
 
 
def write2Excel(jsondata,title):# write merchant data; each business-district location gets its own sheet
    fileName=excelName()
    isexit="false"
    if(os.path.exists(fileName)):
        wb=load_workbook(fileName)
        isexit="true"
    else :
       wb=Workbook()
    if(wb.__contains__(title)):
        ws=wb[title]
        ws.append([])
    else:
        ws=wb.create_sheet(title)
        ws.column_dimensions["A"].width =10.0
        ws.column_dimensions["B"].width =40.0
        ws.column_dimensions["C"].width =60.0
        ws.column_dimensions["D"].width =10.0
        ws.column_dimensions["E"].width =18.0
        ws.column_dimensions["F"].width =10.0
        ws.append(["ID","店名","地址","距离","人均消费","月销售额"])
    
    for i  in range(len(jsondata)):
        row=jsondata[i]
        #print(type(row))
        if("average_cost" not  in row.keys()):
            row["average_cost"]="无人均消费数据"
        ws.append([row["id"],row["name"],row["address"],row["distance"],row["average_cost"],row["recent_order_num"]])
    wb.save(fileName)
    
 
def readKeyWordFromExcel():# load the business-district locations from the keywordExcel generated above
    fileName=keywordExcel
    if(os.path.exists(fileName)):
        wb=load_workbook(fileName)
    else:
        return
    for title in wb.sheetnames:
        ws=wb[title]
        for i in range(2,ws.max_row+1):
            infos={}# location info, used to build the request parameters
            infos["name"]=ws.cell(row=i,column=4).value
            print("Scraping delivery merchants near %s ..." % infos["name"])
            infos["ID"]=ws.cell(row=i,column=1).value
            infos["geohash"]=ws.cell(row=i,column=3).value
            infos["longitude"]=ws.cell(row=i,column=7).value
            infos["latitude"]=ws.cell(row=i,column=8).value
            if(infos["geohash"]):
                req=reqsetting()
                req.add_header("Refer","https://www.ele.me/place/%s" % infos["geohash"])#修改请求头的refer
                newUrl=req.get_full_url()
                offset=0
                contentBytes=""
                while(contentBytes!="[]"):#构造请求参数,基本上只修改offset 偏移量数据
                    params={
                    "extras[]":"activities",
                    "geohash":"%s" % infos["geohash"],
                    "latitude":"%s" % infos["latitude"],
                    "longitude":"%s" % infos["longitude"],
                    "terminal":"web",
                    "limit":24,
                    "offset":offset
                       }
                    params=urllib.parse.urlencode(params)# url-encode the parameters
                    req.full_url=newUrl+params   # rebuild the full request url
                    webpage=urllib.request.urlopen(req)
                    contentBytes = webpage.read().decode("utf-8")
                    if(contentBytes!="[]"):
                        jsondata=json.loads(contentBytes)        
                        write2Excel(jsondata,infos["name"])#将请求数据写入excel中
                        offset+=24 #便宜
                    else :
                        break
 
 
if __name__ == '__main__':  # entry point
    if(os.path.exists(keywordExcel)):
        os.remove(keywordExcel)
    req=reqsetting_keyword()# use the keyword-search request builder defined above
    newUrl=req.get_full_url()
    for keyword in keywords:# for each keyword, build query parameters and append them to the url
        params={
        "extras[]":"count",
        "geohash":"wx4g0bmjetr7",
        "keyword":"%s" % keyword,
        "limit":"20",
        "type":"nearby"
            }
        params=urllib.parse.urlencode(params)# url-encode the parameters
        req.full_url=newUrl+params# rebuild the request url

        webpage=urllib.request.urlopen(req)# fetch the data
        contentBytes = webpage.read().decode("utf-8")
        jsondata=json.loads(contentBytes)# parse the response as json
        write2Excel_keyword(jsondata,keyword)# write the keyword results to Excel
    time.sleep(10)
    readKeyWordFromExcel()
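
The pagination loop above keeps requesting with a growing offset until the API returns an empty list ("[]"). A quick sketch of what urlencode produces for one page of the restaurant query (values taken from the extra1 example string; the offset is illustrative):

from urllib.parse import urlencode

params = {"extras[]": "activities", "geohash": "wx4g56v1d2m", "limit": 24, "offset": 48, "terminal": "web"}
print(urlencode(params))
# extras%5B%5D=activities&geohash=wx4g56v1d2m&limit=24&offset=48&terminal=web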
 
 
6. Scraping Taobao models (淘女郎)
# -*- coding: utf-8 -*-
 
import urllib.request
import re
def getUrlList(url):
    html = urllib.request.urlopen(url)
    # decode the bytes (the API is requested with _input_charset=utf-8) instead of wrapping them in str(),
    # so Chinese fields are not turned into escape sequences
    response = html.read().decode('utf-8')
    return response
def getImageList(html):
    pattern = re.compile('.*?"avatarUrl":"(.*?)","cardUrl":"(.*?)","city":"(.*?)","height":"(.*?)",.*?"realName":"(.*?)",.*?"totalFavorNum":(.*?),"userId":(.*?),.*?"weight":"(.*?)".*?',re.S)
    items = re.findall(pattern, html)
    for item in items:
        yield {
            'avatarUrl': 'http:' + item[0],
            'cardUrl': 'http:' + item[1],
            'city': item[2],
            'height': item[3],
            'realName': item[4],
            'totalFavorNum': item[5],
            'userId': item[6],
            'weight': item[7],
        }
 
def main():
    url = 'https://mm.taobao.com/tstar/search/tstar_model.do?_input_charset=utf-8'
    html = getUrlList(url)
    for item in getImageList(html):
        print(item)
 
if __name__ == '__main__':
    main()
 
7. Scraping FIND ICONS
#FindIcons icon set
#Source site: https://findicons.com/
 
import requests
import os
from bs4 import BeautifulSoup
 
root = 'D:/图片/'
base_urls = 'https://findicons.com/pack/2787/beautiful_flat_icons/'
for i in range(1,8):
    base_url = base_urls + str(i)
    r = requests.get(base_url)
    content = r.content
    soup = BeautifulSoup(content,'lxml')
    img_list = soup('img')
    for img in img_list:
        url = img['src']
        path = root + url.split('/')[-1]
        try:
            if not os.path.exists(root):
                os.mkdir(root)
            if not os.path.exists(path):
                r = requests.get(url)
                with open(path,'wb') as f:
                    f.write(r.content)
                print('File saved')
            else:
                print('File already exists')
        except:
            print('Download failed')
 
 
8. Scraping images from Doutula (斗图网)
#Doutula image downloader
 
from tkinter import *
import os
import requests
from bs4 import BeautifulSoup
 
def do():
    urls = 'https://www.doutula.com/photo/list/?page=3'
    root = 'E:/图片/'
    request = requests.get(urls)
    content = request.content
    soup = BeautifulSoup(content,'lxml')
    img_list = soup.find_all('img',attrs = {'class':'img-responsive lazy image_dta'})
    for img in img_list:
        url_a = 'http:' + img['data-original']
        url = url_a.split('!')[0]
        path = root + url.split('/')[-1]
        try:
            if not os.path.exists(root):
                os.mkdir(root)
            if not os.path.exists(path):
                r = requests.get(url)
                with open(path,'wb') as f:
                    f.write(r.content)
                print('File saved')
            else:
                print('File already exists')
        except:
            print('Download failed')
app = Tk()
Button(text = 'click',command = do).pack()
 
app.mainloop()
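
The download-and-save loop in sections 4, 7 and 8 is essentially the same; a minimal sketch of a shared helper that could replace it (the function name and parameters are illustrative):

import os
import requests

def download_image(url, root):
    # Save url into root, skipping files that already exist; returns the local path, or None on failure.
    try:
        os.makedirs(root, exist_ok=True)
        path = os.path.join(root, url.split('/')[-1])
        if os.path.exists(path):
            print('File already exists')
            return path
        r = requests.get(url)
        with open(path, 'wb') as f:
            f.write(r.content)
        print('File saved')
        return path
    except Exception as e:
        print('Download failed:', e)
        return None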
 
9. To be continued...
 
 

Reposted from: https://www.cnblogs.com/changwoo/p/9930610.html
