requests lxml实战1

#coding:utf-8
import  requests
from lxml import  etree
import os
import sys
import urllib
reload(sys)
sys.setdefaultencoding('utf-8')

# Base directory of this script; decoded from gbk because this was written
# for a Chinese-locale Windows Python 2 where __file__ is a gbk byte string.
path=os.path.dirname(__file__).decode('gbk')
print type(path)
# NOTE(review): module-level network call — the forum index page is fetched
# as soon as this module is imported.
req=requests.get("http://bbs.seller.aliexpress.com/bbs/thread.php?fid=34")
# Base URL prefix; thread hrefs scraped from the index are relative to this.
sys_url="http://bbs.seller.aliexpress.com/bbs/"
# html_text=req.content
# html=etree.HTML(html_text)
# titles=html.xpath(".//*[@id='threadlist']/tr/td[2]/a[last()]/text()")  #文本标题
# filenames=html.xpath(".//*[@id='threadlist']/tr/td[2]/a[last()]/@id")  #文件夹名称
# hrefs=html.xpath(".//*[@id='threadlist']/tr/td[2]/a[last()]/@href")   #具体内容链接
# for title in titles:
#     "用标题做文件夹名称"
#     folder=os.path.join(path, filenames[titles.index(title)])  #文件夹
#     if not os.path.exists(folder):
#         os.makedirs(folder)
#     "获取页面详情 存储文字和图片"
#     html_page=requests.get(sys_url + hrefs[titles.index(title)])
#     html_text=etree.HTML(html_page.content)
#     "存储html"
#     html_name= hrefs[titles.index(title)].split('=')[1] + '.html'  #html 文件名
#     print 'html_name',html_name
#     if not os.path.exists(os.path.join(folder,html_name)):
#         with open(os.path.join(folder, html_name), 'wb') as file:
#             file.write(html_page.content)
# def download_pic(hrefs,path):
#     """
#     :param path: 图片的获取路径 list
#     :return: 图片存储于本地
#     """
#     html_page = requests.get(sys_url + hrefs[titles.index(title)])
#     html_text = etree.HTML(html_page.content)
#     "提取图片,放到文件夹"
#     imgs_url=html_text.xpath(path)
#     print imgs_url
#     for img_url in imgs_url:
#         img_path=folder+'\\'+img_url.split('/')[-1]  #图片存放
#         if not os.path.exists(img_path):
#             urllib.urlretrieve(img_url, folder + '\\' + img_url.split('/')[-1])
#     return True
# def download_html(hrefs,split_key,num=1):
#     "存储html"
#     html_name= hrefs[titles.index(title)].split(split_key)[num] + '.html'  #html 文件名
#     print 'html_name',html_name
#     if not os.path.exists(os.path.join(folder,html_name)):
#         with open(os.path.join(folder, html_name), 'wb') as file:
#             file.write(html_page.content)

def analysis_html(url,title_xpath,folder_xpath,href_xpath):
    """Fetch *url* and extract three parallel lists via the given XPath
    expressions: post titles, folder names (anchor ids) and detail-page
    hrefs.  Returns the tuple (titles, folders, hrefs)."""
    page = requests.get(url)
    tree = etree.HTML(page.content)
    extract = tree.xpath  # one parsed tree, three queries
    return extract(title_xpath), extract(folder_xpath), extract(href_xpath)

def download(titles,folders,hrefs):
    "下载需要信息"
    for title in titles:
        "用标题做文件夹名称"
        folder = os.path.join(path, folders[titles.index(title)])  # 文件夹
        if not os.path.exists(folder):
            os.makedirs(folder)
        "获取页面详情 存储文字和图片"
        html_page = requests.get(sys_url + hrefs[titles.index(title)])
        html_text = etree.HTML(html_page.content)
        "存储html"
        html_name = hrefs[titles.index(title)].split('=')[1] + '.html'  # html 文件名
        print 'html_name', html_name
        if not os.path.exists(os.path.join(folder, html_name)):
            with open(os.path.join(folder, html_name), 'wb') as file:
                file.write(html_page.content)
    html_page = requests.get(sys_url + hrefs[titles.index(title)])
    html_text = etree.HTML(html_page.content)
    "提取图片,放到文件夹"
    imgs_url = html_text.xpath(path)
    print imgs_url
    for img_url in imgs_url:
        img_path = folder + '\\' + img_url.split('/')[-1]  # 图片存放
        if not os.path.exists(img_path):
            urllib.urlretrieve(img_url, folder + '\\' + img_url.split('/')[-1])





 

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值