Web Crawler (Part 2) -- BeautifulSoup and re (Regular Expressions)

'''
import requests
import re
from xlwt import Workbook
import xlrd
import time


def key_name(number):
    # Build the search URL for the given result offset, fetch the page, and return its content
    name = '手机'  # search keyword ("mobile phone")
    URL_1 = "https://s.taobao.com/search?ie=utf8&initiative_id=staobaoz_20170905&stats_click=search_radio_all%3A1&js=1&imgfile=&q="
    URL_2 = "&suggest=0_1&_input_charset=utf-8&wq=u&suggest_query=u&source=suggest&p4ppushleft=5%2C48&s="
    URL = (URL_1 + name + URL_2 + str(number))
    # print(URL)
    res = requests.get(URL)
    return res.text


def find_date(text):
    # Extract the block of product data embedded in the page source and return it
    reg = r',"data":{"spus":\[({.+?)\]}},"header":'
    reg = re.compile(reg)
    info = re.findall(reg, text)
    return info[0]   # first (and normally only) match of the data block


def manipulation_data(info, N, sheet):
    # Parse the extracted fragment and write each item into the worksheet
    Date = eval(info)   # the captured "{...},{...}" fragment becomes a tuple of dicts

    for d in Date:
        T = " ".join([t['tag'] for t in d['tag_info']])
        # print(d['title'] + '\t' + d['price'] + '\t' + d['importantKey'][0:len(d['importantKey'])-1] + '\t' + T)

        sheet.write(N, 0, d['title'])
        sheet.write(N, 1, d['price'])
        sheet.write(N, 2, T)
        N = N + 1
    return N


def main():
    book = Workbook()
    sheet = book.add_sheet('淘宝手机数据')   # "Taobao phone data"
    sheet.write(0, 0, '品牌')   # brand
    sheet.write(0, 1, '价格')   # price
    sheet.write(0, 2, '配置')   # specs
    book.save('淘宝手机数据.xls')
    # k is the starting offset in the URL; the trailing number of consecutive pages differs by 48
    # N tracks the next row of the worksheet so data is written in order
    k = 0
    N = 1
    for i in range(10 + 1):
        text = key_name(k + i * 48)
        info = find_date(text)
        N = manipulation_data(info, N, sheet)

        book.save('淘宝手机数据.xls')
        print('Finished downloading page ' + str(i))


if __name__ == '__main__':
    main()
'''
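# The block above turns the captured "{...},{...}" fragment into Python objects with eval.
# A minimal sketch of the same step using json.loads instead of eval, on a made-up fragment
# shaped like the capture group in find_date (the sample values below are invented):
'''
import json

info = ('{"title": "Phone A", "price": "1999", "tag_info": [{"tag": "4G"}]},'
        '{"title": "Phone B", "price": "2999", "tag_info": [{"tag": "5G"}]}')

# Wrap the fragment in [] so it becomes a JSON array, then parse it safely.
data = json.loads('[' + info + ']')
for d in data:
    tags = " ".join(t['tag'] for t in d['tag_info'])
    print(d['title'], d['price'], tags)
'''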


'''
import re
text = input("Please input your Email address:\n")
if re.match(r'^[0-9a-zA-Z_]{0,19}@[0-9a-zA-Z]{1,13}\.(com|cn|net)$', text):
# if re.match(r'^[0-9a-zA-Z_]{0,19}@163\.com$', text):
    print('Email address is Right!')
else:
    print('Please reset your right Email address!')
'''
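# Note: inside square brackets "[com,cn,net]" is a character class of single letters
# (plus the comma), not a choice between the three suffixes, which is why the pattern
# above uses the group "(com|cn|net)". A quick self-contained check:
'''
import re

# The character class wrongly accepts suffixes such as "cm" built from its letters.
print(re.match(r'^\w+@\w+\.[com,cn,net]{1,3}$', 'user@site.cm'))   # matches (wrong)
# The alternation only accepts one of the three whole suffixes.
print(re.match(r'^\w+@\w+\.(com|cn|net)$', 'user@site.cm'))        # None
print(re.match(r'^\w+@\w+\.(com|cn|net)$', 'user@site.com'))       # matches
'''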

'''
import urllib.request as ur
from bs4 import BeautifulSoup

html = ur.urlopen("http://bj.ganji.com/zpshichangyingxiao/")
bs0bj = BeautifulSoup(html.read(),"html.parser")
print(bs0bj("a"))
print("----------")
print(bs0bj.a)
'''

'''
import urllib.request as ur
from bs4 import BeautifulSoup

html = ur.urlopen("http://bj.ganji.com/zpshichangyingxiao/")
bs0bj = BeautifulSoup(html.read(),"html.parser")
links = bs0bj.findAll("a")
for link in links:
    print(link)
'''

'''
import urllib.request as ur
from bs4 import BeautifulSoup

html = ur.urlopen("http://bj.ganji.com/zpshichangyingxiao/")
bs0bj = BeautifulSoup(html.read(),"html.parser")
links = bs0bj.findAll("a",attrs={"class":"list_title gj_tongji"})
for link in links:
    print(link.text)
'''

'''
import urllib.request as ur
from bs4 import BeautifulSoup

html = ur.urlopen("http://bj.ganji.com/zpshichangyingxiao/")
bs0bj = BeautifulSoup(html.read(),"html.parser")
links = bs0bj.findAll("a",attrs={"class":"list_title gj_tongji"})
for link in links:
    print(link.attrs["href"])
'''

# Navigation attributes: next_siblings, previous_siblings, parent, parents, children, descendants
# (an offline sketch follows the block below)
'''
import urllib.request as ur
from bs4 import BeautifulSoup

html = ur.urlopen("http://bj.ganji.com/zpshichangyingxiao/")
bs0bj = BeautifulSoup(html.read(),"html.parser")
print(bs0bj.find("a").children)
print("0-------------")
print(bs0bj.find("a").descendants)
'''
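# The navigation attributes listed above can also be tried offline; a minimal sketch
# on a made-up inline snippet, so it runs without fetching any page:
'''
from bs4 import BeautifulSoup

html = "<ul><li>first</li><li>second</li><li>third</li></ul>"
soup = BeautifulSoup(html, "html.parser")
li = soup.find("li")                     # the <li>first</li> tag

print(list(li.next_siblings))            # the two <li> tags after it
print(li.parent.name)                    # ul
print([p.name for p in li.parents])      # ul, [document]
print(list(li.parent.children))          # direct children of <ul>
print(list(li.parent.descendants))       # children plus their text nodes
'''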

'''
import re
rec = re.compile("[a-z]", re.I)   # re.I makes the match case-insensitive
print(rec.match("A"))             # matches thanks to re.I
'''

'''
import re
pattern = re.compile(r'\d+')
m = pattern.match("one12twothree34four")          # anchored at position 0 -> None
print(m)
m = pattern.match("one12twothree34four", 2, 10)   # position 2 is 'e' -> None
print(m)
m = pattern.match("one12twothree34four", 3, 10)   # position 3 is '1' -> matches '12'
print(m)
print(m.group(0))   # '12'
print(m.start(0))   # 3
print(m.end(0))     # 5
print(m.span(0))    # (3, 5)
print(m.groups())   # () -- the pattern has no capturing groups
'''

'''
import re
pattern = re.compile(r'\d+')
result1 = pattern.findall("www123456789")          # ['123456789']
result2 = pattern.findall("123456789qww", 0, 10)   # scans only the first 10 characters
print(result1)
print(result2)
'''

'''
import urllib.request as ur
from bs4 import BeautifulSoup
import re

html = ur.urlopen("http://bj.ganji.com/chongwu/")
bs0bj = BeautifulSoup(html.read(),"html.parser")
links = bs0bj.findAll("a",{"title":re.compile("?$")})
for link in links:
    if "title" in link.attrs:
        print(link.attrs["title"])
'''
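# The same title filter can be exercised offline; a minimal sketch with made-up markup,
# keeping only <a> tags whose title ends with a question mark, as the pattern above does:
'''
from bs4 import BeautifulSoup
import re

html = ('<a title="Adopt a cat?">link1</a>'
        '<a title="Pet dog for sale">link2</a>'
        '<a href="#">link3</a>')
soup = BeautifulSoup(html, "html.parser")

links = soup.findAll("a", {"title": re.compile(r"\?$")})
for link in links:
    print(link.attrs["title"])   # only "Adopt a cat?"
'''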

'''
import urllib.request as ur
from bs4 import BeautifulSoup

html = ur.urlopen("http://bj.ganji.com/chongwu/")
bs0bj = BeautifulSoup(html.read(),"html.parser")
links = bs0bj.findAll(lambda a: len(a.attrs) == 2)   # keep only tags that carry exactly two attributes
for link in links:
    print(link.attrs)
'''

'''
import urllib.request as ur
from bs4 import BeautifulSoup

html = ur.urlopen("http://www.lvmama.com/")
bs0bj = BeautifulSoup(html.read(),"html.parser")
links = bs0bj.findAll("img")
for link in links:
    if "src" in link.attrs:
        print(link.attrs["src"])
'''

import urllib.request as ur
from bs4 import BeautifulSoup

html = ur.urlopen("http://bj.ganji.com/chongwu/")
bs0bj = BeautifulSoup(html.read(),"html.parser")
links = bs0bj.findAll(lambda a: len(a.attrs) == 2)
for link in links:
    print(link.attrs)
