# 简单Python爬虫的编写 — Writing a simple Python web crawler

# -*- coding: utf-8 -*-
from urllib.request import urlopen
from bs4 import BeautifulSoup
import  re
import  datetime
import  random


def gethtml(url):
    """Open *url* with urllib and return the raw HTTP response object."""
    return urlopen(url)

def byclass(tag, css, soup=None):
    """Return all elements matching *tag* and the attribute dict *css*.

    Originally this relied solely on the module-global ``bsObj``; the new
    optional *soup* parameter lets callers pass any BeautifulSoup object
    explicitly while remaining backward-compatible (``soup=None`` falls
    back to the global, preserving the old behavior).

    :param tag:  tag name to search for, e.g. ``"div"``
    :param css:  attribute filter dict, e.g. ``{"class": "list_top"}``
    :param soup: BeautifulSoup document to search; defaults to global bsObj
    :return: ResultSet of matching elements
    """
    target = bsObj if soup is None else soup
    return target.findAll(tag, css)

html = gethtml("http://sydw.huatu.com/sc/")
# BUG FIX: always name the parser explicitly. BeautifulSoup(html) makes bs4
# guess the best installed parser, emits a GuessedAtParserWarning, and can
# produce different trees on different machines.
bsObj = BeautifulSoup(html, "html.parser")
print(bsObj)
print("-------------------class search--------------------------")
# Collect every <div class="list_top"> element on the page.
namelist = bsObj.findAll("div", {"class": "list_top"})
print(namelist)
print("------------------- 过滤  --------------------------")
# get_text() strips the tags and prints only the visible text.
for name in namelist:
    print(name.get_text())

print("------------------- 元素的横向和纵向查找  --------------------------")

html = gethtml("http://www.pythonscraping.com/pages/page3.html")
# BUG FIX: pass the parser explicitly instead of letting bs4 guess (warning,
# and potentially different results depending on the installed parsers).
obj = BeautifulSoup(html, "html.parser")
# .children iterates only the direct children of the table (not descendants).
namelist = obj.find("table", {"id": "giftList"}).children
for name in namelist:
    print(name)

print("------------------- 兄弟标签处理 --------------------------")
# next_siblings walks every row that follows the first <tr> (the header row)
# inside the gift table, i.e. all the data rows.
rows = obj.find("table", {"id": "giftList"}).tr.next_siblings
for row in rows:
    print(row)

print("------------------- 正则匹配 --------------------------")
# BUG FIX: the pattern was a normal string containing "\." and "\/", which
# are invalid escape sequences (DeprecationWarning, SyntaxWarning on newer
# Pythons). Use a raw string; "/" never needs escaping in a regex.
# ".*" matches any run of characters between the prefix and ".jpg".
namelist = obj.findAll("img", {"src": re.compile(r"\.\./img/gifts/img.*\.jpg")})
for name in namelist:
    print(name)
    # attrs exposes the tag's attributes as a dict; pull out the src value.
    print("过滤属性:" + name.attrs["src"])

print("------------------- 动态页面的爬询 --------------------------")
web = gethtml("http://en.wikipedia.org/wiki/Kevin_Bacon")
# BUG FIX: specify the parser explicitly (avoids the bs4 guessed-parser
# warning and parser-dependent output).
webobj = BeautifulSoup(web, "html.parser")
# Print the href of every anchor that actually carries one; anchors used
# as pure page targets have no href attribute.
for link in webobj.findAll("a"):
    if "href" in link.attrs:
        print(link.attrs["href"])


random.seed(datetime.datetime.now())


def getlinks(articurl):
    """Fetch the Wikipedia article at *articurl* (a path such as
    ``/wiki/Kevin_Bacon``) and return all internal article links in its body.

    The regex keeps only hrefs that start with ``/wiki/`` and contain no
    colon, which filters out special pages (File:, Category:, ...).

    :param articurl: article path appended to http://en.wikipedia.org
    :return: ResultSet of <a> tags pointing at other articles
    """
    html = urlopen("http://en.wikipedia.org" + articurl)
    bsObj = BeautifulSoup(html, "html.parser")
    # BUG FIX: findAll() returns a ResultSet (a list), which has no findAll()
    # method — the original chained call raised AttributeError. Use find() to
    # get the single bodyContent div, then search inside it.
    return bsObj.find("div", {"id": "bodyContent"})\
        .findAll("a", href=re.compile("^(/wiki/)((?!:).)*$"))


# BUG FIX: this initial call was indented inside getlinks after its return
# statement (unreachable dead code); it must run at module level to seed the
# random walk.
links = getlinks("/wiki/Kevin_Bacon")
while len(links) > 0:
    # Pick a random article link from the current page and follow it.
    newArticle = links[random.randint(0, len(links) - 1)].attrs["href"]
    print(newArticle)
    # BUG FIX: was getLinks (NameError) — the function is named getlinks.
    links = getlinks(newArticle)
  • 2
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值