# -*- coding: utf-8 -*-
from urllib.request import urlopen
from bs4 import BeautifulSoup
import re
import datetime
import random
def gethtml(url):
    """Open *url* over HTTP and return the response object unchanged."""
    return urlopen(url)
def byclass(tag, css, soup=None):
    """Return every *tag* element whose attributes match the *css* dict.

    The original implementation silently read the module-level ``bsObj``
    global; that is kept as the default so existing call sites still work,
    but callers may now pass any BeautifulSoup document via *soup*.
    """
    target = bsObj if soup is None else soup
    return target.findAll(tag, css)
# --- class-search demo: fetch the listing page and pull out .list_top divs ---
html = gethtml("http://sydw.huatu.com/sc/")
# Name the parser explicitly: BeautifulSoup(html) with no parser argument
# emits GuessedAtParserWarning and may pick different parsers per machine.
bsObj = BeautifulSoup(html, "html.parser")
print(bsObj)
print("-------------------class search--------------------------")
namelist = bsObj.findAll("div", {"class": "list_top"})
print(namelist)
print("------------------- 过滤 --------------------------")
for name in namelist:
    # get_text() strips the markup, leaving only the visible text
    print(name.get_text())
print("------------------- 元素的横向和纵向查找 --------------------------")
html = gethtml("http://www.pythonscraping.com/pages/page3.html")
# Explicit parser avoids GuessedAtParserWarning / machine-dependent parsing.
obj = BeautifulSoup(html, "html.parser")
# .children iterates the table's direct children (rows and whitespace nodes)
namelist = obj.find("table", {"id": "giftList"}).children
for name in namelist:
    print(name)
print("------------------- 兄弟标签处理 --------------------------")
# next_siblings of the first <tr> yields every row AFTER the header row
namelist = obj.find("table", {"id": "giftList"}).tr.next_siblings
for name in namelist:
    print(name)
print("------------------- 正则匹配 --------------------------")
# *表示匹配前面的字符
# Raw string for the regex: the original's non-raw "\/" is an invalid string
# escape (SyntaxWarning on Python 3.12+); "/" needs no escaping in a regex,
# so the pattern below matches exactly the same src values.
namelist = obj.findAll("img", {"src": re.compile(r"\.\./img/gifts/img.*\.jpg")})
for name in namelist:
    print(name)
    print("过滤属性:" + name.attrs["src"])
print("------------------- 动态页面的爬询 --------------------------")
web = gethtml("http://en.wikipedia.org/wiki/Kevin_Bacon")
# Explicit parser avoids GuessedAtParserWarning.
webobj = BeautifulSoup(web, "html.parser")
node = webobj.findAll("a")
for link in node:
    # Some <a> tags (e.g. anchors) have no href; skip them to avoid KeyError.
    if "href" in link.attrs:
        print(link.attrs["href"])
# Seed from the OS. random.seed(datetime.datetime.now()) raises TypeError on
# Python 3.11+, where the seed must be None/int/float/str/bytes/bytearray;
# a no-argument seed() already uses the current time / OS entropy.
random.seed()

def getlinks(articurl):
    """Fetch the Wikipedia article at *articurl* (a path like "/wiki/X")
    and return all article links (/wiki/..., excluding namespaced pages
    containing ":") found inside its bodyContent div.
    """
    html = urlopen("http://en.wikipedia.org" + articurl)
    bsObj = BeautifulSoup(html, "html.parser")
    # Bug fix: the original chained .findAll(...) onto the ResultSet returned
    # by findAll("div", ...), which has no findAll method and raises
    # AttributeError. find() returns the single div we can then search.
    return bsObj.find("div", {"id": "bodyContent"}) \
        .findAll("a", href=re.compile(r"^(/wiki/)((?!:).)*$"))
# Random walk: repeatedly jump to a random article link until a page with
# no qualifying links is reached.
links = getlinks("/wiki/Kevin_Bacon")
while len(links) > 0:
    newArticle = links[random.randint(0, len(links) - 1)].attrs["href"]
    print(newArticle)
    # Bug fix: the original called getLinks (capital L), a name that does
    # not exist — the loop crashed with NameError on its first iteration.
    links = getlinks(newArticle)
# 简单Python爬虫的编写  (article title — CSDN page footer scraped along with the code, not part of the program)
# 最新推荐文章于 2024-02-16 10:53:15 发布