Python 爬虫:用 BeautifulSoup 解析标签文本 —— Python 常用爬虫代码总结(方便查询)

该博客主要介绍了使用BeautifulSoup解析HTML页面的方法,包括各种查找标签的技巧,如根据class、id、属性等。还涉及编码解码、正则表达式、时间操作和数据库操作等Python常用技巧。
摘要由CSDN通过智能技术生成

BeautifulSoup 解析页面

# Parse an HTML document into a searchable BeautifulSoup tree.
# The class name was lowercased when this article was scraped; it is
# `BeautifulSoup`, and "lxml" selects the fast C-based parser back-end.
# NOTE(review): `htmltxt` (the page's HTML text) is defined elsewhere.
from bs4 import BeautifulSoup

soup = BeautifulSoup(htmltxt, "lxml")

# Three parser back-ends ("loaders") and how each repairs broken markup.
# NOTE(review): the original example markup was lost when this article was
# scraped; "<a></p>" (an unclosed <a> plus a stray </p>) is a representative
# broken snippet — confirm against the original post if exact output matters.

# html.parser: auto-closes start-only tags, silently drops end-only tags
soup = BeautifulSoup("<a></p>", "html.parser")
# result: <a></a>

# lxml: same kind of repair, wrapped in <html><body>
soup = BeautifulSoup("<a></p>", "lxml")
# result: <html><body><a></a></body></html>

# html5lib: completes the markup into a full standard HTML5 document
soup = BeautifulSoup("<a></p>", "html5lib")
# result: <html><head></head><body><a><p></p></a></body></html>

# Find tags by name, id, class and arbitrary attributes.
# NOTE(review): `soup` is the tree built earlier in the article.

# match tag name + class + id + a custom attribute in one query
# (the original line had an unbalanced trailing ')' — removed)
soup.find("a", class_="title", id="t1", attrs={"alog-action": "qb-ask-uname"})

# read the value of a single attribute on a found tag
pubtime = soup.find("meta", attrs={"itemprop": "datepublished"}).attrs['content']

# every tag whose class is "title"
for i in soup.find_all(class_="title"):
    print(i.get_text())

# at most 2 tags whose class is "title"
for i in soup.find_all(class_="title", limit=2):
    print(i.get_text())

# get_text() can join nested texts with a separator and strip surrounding
# whitespace.
# NOTE(review): the sample markup was stripped when the article was scraped;
# reconstructed so the printed result matches the original "结果为" comment.
soup = BeautifulSoup(
    '<p class="title" id="p1"><b> the dormouses story </b>'
    '<b> the dormouses story </b></p>',
    "html5lib")

soup.find(class_="title").get_text("|", strip=True)  # was lowercase `true` — fixed
# result: the dormouses story|the dormouses story

# read the id attribute of the class="title" tag
soup.find(class_="title").get("id")

# match class names by regular expression
soup.find_all(class_=re.compile("tit"))

# recursive=False restricts find_all to the tag's DIRECT children only.
# NOTE(review): sample markup reconstructed — the original was lost in
# scraping; "abc" was the visible remnant of the <title> text.
soup = BeautifulSoup('<html><head><title>abc</title></head></html>', 'lxml')

# <title> is a grandchild of <html>, so this finds nothing
soup.html.find_all("title", recursive=False)  # was lowercase `false` — fixed

# Decode literal "\uXXXX" escape sequences into Chinese text.
# In Python 3 a plain "\u65f6" literal is ALREADY decoded by the parser, so
# the round-trip below would mangle it; the input must contain literal
# backslashes (raw string here), as it would when read from a file/response.
content = r"\u65f6\u75c7\u5b85"

# 'unicode_escape' interprets the literal \uXXXX sequences (input is ASCII)
content = content.encode("utf8", "ignore").decode('unicode_escape')

# URL percent-encoding and decoding.
from urllib import parse

# encode: each UTF-8 byte becomes %XX
x = "中国你好"
y = parse.quote(x)
print(y)  # %E4%B8%AD%E5%9B%BD%E4%BD%A0%E5%A5%BD

# decode: back to the original text
x = parse.unquote(y)
print(x)  # 中国你好

# Decode HTML character entities (e.g. "&lt;" -> "<").
# The article used HTMLParser().unescape, which was deprecated and removed
# in Python 3.9; html.unescape is the documented replacement.
import html

# NOTE(review): the original sample string was lost when the article was
# scraped; this is a representative entity-escaped snippet.
htmls = "&lt;abc&gt;"

txt = html.unescape(htmls)

print(txt)  # <abc>

# Base64 encode / decode round-trip for UTF-8 text.
import base64

# encode: str -> UTF-8 bytes -> base64 bytes -> ASCII str
content = "测试转码文本123"
contents_base64 = base64.b64encode(content.encode('utf-8', 'ignore')).decode("utf-8")

# decode: b64decode returns bytes — decode back to str to recover the text
# (the original stopped at bytes, so the round trip never yielded a string)
contents = base64.b64decode(contents_base64).decode("utf-8")

# Strip emoji (and any other astral-plane character) from a string.
import re

def filter_emoji(desstr, restr=''):
    """Replace every character above U+FFFF in *desstr* with *restr*.

    The original pattern used ``\\u00010000`` — but ``\\u`` takes exactly 4
    hex digits, so that parsed as U+0001 followed by "0000". Code points
    above U+FFFF need the 8-digit ``\\U`` escape.
    """
    try:
        # wide (UCS-4) build: match astral code points directly
        co = re.compile(u'[\U00010000-\U0010ffff]')
    except re.error:
        # narrow (UTF-16) build: match surrogate pairs instead
        co = re.compile(u'[\ud800-\udbff][\udc00-\udfff]')
    return co.sub(restr, desstr)

# Remove <script> and <style> elements — the tags AND everything inside them.
# NOTE(review): `htmls` (the page source) is assumed to be defined elsewhere;
# `requests` was imported by the original snippet but is unused here.
import requests
from bs4 import BeautifulSoup

soup = BeautifulSoup(htmls, "lxml")

# soup([...]) is shorthand for soup.find_all([...]);
# extract() detaches each matched tag (with its contents) from the tree
for script in soup(["script", "style"]):
    script.extract()

print(soup)

# Strip HTML tags but keep the text inside them.
import re

# NOTE(review): the sample markup was lost in scraping; reconstructed so the
# printed result matches the original "#abc" comment.
htmls = "<p>abc</p>"

# the pattern lost its opening "<[^" when the article was scraped;
# re.S (DOTALL) lets it run across newlines in multi-line markup
dr = re.compile(r'<[^>]+>', re.S)

htmls2 = dr.sub('', htmls)

print(htmls2)  # abc

# Extract fields from a JSON-ish text response with a regular expression.
import re

# Sample response body (a JSONP-style wrapper around JSON). The article
# showed this as loose text; it must be a string to be searchable. A raw
# string keeps the literal \/ escapes intact.
htmls = r'''rollback({
    "response": {
        "code": "0",
        "msg": "success",
        "dext": ""
    },
    "data": {
        "count": 3,
        "page": 1,
        "article_info": [{
            "title": "“小库里”:适应比赛是首要任务 投篮终会找到节奏",
            "url": "http:\/\/sports.qq.com\/a\/20180704\/035378.htm",
            "time": "2018-07-04 16:58:36",
            "column": "nba",
            "img": "",
            "desc": ""
        }, {
            "title": "首钢体育助力国家冰球集训队 中国冰球联赛年底启动",
            "url": "http:\/\/sports.qq.com\/a\/20180704\/034698.htm",
            "time": "2018-07-04 16:34:44",
            "column": "综合体育",
            "img": "",
            "desc": ""
        }]
    }
})'''

# Pull out every item's title and url.
# (.*?) captures lazily; .*? skips the fields in between; \s* tolerates the
# whitespace after each colon (the original pattern assumed none and would
# match nothing against this sample); re.DOTALL (was lowercased to the
# invalid `re.dotall`) lets .*? cross line breaks.
reg_str = r'"title":\s*"(.*?)",.*?"url":\s*"(.*?)"'
pattern = re.compile(reg_str, re.DOTALL)
items = re.findall(pattern, htmls)
for i in items:
    title = i[0]  # was misspelled `tilte`
    url = i[1]

# Common date/time operations.
# All strftime/strptime directives were lowercased when this article was
# scraped; %Y %H %M %S restored (%y is the 2-digit year, %h/%s are invalid).
import time
import datetime

# current date
today = datetime.date.today()
print(today)  # e.g. 2018-07-05

# current time, formatted
time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
print(time_now)  # e.g. 2018-07-05 14:20:55

# format a unix timestamp (result depends on the local timezone)
a = 1502691655
time_a = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(a)))
print(time_a)  # e.g. 2017-08-14 14:20:55

# parse a string into a datetime
# (renamed from `str`, which shadowed the builtin; the original then parsed
# an undefined name `st` — both fixed)
time_str = "2018-07-01 00:00:00"
datetime.datetime.strptime(time_str, "%Y-%m-%d %H:%M:%S")

# string -> unix timestamp (mktime interprets the tuple as local time)
time_line = "2018-07-16 10:38:50"
time_tuple = time.strptime(time_line, "%Y-%m-%d %H:%M:%S")
time_line2 = int(time.mktime(time_tuple))

# tomorrow's date
today = datetime.date.today()
tomorrow = today + datetime.timedelta(days=1)
print(tomorrow)  # e.g. 2018-07-06

# three days ago
today = datetime.datetime.today()
three_days_ago = today + datetime.timedelta(days=-3)
print(three_days_ago)  # e.g. 2018-07-02 13:37:00.107703

# elapsed time between two datetimes
# NOTE: timedelta.seconds is only the sub-day remainder; the total needs
# .days folded in, which is what all_minutes does
start = "2018-07-03 00:00:00"
time_now = datetime.datetime.now()
b = datetime.datetime.strptime(start, '%Y-%m-%d %H:%M:%S')
minutes = (time_now - b).seconds / 60
days = (time_now - b).days
all_minutes = days * 24 * 60 + minutes
print(minutes)
print(days)
print(all_minutes)

# MySQL insert / update patterns with pymysql.
# NOTE(review): connection parameters are the article's example values, and
# several names (content, contents, title, is_spider, one_new) come from the
# surrounding spider loop — they are not defined in this snippet.
import pymysql

conn = pymysql.connect(host='10.0.8.81', port=3306, user='root', passwd='root',
                       db='xxx', charset='utf8')
cur = conn.cursor()

# the original line lost its closing quote when the article was scraped
insert_sql = "insert into tbl_name(id,name,age) values(%s,%s,%s)"

id = 1  # NOTE: shadows the builtin id() — kept from the original snippet
name = "like"
age = 26
data_list = []
data = (id, name, age)

# single insert — parameterized (%s placeholders), so pymysql escapes values
cur.execute(insert_sql, data)
conn.commit()

# batch insert
data_list.append(data)
cur.executemany(insert_sql, data_list)
conn.commit()

# special characters in a value (name contains characters needing escaping)
data = (id, pymysql.escape_string(name), age)

# single update
# NOTE(review): %-interpolating into SQL is injection-prone even with
# escape_string — prefer cur.execute(sql, params); kept to match the article.
update_sql = "update tbl_name set content = '%s' where id = " + str(id)
cur.execute(update_sql % (pymysql.escape_string(content)))
conn.commit()

# batch update — flush once more than 500 rows have accumulated
update_sql = "update tbl_recieve set content = %s ,title = %s , is_spider = %s where id = %s"
update_data_list = []  # the original never initialized this list
update_data = (contents, title, is_spider, one_new[0])
update_data_list.append(update_data)
if len(update_data_list) > 500:
    try:
        cur.executemany(update_sql, update_data_list)
        conn.commit()
    except Exception:
        # the original `try` had no handler (a SyntaxError); roll back so a
        # failed batch doesn't leave the connection mid-transaction
        conn.rollback()

总结

以上就是这篇文章的全部内容了,希望本文的内容对大家的学习或者工作具有一定的参考学习价值,谢谢大家对萬仟网的支持。如果你想了解更多相关内容请查看下面相关链接

如您对本文有疑问或者有任何想说的,请点击进行留言回复,万千网友为您解惑!

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值