Errors encountered when parsing a string into JSON

ECMAScript specifies the characters that must be escaped inside a JSON string: " \ / and the control escapes \b \f \n \r \t
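As a quick demonstration, json.dumps shows these escapes being produced (a minimal sketch, independent of the scraping code below):

import json

# Each character JSON requires to be escaped comes back as a backslash sequence.
print(json.dumps({"quote": '"', "backslash": "\\", "newline": "\n", "tab": "\t"}))
# {"quote": "\"", "backslash": "\\", "newline": "\n", "tab": "\t"}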


Problems encountered and their fixes:


1. The byte 0xae (decimal 174, beyond ASCII) appears; fix by removing it

s = str(html).replace("\\xae", "")  # convert the bytes to str (its repr), then drop the \xae escape for character 174

2. \\ appears; replace it with \

s = s.replace("\\\\", "\\")  # collapse \\ into \

3. \' appears (not a legal JSON escape); replace it with '

s = s.replace("\\'", "'")  # turn \' into '



Code

import json
import random
import re
import urllib.request
from bs4 import BeautifulSoup


def randHeader():
    # Assemble a request header with a randomly chosen User-Agent.
    head_connection = ['Keep-Alive', 'close']
    head_accept = ['text/html, application/xhtml+xml, */*']
    head_accept_language = ['zh-CN,fr-FR;q=0.5', 'en-US,en;q=0.8,zh-Hans-CN;q=0.5,zh-Hans;q=0.3']
    head_user_agent = ['Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko',
                       'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1500.95 Safari/537.36',
                       'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; rv:11.0) like Gecko)',
                       'Mozilla/5.0 (Windows; U; Windows NT 5.2) Gecko/2008070208 Firefox/3.0.1',
                       'Mozilla/5.0 (Windows; U; Windows NT 5.1) Gecko/20070309 Firefox/2.0.0.3',
                       'Mozilla/5.0 (Windows; U; Windows NT 5.1) Gecko/20070803 Firefox/1.5.0.12',
                       'Opera/9.27 (Windows NT 5.2; U; zh-cn)',
                       'Mozilla/5.0 (Macintosh; PPC Mac OS X; U; en) Opera 8.0',
                       'Opera/8.0 (Macintosh; PPC Mac OS X; U; en)',
                       'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.12) Gecko/20080219 Firefox/2.0.0.12 Navigator/9.0.0.6',
                       'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Win64; x64; Trident/4.0)',
                       'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Trident/4.0)',
                       'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.2; .NET4.0C; .NET4.0E)',
                       'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Maxthon/4.0.6.2000 Chrome/26.0.1410.43 Safari/537.1 ',
                       'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.2; .NET4.0C; .NET4.0E; QQBrowser/7.3.9825.400)',
                       'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:21.0) Gecko/20100101 Firefox/21.0 ',
                       'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.92 Safari/537.1 LBBROWSER',
                       'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0; BIDUBrowser 2.x)',
                       'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.11 TaoBrowser/3.0 Safari/536.11']

    header = {
        'Connection': head_connection[0],
        'Accept': head_accept[0],
        'Accept-Language': head_accept_language[1],
        'User-Agent': random.choice(head_user_agent)
    }
    return header


# url = 'http://www.ebay.com/itm/152586216774'
# req = urllib.request.Request(url= url , headers=randHeader())
# webpage = urllib.request.urlopen(req)
# html = webpage.read()
# soup = BeautifulSoup(html, 'html.parser')
# print(soup)

def getdesc(ebayno):
    # Fetch the eBay item description page and save its text to a .txt file.
    url = "http://vi.vipr.ebaydesc.com/ws/eBayISAPI.dll?ViewItemDescV4&item=" + ebayno
    req = urllib.request.Request(url=url, headers=randHeader())
    webpage = urllib.request.urlopen(req)
    html = webpage.read()
    soup = BeautifulSoup(html, 'html.parser')  # parse the page
    print(soup.prettify())
    content = soup.find_all("div", class_="descriptionbox")[0]  # select the description container
    p = content.find_all("span")  # select the paragraph spans
    # print(p[0].strong.string)  # p[0] is the "Description" heading
    content = p[1]  # p[1] holds the body text
    pp = content.p.prettify()  # pretty-print the body
    pp = BeautifulSoup(pp, "html.parser")  # re-parse the pretty-printed markup
    # Write the text to a .txt file
    with open(str(ebayno) + "description.txt", 'w') as file_object:
        file_object.write("Description")
        for child in pp.strings:
            print(repr(child))
            file_object.write(child)

#
# qs = [
#     "253013768959"
#
#
# ]
#
# for q in qs:
#     getdesc(q)

# url = 'http://catalog.monroe.com/catalogPart/partResults.do?&domain=monroe&locale=en&partNumber=171340L&selection=findPart'
# req = urllib.request.Request(url= url , headers=randHeader())
# webpage = urllib.request.urlopen(req)
# html = webpage.read().decode()
# # soup = BeautifulSoup(html, 'html.parser')
# print(html)
# sjson = re.findall("\[(.*?)\]",html)[0]
# sjson = json.loads(sjson)
# selectedPartId = sjson["value"]
# print(selectedPartId)
#
# url = 'http://catalog.monroe.com/catalog/catalogOptions.do?&domain=monroe&locale=en&selection=catalog'
# req = urllib.request.Request(url= url , headers=randHeader())
# webpage = urllib.request.urlopen(req)
# html = webpage.read().decode()
# # soup = BeautifulSoup(html, 'html.parser')
# print(html)
# sjson = re.findall("(.*?)",html)[0]
# sjson = json.loads(sjson)
# selectedCatalogId = sjson["value"]
# print(selectedCatalogId)
selectedPartId = "1904279"
selectedCatalogId = "974"


url = "http://catalog.monroe.com/catalogPart/partResults.do?&selectedCatalogId="+str(selectedCatalogId)+"&selectedPartId="+str(selectedPartId)+"&selection=partDetails"
# print(url)
req = urllib.request.Request(url=url, headers=randHeader())
webpage = urllib.request.urlopen(req)
html = webpage.read()  # raw bytes
print(html)
s = str(html).replace("\\xae", "")  # convert to str (its repr) and drop the \xae escape for character 174
print(s)
s = re.findall("^b'(.*)'", s)[0]  # strip the b'...' wrapper left by str()
print(s)
s = s.replace("\\\\", "\\")  # collapse \\ into \
s = s.replace("\\'", "'")  # turn \' into '
s = json.loads(s)
print(s)
att = s["part"]["attributes"]
print(att)
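For comparison, decoding the bytes directly avoids the b'...' repr round-trip, so the doubled-backslash fix is no longer needed (the doubling was an artifact of str(html)). A minimal alternative sketch, reusing the html bytes fetched above and assuming a Latin-1 encoded response (an assumption, not confirmed by the server):

s = html.decode("latin-1")  # assumption: Latin-1, so byte 0xae decodes to the ® character
s = s.replace("\xae", "")   # drop the ® character itself
s = s.replace("\\'", "'")   # \' is still not a valid JSON escape
data = json.loads(s)
print(data["part"]["attributes"])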


