Python web-scraping notes and small examples

# -*- coding: utf-8 -*-
# @Time : 2021/2/7 22:08
# @Author :何广鹏
# @File :  demo3
# @Software :  PyCharm
'''
# Scrape Youdao Translate
# Import the required libraries
import urllib.request
import urllib.parse
import json

# Wait for the user to enter the word to translate
content = input('Enter the word to translate: ')

# URL of the Youdao Translate API
url = 'http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule&sessionFrom=null'

# Data to send to the Youdao server
data = {}

# The text to translate
data['i'] = content
# The fields below are taken from a request we captured earlier
data['from'] = 'AUTO'
data['to'] = 'AUTO'
data['smartresult'] = 'dict'
data['client'] = 'fanyideskweb'
data['salt'] = '1500349255670'
data['sign'] = '997742c66698b25b43a3a5030e1c2ff2'
data['doctype'] = 'json'
data['version'] = '2.1'
data['keyfrom'] = 'fanyi.web'
data['action'] = 'FY_BY_CL1CKBUTTON'
data['typoResult'] = 'true'

# URL-encode the payload; data is the content sent to Youdao
data = urllib.parse.urlencode(data).encode('utf-8')

# Build a Request object from the url and data; note that this is a POST request
request = urllib.request.Request(url=url, data=data, method='POST')
# Send the request
response = urllib.request.urlopen(request)
# Read the returned data
result_str = response.read().decode('utf-8')
# Parse the returned JSON string into a dict
result_dict = json.loads(result_str)

# Extract the translation result
print('Translation result: %s' % (result_dict['translateResult'][0][0]['tgt']))
'''
'''
# Use requests to simulate a browser request
import requests
if __name__ == '__main__':
    url = 'https://www.sogou.com/'  # Step 1: specify the URL
    response = requests.get(url=url)  # send the request and receive the response
    result_text = response.text  # response.text returns the response body as a string
    print(result_text)
    with open('sougou.html', 'w', encoding='utf-8') as f:  # save the page to a file
        f.write(result_text)
'''
'''
# Use requests to simulate a browser request, this time with query parameters
# Without a User-Agent the server may not respond; the User-Agent header makes the request look like it comes from a browser rather than from code
import requests
if __name__ == '__main__':
    keyword = input("Please ask what you want:")
    url = 'https://www.sogou.com/web?'
    param = {
        'query' : keyword
    }
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.85 Safari/537.36 Edg/90.0.818.46'
    }
    response = requests.get(url=url, params=param, headers=headers)
    result_txt = response.text
    with open('D:\爬虫文件\key.html', 'w', encoding='utf-8') as f:
        f.write(result_txt)
    print("Saved successfully!")
'''
'''
# Scrape Baidu Fanyi (the sign, token and Cookie values below were copied from a previously captured request)
import requests
if __name__ == '__main__':
    url = 'https://fanyi.baidu.com/v2transapi?from=en&to=zh'
    data = {
    'from': 'en',
    'to': 'zh',
    'query': 'dog',
    'transtype': 'realtime',
    'simple_means_flag': '3',
    'sign': '871501.634748',
    'token': '99c3d919d72081d32313d2db2726d30f',
    'domain':'common'
    }
    headers={
        'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.85 Safari/537.36',
        "Cookie": "BAIDUID=0A56A2723B57B8F4AEEE056D1D4E3890:FG=1; BIDUPSID=09634B3C85E8CC2A6A6A194E2A79F93A; "
                  "PSTM=1575051572; BDORZ=FFFB88E999055A3F8A630C64834BD6D0; "
                  "BDUSS=k5kOWVRRFBmVUh5NU1QZlYxZ1JXQkR4SDV2QjJPbllCczVqeUtmcVZxVHp"
                  "-QWxlRVFBQUFBJCQAAAAAAAAAAAEAAABqcBczamlhX"
                  "-ixAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAPNv4l3zb-Jdf; "
                  "H_PS_PSSID=1423_21082_20697; Hm_lvt_64ecd82404c51e03dc91cb9e8c025574=1575127780,1575135783; "
                  "Hm_lpvt_64ecd82404c51e03dc91cb9e8c025574=1575135802; "
                  "from_lang_often=%5B%7B%22value%22%3A%22est%22%2C%22text%22%3A%22%u7231%u6C99%u5C3C%u4E9A%u8BED%22%7D%2C"
                  "%7B%22value%22%3A%22cs%22%2C%22text%22%3A%22%u6377%u514B%u8BED%22%7D%2C%7B%22value%22%3A%22zh%22%2C"
                  "%22text%22%3A%22%u4E2D%u6587%22%7D%5D; "
                  "to_lang_often=%5B%7B%22value%22%3A%22en%22%2C%22text%22%3A%22%u82F1%u8BED%22%7D%2C%7B%22value%22%3A"
                  "%22zh%22%2C%22text%22%3A%22%u4E2D%u6587%22%7D%5D; REALTIME_TRANS_SWITCH=1; FANYI_WORD_SWITCH=1; "
                  "HISTORY_SWITCH=1; SOUND_SPD_SWITCH=1; SOUND_PREFER_SWITCH=1; "
                  "yjs_js_security_passport=92fcd939ce7b8845e696d1832c93d907288129ac_1575135808_js; "
                  "BDSFRCVID=3q-sJeCCxG3jlXJwI7hzbnFY0dQ_1_WOoxuo3J; "
                  "H_BDCLCKID_SF=tJuq_II2JCL3fP36q4rM-P_y52T22jPe-4jeaJ5n0-nnhnc1WM6byj"
                  "-J2x5X0qFj5N6dox76Bb7WfJARy66jK4JKjH8OqTJP; delPer=0; PSINO=1; BDRCVFR[gltLrB7qNCt]=mk3SLVN4HKm; "
                  "H_WISE_SIDS"
                  "=136721_138441_138434_128068_137657_135847_136436_120160_138490_137758_137978_132910_137690_131246_132552_137746_131518_118881_118877_118855_118837_118794_136688_107315_136431_138844_137901_136862_138147_138325_138114_136195_124621_137104_133847_138478_138343_137467_137734_131423_138663_137703_138607_110085_127969_138615_131953_137829_138274_127417_138313_136636_138425_138563_138942_138249_138302_138779; rsv_i=d6e9TNJb%2B3qKFQl8TUR%2BTZHvVqSR0wpofuwSqkQaewKiSq6vpJ4oYYAPIrNRiVRuqcIBsOHqnRRCn0DbP237jNis2u6sROs; FEED_SIDS=279036_1201_0; SE_LAUNCH=5%3A26252237_0%3A26252238; __yjsv5_shitong=1.0_7_5d9723b2e9549953a9853d661368336b7ae6_300_1575135783462_111.53.209.103_3a468be1; Hm_lvt_afd111fa62852d1f37001d1f980b6800=1575135802; Hm_lpvt_afd111fa62852d1f37001d1f980b6800=1575135802 " ,
    }
    response = requests.post(url=url, data=data, headers=headers)
    result = response.text
    print(result)
'''
'''
# Scrape the Douban movie ranking
if __name__ == '__main__':
    import requests
    import json
    url = 'https://movie.douban.com/j/chart/top_list'  # the query string after ? can be dropped and passed via params instead (for POST it goes in data, same idea)
    param = {
            'type': '24',
            'interval_id': '100:90',
            'action':'',
            'start': '0',  # index of the first movie to return (see the paging sketch after this block)
            'limit': '20',  # number of movies to return
    }
    headers = {
        'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.85 Safari/537.36'
    }
    response = requests.get(url=url, params=param, headers=headers)
    list_movie = response.json()
    with open('D:\爬虫文件\douban.json', 'w', encoding='utf-8') as f:
        json.dump(list_movie, fp=f, ensure_ascii=False)
    print("Over!")
'''
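The start/limit pair above also makes it easy to walk the whole ranking page by page. Below is a minimal paging sketch (an added illustration, not part of the original code): it reuses the same URL and parameters and simply advances start in steps of limit.
'''
# Paging sketch: fetch several pages of 20 movies each and collect them in one list
import requests

url = 'https://movie.douban.com/j/chart/top_list'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.85 Safari/537.36'
}
all_movies = []
for start in range(0, 60, 20):  # three pages: start = 0, 20, 40
    param = {
        'type': '24',
        'interval_id': '100:90',
        'action': '',
        'start': str(start),  # first movie of this page
        'limit': '20',        # movies per page
    }
    page = requests.get(url=url, params=param, headers=headers).json()  # each response is a JSON list
    all_movies.extend(page)
print('Fetched {} movies in total'.format(len(all_movies)))
'''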
'''
# Scrape KFC store locations
if __name__ == '__main__':
    import requests
    url = 'http://www.kfc.com.cn/kfccda/ashx/GetStoreList.ashx?op=keyword'
    keyword = input("Enter the location to search KFC stores for: ")
    data = {
    'cname':'',
    'pid':'',
    'keyword': keyword,
    'pageIndex': '1',
    'pageSize': '10',

    }
    headers = {
        'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.85 Safari/537.36'
    }
    response = requests.post(url=url, data=data, headers=headers)
    location_data = response.text
    with open('D:\爬虫文件\肯德基位置.txt', 'w', encoding='utf-8') as f:
        f.write(location_data)
    print("Query finished!")
'''
'''
# Scrape the NMPA (National Medical Products Administration) licence list
import requests
import json
if __name__ == '__main__':
    url = 'http://scxk.nmpa.gov.cn:81/xk/itownet/portalAction.do?method=getXkzsList'
    headers = {
        'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.85 Safari/537.36 Edg/90.0.818.46'
    }
    id_list = []  # parse the JSON responses and dig through the nested structure to collect each detail page's id
    all_data = []
    for page in range(1,5):
        data ={
        'on': 'true',
        'page': str(page),
        'pageSize': '15',
        'productName':'',
        'conditionType': '1',
        'applyname':'',
        'applysn':'',

        }
        response = requests.post(url=url, headers=headers, data=data).json()
        for i in response['list']:
            id_list.append(i['ID'])
    # Looking at the detail-page links, the prefix is always the same and only an id parameter changes; the ids are contained in the list pages fetched above
    # So every detail page shares the same URL and can be fetched with a POST request carrying its id
    url_xqy = 'http://scxk.nmpa.gov.cn:81/xk/itownet/portalAction.do?method=getXkzsById'
    for id in id_list:
        data = {
            'id':id
        }
        response = requests.post(url=url_xqy, data=data, headers=headers)
        detail_json = response.json()
        all_data.append(detail_json)
    with open('D:\爬虫文件\企业详情.json', 'w', encoding='utf-8') as f:
        json.dump(all_data, fp=f, ensure_ascii=False)
'''
'''
# Download a single image
import requests
if __name__ == '__main__':
    url = 'https://pic.qiushibaike.com/system/pictures/12427/124273542/medium/V0KXTG9BXNH6EPIB.jpg'
    img_data = requests.get(url=url).content  # .content returns the binary image data
    with open('D:\爬虫文件\picture.jpg', 'wb') as f:
        f.write(img_data)
'''
'''
# Scrape images from Qiushibaike
# First fetch the whole page
# Regular expressions (re) are used here; unlike bs4, regex is not limited to Python
import requests
import re
import os
if __name__ == '__main__':
    if not os.path.exists('D:\爬虫文件\qiutu'):  # use os to create a qiutu folder for the downloaded images
        os.mkdir('D:\爬虫文件\qiutu')
    for page in range(1,3):
        url = 'https://www.qiushibaike.com/imgrank/page/'+str(page)
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.85 Safari/537.36 Edg/90.0.818.46'
        }
        response = requests.get(url = url, headers = headers)
        page_txt = response.text
        # Next, use a focused regex to parse out the image URLs
        ex = '<div class="thumb">.*?<img src="(.*?)" alt=.*?</div>'
        ex1 = '<div class="thumb">.*?src="(.*?)" alt='  # an unused, shorter variant of the same pattern
        img_src_list = re.findall(ex, page_txt, re.S)  # re.S lets "." match newlines (see the short demo after this block)
        for src in img_src_list:
            url_p = 'https:' + src  # build the full URL
            img_data = requests.get(url=url_p, headers=headers).content
            # take the image file name from the src URL
            img_name = src.split('/')[-1]
            img_path = 'D:\爬虫文件\qiutu\{}'.format(img_name)
            with open(img_path, 'wb') as f:
                f.write(img_data)
                print("Downloaded successfully!")
'''
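A quick aside on the re.S flag used above (an added illustration, not part of the original code; the HTML snippet and URL are made up): "." normally stops at newlines, and the <div class="thumb"> blocks span several lines, so without re.S the pattern would match nothing.
'''
# Minimal demo of why re.S (DOTALL) is needed for multi-line HTML
import re

html_snippet = '<div class="thumb">\n<img src="//pic.example.com/a.jpg" alt="demo">\n</div>'
pattern = '<div class="thumb">.*?<img src="(.*?)" alt=.*?</div>'
print(re.findall(pattern, html_snippet))         # [] - "." does not cross the newlines
print(re.findall(pattern, html_snippet, re.S))   # ['//pic.example.com/a.jpg']
'''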
'''
# Use bs4 to parse the scraped content; note that unlike XPath or regex, which work in many environments, bs4 is Python-only
# General idea of data parsing: 1. locate the tag
#                               2. extract the data stored in the tag or in its attributes
# How bs4 parsing works:
# 1. Instantiate a BeautifulSoup object and load the page source into it
# 2. Call BeautifulSoup attributes or methods to locate tags and extract data
# Environment setup:
# install bs4 and lxml for bs4 parsing
# Load a local HTML file into the BeautifulSoup object,
# or load page source fetched from the web
from bs4 import BeautifulSoup  # import the package
if __name__ == '__main__':
    fp = open('sougou.html', 'r', encoding='utf-8')
    soup = BeautifulSoup(fp, 'lxml')  # load the local HTML file
    print(soup.a)  # soup.tag_name returns the first tag with that name
    print(soup.find('div', class_=''))  # without class_ this is the same as .tag_name; with class_ it finds the tag whose class attribute equals the given value
    print(soup.find_all('a'))  # returns all <a> tags as a list, so they can be indexed
    print(soup.select('a'))  # CSS selectors can filter by class or id; a space separates nested levels
    # Getting text content (a short demo follows after this block):
    # soup.select(...)[i].text / .get_text() returns all the text inside a tag
    # soup.select(...)[i].string returns only the direct text of a tag, with no nested tags
    # soup.a['href'] returns an attribute value
'''
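The three text/attribute accessors listed at the end of the block above are easy to mix up, so here is a tiny self-contained demo (an added illustration, not part of the original; the HTML string and names are made up).
'''
# Demo of .text / .string / attribute access on a small inline HTML string
from bs4 import BeautifulSoup

html_doc = '<div class="song"><a href="http://example.com">Li Bai</a> wrote many poems</div>'
soup = BeautifulSoup(html_doc, 'lxml')
print(soup.div.text)    # 'Li Bai wrote many poems' - all text, nested tags included
print(soup.a.string)    # 'Li Bai' - only the direct text of <a>
print(soup.div.string)  # None - <div> has nested content, so .string returns nothing
print(soup.a['href'])   # 'http://example.com' - attribute value
'''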
'''
# Scrape every chapter title and chapter text of Romance of the Three Kingdoms from shicimingju.com
import requests
from bs4 import BeautifulSoup
import os
if __name__ == '__main__':
    if not os.path.exists('D:\爬虫文件\三国演义'):
        os.mkdir('D:\爬虫文件\三国演义')
    headers = {
        'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.85 Safari/537.36 Edg/90.0.818.46'
    }
    url = 'https://www.shicimingju.com/book/sanguoyanyi.html'
    page_text = requests.get(url=url, headers=headers).text.encode('ISO-8859-1')  # re-encode to avoid garbled Chinese (same trick as below)
    soup = BeautifulSoup(page_text,'lxml')
    mulu_list = soup.select('.book-mulu > ul > li')
    print(mulu_list)
    for mulu in mulu_list:
        title = mulu.a.string
        zhangjie_url = 'https://www.shicimingju.com' + mulu.a['href']
        t = requests.get(url=zhangjie_url, headers=headers).text.encode('ISO-8859-1')  # re-encode to avoid garbled Chinese
        soup_zhangjie = BeautifulSoup(t, 'lxml')
        page_txt = soup_zhangjie.find('div', class_='chapter_content')
        content = page_txt.text
        with open('D:\爬虫文件\三国演义\{}.txt'.format(title), 'w', encoding='utf-8-sig') as f:
            f.write(content)
        print("{} downloaded!".format(title))
'''
'''
XPath parsing approach:
1. Instantiate an etree object and load the page into it
2. Call its methods with an XPath expression to parse the data
3. Environment setup: pip install lxml
4. For a local file: etree.parse(filePath)
5. For page source fetched from the web (see the sketch after the code below):
    etree.HTML(page_text)
6. Call .xpath to extract the data
'''
from lxml import html
from lxml import etree  # etree.HTML() is used for page source fetched from the web
if __name__ == '__main__':
    tree = html.parse('sougou.html')  # parse the local HTML file saved earlier
    r = tree.xpath('/html/body/div')  # find the div tags directly under /html/body

    t = tree.xpath("//div[@class='header']")  # select tags whose class attribute equals the value after @; // crosses multiple levels
    s = tree.xpath("//div[@class='top-nav']//text()")  # //text() grabs all text inside the div; note that XPath positional indexing (e.g. div[1]) starts at 1, not 0
    q = tree.xpath("//div[@class='top-nav']//@xx")  # /@xx retrieves the value of the attribute named xx (xx is just a placeholder)
    print(s)
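For point 5 of the notes above (page source fetched from the web rather than a local file), here is a minimal sketch (an added illustration, not part of the original; it reuses the Sogou homepage from the earlier example purely for demonstration).
'''
# Parse web-fetched page source with etree.HTML instead of a local file
import requests
from lxml import etree

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.85 Safari/537.36'
}
page_text = requests.get(url='https://www.sogou.com/', headers=headers).text
tree = etree.HTML(page_text)       # build the element tree from a string of HTML
links = tree.xpath('//a/@href')    # all link targets on the page
texts = tree.xpath('//a//text()')  # all text inside <a> tags
print(links[:5], texts[:5])
'''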