2021-10-21 Python Crawler: requests Basics and Hands-On Practice (First Blood)

# -*- coding: utf-8 -*-
# @Time : 2021/9/26 16:11
# @Author : luxiaoguo
# @File : requests第一血.py
# @Software : PyCharm
import requests


if __name__ == "__main__":
    # step 1: specify the URL
    url = 'https://www.sogou.com/'

    # step 2: send the request
    # requests.get() returns a Response object
    response = requests.get(url=url)

    # step 3: extract the response data; .text is the response body as a string
    page_text = response.text
    print(page_text)
    # step 4: persist the data
    with open('./sogou.html', 'w', encoding='utf-8') as fp:
        fp.write(page_text)
    print("Crawl finished")


requests实战之破解百度翻译.py (requests in action: cracking Baidu Translate)

# -*- coding: utf-8 -*-
# @Time : 2021/9/26 19:27
# @Author : luxiaoguo
# @File : requests实战之破解百度翻译.py
# @Software : PyCharm
import json

import requests

if __name__ == "__main__":
    # 1. specify the URL (the AJAX endpoint behind the suggestion box)
    post_url = 'https://fanyi.baidu.com/sug'
    # 2. UA spoofing
    header = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36 Edg/94.0.992.31'
    }
    word = input("enter a word:")
    # 3. POST parameter handling (same as for GET)
    data = {
        'kw': word
    }
    # 4. send the request
    response = requests.post(url=post_url, data=data, headers=header)

    # 5. extract the response data: json() returns a Python object
    #    (only call it after confirming the server actually returns JSON)
    dic_obj = response.json()

    # persist the result; ensure_ascii=False keeps Chinese characters readable
    fileName = word + '.json'
    with open(fileName, 'w', encoding='utf-8') as fp:
        json.dump(dic_obj, fp=fp, ensure_ascii=False)

    print("over!")

requests词条搜索.py (requests keyword search)

# -*- coding: utf-8 -*-
# @Time : 2021/9/26 16:30
# @Author : luxiaoguo
# @File : requests词条搜索.py
# @Software : PyCharm
# UA: User-Agent (identifies the client sending the request)
# UA spoofing: portal servers inspect the User-Agent of incoming requests,
# so a crawler passes a browser UA to avoid being flagged

import requests

if __name__ == "__main__":
    # UA spoofing: wrap the browser's User-Agent in a dict
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36 Edg/94.0.992.31'
    }
    url = 'https://www.sogou.com/web'
    # handle the query parameter carried in the URL
    kw = input('enter a word:')
    # query parameters as a dict of key/value pairs
    param = {
        'query': kw
    }
    # requests merges the params into the URL and URL-encodes them;
    # pass headers so the UA spoofing actually takes effect
    response = requests.get(url=url, params=param, headers=headers)

    page_text = response.text
    fileName = kw + '.html'
    with open(fileName, 'w', encoding='utf-8') as fp:
        fp.write(page_text)
    print(fileName, "saved successfully!")
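
Because the params dict is merged into the URL and percent-encoded by requests, you can inspect the exact request that went out:

# the final URL with the encoded query string, e.g.
# https://www.sogou.com/web?query=%E7%88%AC%E8%99%AB for kw = '爬虫'
print(response.url)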



requests实战之肯德基餐厅位置爬取.py (requests in action: scraping KFC restaurant locations)

# -*- coding: utf-8 -*-
# @Time : 2021/9/26 20:22
# @Author : luxiaoguo
# @File : requests实战之肯德基餐厅位置爬取.py
# @Software : PyCharm

import requests

if __name__ == "__main__":
    url = 'http://www.kfc.com.cn/kfccda/storelist/index.aspx'
    header = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36 Edg/94.0.992.31'
    }
    # the entered value is used both as the page count and as pageSize
    page_size = int(input('enter a pageSize:'))
    # open the file once, so each page is appended rather than overwritten
    with open('./kedeji.txt', 'w', encoding='utf-8') as fp:
        for i in range(1, page_size + 1):
            par = {
                'cname': '',
                'pid': '',
                'keyword': '北京',  # Beijing
                'pageIndex': i,
                'pageSize': page_size,
            }

            response = requests.post(url=url, data=par, headers=header)

            kedeji_text = response.text
            print(kedeji_text)
            fp.write(kedeji_text)
    print("Done fetching!")




requests实战之爬取药监总局数据.py (requests in action: scraping NMPA data)

# -*- coding: utf-8 -*-
# @Time : 2021/9/27 15:17
# @Author : luxiaoguo
# @File : requests实战之爬取药监总局数据.py
# @Software : PyCharm
import requests
import json
if __name__ == '__main__':
    # batch-fetch the company IDs from the paginated list endpoint
    url = 'http://scxk.nmpa.gov.cn:81/xk/itownet/portalAction.do?method=getXkzsList'

    header = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36 Edg/94.0.992.31'

    }
    # collected company IDs
    id_list = []
    all_data_list = []
    for i in range(1, 6):
        page = str(i)
        data = {
            'on': 'true',
            'page': page,
            'pageSize': '15',
            'productName':'',
            'conditionType': '1',
            'applyname':'',
            'applysn':'',
        }


        json_ids = requests.post(url=url, headers=header, data=data).json()
        # print(json_ids)
        for dic in json_ids['list']:
            id_list.append(dic['ID'])
        # print(id_list)

    # fetch the detail data for each company

    post_url = 'http://scxk.nmpa.gov.cn:81/xk/itownet/portalAction.do?method=getXkzsById'
    for company_id in id_list:  # avoid shadowing the built-in id()
        data = {
            'id': company_id
        }
        data_json = requests.post(url=post_url, headers=header, data=data).json()
        # print(data_json)
        all_data_list.append(data_json)

    # persist all_data_list
    with open('./allData.json', 'w', encoding='utf-8') as fp:
        json.dump(all_data_list, fp=fp, ensure_ascii=False)

    print("Crawl finished")


