Web Scraping Tutorial Collection

from urllib.request import urlopen

url = "http://www.baidu.com/"
resp = urlopen(url)

# Save the page source to a local file (specify the encoding so Windows does not default to gbk)
with open("F:/baidu.html", mode="w", encoding="utf-8") as f:
    f.write(resp.read().decode("utf-8"))
    

# F12 -> Network: the first request only returns the page skeleton (see the Preview tab); the detailed data is loaded by later requests

import requests

url = "http://www.baidu.com/"
resp = requests.get(url)
print(resp)       # the response object, e.g. <Response [200]>
print(resp.text)  # the page source


import requests

url = "http://www.baidu.com/"
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0"
}
resp = requests.get(url, headers=headers)  # send a browser User-Agent to get past basic anti-scraping checks
print(resp)       # the response object, e.g. <Response [200]>
print(resp.text)  # the page source


# In F12 -> Network, find the request sent by the translation page (filter by XHR),
# then check the Payload tab to see which parameters it carries
import requests

url = "https://fanyi.baidu.com/sug"
data = {
    "kw": "dog"
}
resp = requests.post(url, data=data)  # POST request with form data
print(resp.json())


# Passing parameters with GET
import requests

# F12 -> Network, filter by XHR, then find the query parameters in the Payload tab
params = {
    "type": 11,
    "interval_id": "100:90",
    "action": "",
    "start": 0,
    "limit": 20
}
url = "https://movie.douban.com/j/chart/top_list"
resp = requests.get(url, params=params)
print(resp.request.url)  # the final URL that was actually sent, with the query string appended


# Getting JSON data
import requests

# F12 -> Network, filter by XHR, then find the query parameters in the Payload tab
params = {
    "type": 11,
    "interval_id": "100:90",
    "action": "",
    "start": 0,
    "limit": 20
}
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0"
}
url = "https://movie.douban.com/j/chart/top_list"
resp = requests.get(url, params=params, headers=headers)
print(resp.json())
resp.close()  # release the connection when done

# Data parsing
# The re module
import re

lst = re.findall(r"\d+", "Phone number 12837123")
print(lst)

# finditer matches everything in the string and returns an iterator;
# call .group() on each match object to get the matched text
it = re.finditer(r"\d+", "Phone number 12837123, the other number is 1231")
for i in it:
    print(i.group())

# search returns as soon as it finds the first match; use .group() to get the text
it = re.search(r"\d+", "Phone number 12837123, the other number is 1231")
print(it.group())

# Precompile the regular expression for reuse
obj = re.compile(r"\d+")
ret = obj.finditer("Phone number 12837123, the other number is 1231")
for i in ret:
    print(i.group())
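
# In practice, scraping with re usually combines a precompiled pattern with named
# groups (?P<name>...) to pull fields out of the page source. A minimal sketch on a
# made-up HTML fragment; the tag structure and group names here are only illustrative.
import re

html = ('<li><a href="/movie/1">Movie One</a><span>9.1</span></li>'
        '<li><a href="/movie/2">Movie Two</a><span>8.7</span></li>')

# re.S lets "." also match newlines, which real page source usually contains
obj = re.compile(r'<li><a href="(?P<link>.*?)">(?P<title>.*?)</a>'
                 r'<span>(?P<score>.*?)</span></li>', re.S)
for m in obj.finditer(html):
    print(m.group("title"), m.group("score"), m.group("link"))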

# bs4
from bs4 import BeautifulSoup
import requests

url = "https://example.com"
response = requests.get(url)
html_content = response.text
soup = BeautifulSoup(html_content, 'lxml')
soup.find("table", class_="?")                     # style 1: "?" is a placeholder for the real class name
soup.find("table", attrs={"class": "class-name"})  # style 2: pass the attributes as a dict


# bs4
from bs4 import BeautifulSoup
import requests

url = "https://example.com"
response = requests.get(url)
response.encoding = "utf-8"
html_content = response.text
soup = BeautifulSoup(html_content, 'lxml')
alist = soup.find("table", class_="?").find_all("a")  # "?" is a placeholder for the real class name
for a in alist:
    print(a.get("href"))  # get() reads an attribute value from an HTML element, here the href
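
# Beyond links, a common bs4 pattern is to walk a table row by row and read the text
# of each cell. A sketch against a hypothetical inline table; the class name
# "class-name" is a placeholder for whatever the real page uses.
from bs4 import BeautifulSoup

html = """
<table class="class-name">
    <tr><td>Apple</td><td>5.2</td></tr>
    <tr><td>Banana</td><td>3.1</td></tr>
</table>
"""
soup = BeautifulSoup(html, "lxml")
table = soup.find("table", class_="class-name")
for tr in table.find_all("tr"):
    # .text returns the text inside a tag
    print([td.text for td in tr.find_all("td")])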


# xpath
from lxml import etree

# Sample XML content
xml_content = """
<book>
    <author>
        <name>John Doe</name>
        <nick>Johnny</nick>
    </author>
    <author>
        <name>Jane Smith</name>
        <nick>Janey</nick>
    </author>
</book>
"""
# Parse the XML string
root = etree.XML(xml_content)

# "//" selects all descendants, however deep they are nested
nicks = root.xpath('/book/author//nick/text()')
print(nicks)

# "*" is a wildcard that matches any element at that level
nicks = root.xpath('/book/*/nick/text()')
print(nicks)




# Parsing a website with xpath
import requests
from lxml import etree

# Fetch the page
url = 'https://example.com'  # replace with the site you want to scrape
response = requests.get(url)
response.encoding = "utf-8"
html_content = response.text

# Parse the HTML content with etree.HTML()
tree = etree.HTML(html_content)

# XPath indexing starts at 1, so [1] selects the first <title>
title = tree.xpath('//title[1]/text()')

# For example, get all the links
links = tree.xpath('//a/@href')
print('Links:')
for link in links:
    print(link)

# Select by attribute value; "?" is a placeholder for the real href
link1 = tree.xpath('//a[@href="?"]/text()')
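
# When a page repeats the same block (list items, cards), it is common to select each
# block first and then run a relative XPath (starting with "./") inside it. A minimal
# sketch on an inline HTML string with a made-up structure:
from lxml import etree

html = """
<ul>
    <li><a href="/item/1">First</a></li>
    <li><a href="/item/2">Second</a></li>
</ul>
"""
tree = etree.HTML(html)
for li in tree.xpath("//ul/li"):
    # "./" makes the path relative to the current <li> node
    href = li.xpath("./a/@href")[0]
    text = li.xpath("./a/text()")[0]
    print(text, href)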

# Logging in with cookies (session)
import requests

session = requests.session()
data = {
    # username and password form fields go here
}
url = ""  # the login URL
session.post(url, data=data)  # the session keeps the cookies returned by the login

# Two ways to keep visiting pages that require the login
resp = session.get("")  # way 1: reuse the session, which carries the cookies automatically
print(resp.json())

resp = requests.get("", headers={"Cookie": ""})  # way 2: paste the cookie string into the headers yourself
print(resp.text)
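
# If you already have the cookie value (for example copied from the browser via
# F12 -> Application -> Cookies), requests can also take it as a dict through the
# cookies= parameter instead of a raw header string. The cookie name, value and URL
# below are placeholders, not from any real site.
import requests

cookies = {"session_id": "xxxxxxxx"}
resp = requests.get("https://example.com/profile", cookies=cookies)
print(resp.text)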



# Anti-hotlinking (Referer check)
import requests

url = 'https://www.pearvideo.com/videoStatus.jsp?contId=1795765&mrd=0.530345573408592'

# Set the Referer header so the server accepts the request
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0",
    'Referer': 'https://www.pearvideo.com/video_1795765'
}
resp = requests.get(url, headers=headers)
dic = resp.json()
print(dic)



# Proxy IPs (here pointing at a local Clash proxy)
import requests

url = 'https://www.pearvideo.com/videoStatus.jsp?contId=1795765&mrd=0.530345573408592'
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0",
    'Referer': 'https://www.pearvideo.com/video_1795765'
}

proxies = {
    "http": "http://127.0.0.1:7890",
    "https": "http://127.0.0.1:7890",
}
resp = requests.get(url, headers=headers, proxies=proxies)
dic = resp.json()
print(dic)


# Multithreading
from threading import Thread

def func():
    for i in range(100):
        print("A",i)
if __name__ == "__main__":
    t = Thread(target=func)
    t.start()
    for i in range(100):
        print("B",i)


        
# Create a thread by subclassing Thread
from threading import Thread

class MyThread(Thread):
    def run(self):
        for i in range(5):
            print("child thread", i)

if __name__ == "__main__":
    t = MyThread()
    t.start()
    for i in range(100):
        print("B",i)



from threading import Thread

def func(name):
    for i in range(100):
        print(name,i)
if __name__ == "__main__":
    t1 = Thread(target=func, args=("A",))  # args must be a tuple
    t1.start()
    t2 = Thread(target=func,args=("B",))
    t2.start()


# Thread pool
from concurrent.futures import ThreadPoolExecutor
import time

# Task function with a loop that prints its progress
def task(name, duration):
    print(f"Task {name} is running...")
    for i in range(duration):
        print(f"Task {name} iteration {i + 1}...")
        time.sleep(1)  # simulate a delay on each iteration
    return f"Task {name} finished, took {duration} seconds"

if __name__ == "__main__":
    # Create a thread pool with at most 3 threads
    with ThreadPoolExecutor(max_workers=3) as executor:
        # Submit several tasks to the pool
        futures = []
        futures.append(executor.submit(task, "A", 2))
        futures.append(executor.submit(task, "B", 3))
        futures.append(executor.submit(task, "C", 1))
        futures.append(executor.submit(task, "D", 4))

        # Wait for every task to finish and collect the results
        for future in futures:
            print(future.result())


# Print the numbers 1 to 1000 quickly
import concurrent.futures
import time

def collect_numbers(start, end):
    return list(range(start, end))

def print_numbers(numbers):
    print(' '.join(map(str, numbers)))

def main():
    start_time = time.time()  # record the start time

    # Create a thread pool with at most 10 threads
    with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
        # Split the work into chunks, one chunk per thread
        chunk_size = 100
        futures = [executor.submit(collect_numbers, i, i + chunk_size) for i in range(1, 1001, chunk_size)]

        results = [future.result() for future in futures]

    # Merge the chunks and print them
    all_numbers = [number for chunk in results for number in chunk]
    print_numbers(all_numbers)

    end_time = time.time()  # record the end time

    elapsed_time = end_time - start_time  # elapsed time
    print(f"\nTime taken to print 1000 numbers: {elapsed_time} seconds")

if __name__ == "__main__":
    main()
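
# For crawling, the same thread pool idea is usually applied to a list of page URLs:
# one download per URL, run concurrently. A minimal sketch with placeholder URLs;
# download_one is a hypothetical helper written here, not part of any library.
import requests
from concurrent.futures import ThreadPoolExecutor

def download_one(url):
    # One worker fetches one page and reports its size
    resp = requests.get(url, timeout=10)
    return url, len(resp.text)

if __name__ == "__main__":
    urls = [f"https://example.com/page/{i}" for i in range(1, 6)]  # placeholder URLs
    with ThreadPoolExecutor(max_workers=5) as executor:
        # executor.map keeps the results in the same order as the input URLs
        for url, size in executor.map(download_one, urls):
            print(url, size)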

# Asynchronous programming with asyncio
import asyncio

async def task(name, delay):
    print(f"Task {name} started")
    await asyncio.sleep(delay)
    print(f"Task {name} completed")

async def main():
    await asyncio.gather(
        task('A', 2),
        task('B', 1),
    )

asyncio.run(main())
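
# asyncio on its own only schedules coroutines; to download pages asynchronously you
# also need an async HTTP client such as aiohttp (a third-party package, installed
# with "pip install aiohttp"). A minimal sketch with placeholder URLs:
import asyncio
import aiohttp

async def fetch(session, url):
    # One coroutine downloads one page and reports its size
    async with session.get(url) as resp:
        return url, len(await resp.text())

async def main():
    urls = [f"https://example.com/page/{i}" for i in range(1, 6)]  # placeholder URLs
    async with aiohttp.ClientSession() as session:
        results = await asyncio.gather(*(fetch(session, u) for u in urls))
        for url, size in results:
            print(url, size)

asyncio.run(main())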

#https://www.bilibili.com/video/BV1uN4y1W7Du?p=65&spm_id_from=pageDriver&vd_source=0f1ecf6949ed24c58fa05b9239b42ad2
