Python Notes 31: Manager, Pool (Process Pool), Threads, Locks

Manager

A Manager's dict and list proxies make it possible to share data between processes.

from multiprocessing import Process,Manager,Lock

def work(dic,lock):
	# Shorthand: the with statement acquires and releases the lock for you automatically.
	with lock:
		dic["count"] -= 1

	"""
	# 正常写法
	# 上锁
	lock.acquire()
	# 数据值减一
	dic["count"] -= 1
	# 解锁
	lock.release()
	"""
if __name__ == "__main__":
	# Create a Manager object
	m = Manager()
	# Create a lock object
	lock = Lock()
	lst = []
	# Create a shared dict
	dic = m.dict({"count":100})
	for i in range(100):
		p = Process(target=work,args=(dic,lock))
		p.start()
		lst.append(p)
	
	for i in lst:
		i.join()
	
	print(dic)
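
The Manager is not limited to dicts: it hands out shared lists (and other proxy types) the same way. A minimal sketch under the same imports as above; work and shared_lst are illustrative names:

from multiprocessing import Process,Manager

def work(shared_lst, i):
	# each child appends its own square to the shared list
	shared_lst.append(i * i)

if __name__ == "__main__":
	m = Manager()
	# create a shared list
	shared_lst = m.list()
	lst = []
	for i in range(5):
		p = Process(target=work, args=(shared_lst, i))
		p.start()
		lst.append(p)
	for p in lst:
		p.join()
	print(list(shared_lst)) # e.g. [0, 1, 4, 9, 16], order may vary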

Pool (Process Pool)

import os,time,random
from multiprocessing import Process,Pool

# Count this machine's CPUs (logical core count)
# print(os.cpu_count())
Comparing the execution speed of Pool and Process

A process pool runs tasks in parallel on a fixed set of worker processes, so it is much faster than launching a new Process for every task.

def func(num):
	# time.sleep(3) # with a pool of 6, at most 6 tasks execute at the same time
	# time.sleep(random.uniform(0.1,1)) # an asynchronous, parallel program
	print("Sending email #%d" % (num))

if __name__ == "__main__":
	startime = time.time()
	# (1) Create a pool object
	# The argument to Pool() is how many processes may run in parallel at the same time.
	"""
	6个任务
	(1)1个人做6个 
	(2)6个人做6个
	(3)6个人做1个
	任务量较少时,3的速度较快,任务量较大时,2的速度更快.
	因为如果任务线拉长,频繁切换cpu会浪费时间.
	"""
	p = Pool(1)
	for i in range(100):
		p.apply_async(func,args=(i,))
		
	# Close the pool: it no longer accepts new tasks
	p.close()
	# Block the main process until all pool tasks have finished
	p.join()
	endtime = time.time()
	print(endtime-startime) #0.19946622848510742
	
	# (2) Process: spawn one process per task, for comparison
	startime = time.time()
	lst = []
	for i in range(100):
		p = Process(target= func,args=(i,))
		p.start()
		lst.append(p)
		
	for i in lst:
		i.join()
	endtime = time.time()
	print(endtime-startime) #2.732689142227173
apply

apply starts tasks synchronously and blocks: each call waits for the current task to finish before the next one starts. It also hands back the task's return value.

def task(num):
	time.sleep(random.uniform(0.1,1)) # synchronous program
	print("%s:%s" % (num,os.getpid()))
	return num
	
if __name__ == "__main__":
	p = Pool(2)
	for i in range(20):
		res = p.apply(task,args=(i,))
		print("--->",res)
	# A completely synchronous program: "finish" only runs after everything above is done
	print("finish")
apply_async: asynchronous and non-blocking, and it can return values

Child processes created with Process are waited on by default: the main process only terminates after all of them finish.
With a Pool, by contrast, as soon as the main process is done, all pool workers are terminated immediately.

def task(num):
	# time.sleep(3)
	time.sleep(random.uniform(0.1,1)) # simulate some work
	print("%s:%s" % (num,os.getpid()))
	return os.getpid()
	
if __name__ == "__main__":
	p = Pool()
	lst = []
	lst2 = []
	for i in range(20):
		res = p.apply_async(task,args=(i,))
		# print(res)
		# 1. Collect the returned AsyncResult objects in a list
		lst.append(res)		
	for i in lst:
		# 2. Use get() to fetch each return value
		lst2.append(i.get())
	# Close the pool: it no longer accepts new tasks
	p.close()
	# Block the main process until all pool tasks have finished
	p.join()
	# The set holds 6 pids: Pool() defaults to os.cpu_count() workers, and this machine has 6 cores
	print(set(lst2),len(set(lst2)))
	print("finish11222")
Pool.map

(Used just like the built-in higher-order function map, except this map runs the tasks in parallel.)
Pool.map returns a list of results.
Unlike apply_async, map blocks internally by default: the main process only moves on and terminates after every task has finished.

def task(num):
	# time.sleep(10)
	# time.sleep(random.uniform(0.1,1))
	print("%s:%s" % (num,os.getpid()))
	return num ** 2 

if __name__ == "__main__":
	p = Pool()
	lst = p.map(task,range(100))
	print(lst)
	#[0, 1, 4, 9, 16, 25, 36, 49, 64, 81, 100, 121, 144, 169, 196, 225, 256, 289, 324, 361, 400, 441, 484, 529, 576, 625, 676, 729, 784, 841, 900, 961, 1024, 1089, 1156, 1225, 1296, 1369, 1444, 1521, 1600, 1681, 1764, 1849, 1936, 2025, 2116, 2209, 2304, 2401, 2500, 2601, 2704, 2809, 2916, 3025, 3136, 3249, 3364, 3481, 3600, 3721, 3844, 3969, 4096, 4225, 4356, 4489, 4624, 4761, 4900, 5041, 5184, 5329, 5476, 5625, 5776, 5929, 6084, 6241, 6400, 6561, 6724, 6889, 7056, 7225, 7396, 7569, 7744, 7921, 8100, 8281, 8464, 8649, 8836, 9025, 9216, 9409, 9604, 9801]
	
	# If join() appears, close() must appear with it: use both or neither.
	# p.close()
	# p.join()
	print('finish')
Closing the pool: after close() it no longer accepts new tasks
def task(num):
	time.sleep(random.uniform(0.1,1))
	print("%s:%s" % (num,os.getpid()))
	return num ** 2 

if __name__ == "__main__":
	p = Pool()
	lst = []
	for i in range(20):
		res = p.apply_async(task,args=(i,))
		lst.append(res)
		
	# get() blocks internally by default; execution continues only after all values are fetched
	for i in lst:
		print(i.get())
		
	p.close()
	# After close(), no more tasks can be submitted to the pool.
	# res = p.apply_async(task,args=(112233,))
	p.join()
	print("finish")

Threads

from threading import Thread
from multiprocessing import Process
import os,time,random
A single process can contain multiple threads.
def func(num):
	time.sleep(random.uniform(0.1,1))
	print("子线程",num,os.getpid())

for i in range(10):
	t = Thread(target=func,args=(i,))
	t.start()
Which is faster, multithreading or multiprocessing? Multithreading: starting a thread is far cheaper than starting a process.
def func(i):
	# time.sleep(random.uniform(0.1,1))
	print("子线程",i,os.getpid())
	
if __name__ == "__main__":
	# 1. Time the multithreaded version
	startime = time.perf_counter()
	lst = []
	for i in range(1000):
		t = Thread(target=func,args=(i,))
		t.start()
		lst.append(t)
	
	for i in lst:
		i.join()
	
	print("程序执行结束")
	endtime = time.perf_counter()
	print(endtime-startime) #0.02006927398906555
	
	# 2. Time the multiprocess version
	startime = time.perf_counter()
	lst = []
	for i in range(1000):
		p = Process(target=func,args=(i,))
		p.start()
		lst.append(p)
	
	for i in lst:
		i.join()
	print("程序执行结束")
	endtime = time.perf_counter()
	print(endtime-startime) #0.111981043999549
Multiple threads share the resources of their process
num = 100
lst = []
def func():
	global num
	num -= 1

for i in range(100):
	t = Thread(target=func)
	t.start()
	lst.append(t)
	
for i in lst:
	i.join()
print(num)
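
For contrast, the same pattern with Process leaves the parent's num untouched, because each process works on its own copy of the variable. A minimal sketch:

from multiprocessing import Process

num = 100
def func():
	global num
	num -= 1 # decrements the child's own copy only

if __name__ == "__main__":
	lst = []
	for i in range(100):
		p = Process(target=func)
		p.start()
		lst.append(p)
	for p in lst:
		p.join()
	print(num) # still 100: processes do not share this variable
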
Thread-related functions

thread.is_alive() check whether the thread is still alive
thread.setName() set the thread's name
thread.getName() get the thread's name
currentThread().ident get the thread's id
enumerate() return a list of the currently running threads
activeCount() return the number of currently running threads

(Note: the camelCase helpers setName/getName/currentThread/activeCount are legacy aliases; Python 3.10+ deprecates them in favor of the name attribute, threading.current_thread() and threading.active_count().)

def func():
	# time.sleep(0.1)
	pass
	
t = Thread(target=func)
t.start()
print(t.is_alive())
print(t.getName())
t.setName("wangwen")
print(t.getName())
time.sleep(2)
print(t.is_alive()) # False
  • currentThread().ident gets the thread's id
from threading import currentThread

def func():
	print("子线程:",currentThread().ident)
t = Thread(target = func)
t.start()
print("主线程:",currentThread().ident)
  • enumerate() returns a list of the currently running threads
from threading import enumerate
def func():
	print("子线程:",currentThread().ident)
	time.sleep(0.5)
	
for i in range(10):
	t = Thread(target = func)
	t.start()

time.sleep(3)
# After time.sleep(3) the 10 child threads (0.5 s each) have already exited, so only the
# main thread is still running; drop the sleep and you would see 10 child threads + 1 main thread = 11.
print(enumerate()) #[<_MainThread(MainThread, started 140247126484736)>]
print(len(enumerate())) 
  • activeCount() returns the number of currently running threads
from threading import activeCount
def func():
	print("子线程:",currentThread().ident)
	time.sleep(0.5)
	
for i in range(10):
	t = Thread(target = func)
	t.start()
print(activeCount())
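
For reference, the same information is available through the non-deprecated spellings (on Python 3.10+ the camelCase helpers above still work but emit DeprecationWarning). A minimal sketch; "worker-0" is just an illustrative name:

import threading,time

def func():
	time.sleep(0.5)

t = threading.Thread(target=func, name="worker-0")
t.start()
print(t.name)                            # the name attribute replaces getName()/setName()
print(threading.current_thread().ident)  # id of the calling thread
print(threading.active_count())          # number of live threads
print(threading.enumerate())             # list of live Thread objects
t.join()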

Daemon threads

A daemon thread ends automatically once all non-daemon threads have finished; it guards the program for as long as they run.

from threading import Thread
import time
def func1():
	while True:
		time.sleep(0.5)
		print("我是守护线程")
	
def func2():
	print("func2 -> start")
	time.sleep(3)
	print("func2 -> end")
	
t1 = Thread(target=func1)
# setDaemon turns the t1 thread object into a daemon thread (equivalent to t1.daemon = True)
t1.setDaemon(True)
t1.start()

t2 = Thread(target=func2)
t2.start()

# time.sleep(5)

# t2.join()
print("主线程执行结束")

Thread data safety

from threading import Thread,Lock
import time
n = 0
def func1(lock):
	global n
	# time.sleep(0.3)
	# print(11)
	for i in range(1000000):
		# acquire and release by hand
		lock.acquire()
		# print(n)
		n-=1
		lock.release()

def func2(lock):
	global n
	# time.sleep(0.3)
	# print(22)
	for i in range(1000000):
		# with acquires and releases automatically
		with lock:
			# print(n)
			n+=1
		
if __name__ == "__main__":
	# Create a lock
	lock = Lock()
	lst = []
	for i in range(10):
		t1 = Thread(target=func1,args=(lock,))
		t2 = Thread(target=func2,args=(lock,))
		
		t1.start()
		t2.start()
		
		lst.append(t1)
		lst.append(t2)
	
	for i in lst:
		i.join()
	
	print("主线程执行结束...")
	print(n)
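
To see why the lock matters, drop it: the read-modify-write on n is not atomic, so concurrent updates get lost and the final value is typically not 0. A minimal sketch of the unlocked version; add and sub are illustrative names:

from threading import Thread

n = 0
def add():
	global n
	for i in range(1000000):
		n += 1 # read-modify-write, not atomic

def sub():
	global n
	for i in range(1000000):
		n -= 1

if __name__ == "__main__":
	t1 = Thread(target=add)
	t2 = Thread(target=sub)
	t1.start()
	t2.start()
	t1.join()
	t2.join()
	print(n) # typically a nonzero value: some updates were lost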

Semaphore (threads)

from threading import Semaphore,Thread
import time,random

def func(i,sem):
	# The with shorthand:
	with sem:
		print(i)
		time.sleep(random.uniform(0.1,1))
	"""
	# The long way:
	sem.acquire()
	print(i)
	time.sleep(random.uniform(0.1,1))
	sem.release()
	"""
	
if __name__ == "__main__":
	sem = Semaphore(5) # at most 5 threads may hold the semaphore at once
	for i in range(20):
		Thread(target=func,args=(i,sem)).start()

Deadlock, recursive locks, mutexes

In the example below, eat1 takes noodle_lock first and kuaizi_lock second, while eat2 takes them in the opposite order. As soon as one thread holds each lock, both wait on the other forever: a classic deadlock.

from threading import Thread,Lock
import time
noodle_lock = Lock()
kuaizi_lock = Lock()

def eat1(name):
	noodle_lock.acquire()
	print("%s got the noodles" % (name))
	kuaizi_lock.acquire()
	print("%s got the chopsticks" % (name))
	
	print("eating")
	time.sleep(0.7)
	
	kuaizi_lock.release()
	print("%s put down the chopsticks" % (name))
	noodle_lock.release()
	print("%s put down the noodles" % (name))
	
def eat2(name):
	kuaizi_lock.acquire()
	print("%s got the chopsticks" % (name))
	noodle_lock.acquire()
	print("%s got the noodles" % (name))

	print("eating")
	time.sleep(0.7)
	
	noodle_lock.release()
	print("%s put down the noodles" % (name))
	kuaizi_lock.release()
	print("%s put down the chopsticks" % (name))

if __name__ == "__main__":
	name_list1 = ["马具强","熊卫华"]
	name_list2 = ["黄熊大","黄将用"]
	for name in name_list1:
		Thread(target=eat1,args=(name,)).start()
		
	for name in name_list2:
		Thread(target=eat2,args=(name,)).start()
Recursive lock (RLock)

A recursive lock is designed to break deadlocks.
It is a stopgap: when a server is hanging because of a deadlock, swapping in a recursive lock is a quick emergency fix.
It solves the emergency, not the underlying design problem.

  • Basic syntax

A recursive lock may be acquired repeatedly by the same thread without blocking, but 3 acquires must be matched by 3 releases before any other thread can take the lock. (For contrast with a plain Lock, see the sketch after the example below.)

from threading import Thread,RLock

rlock = RLock()
def func(name):
	rlock.acquire()
	print(name,1)
	rlock.acquire()
	print(name,2)
	rlock.acquire()
	print(name,3)
	
	rlock.release()
	rlock.release()
	rlock.release()
lst = []
for i in range(10):
	t1 = Thread(target=func,args=("name%s" % (i) , ))
	t1.start()
	lst.append(t1)
	
for i in lst:
	i.join()
	
print("程序结束了")
  • Using a recursive lock as an emergency fix for the deadlock:
# Both names are bound to one and the same RLock
noodle_lock = kuaizi_lock = RLock()
def eat1(name):
	noodle_lock.acquire()
	print("%s got the noodles" % (name))
	kuaizi_lock.acquire()
	print("%s got the chopsticks" % (name))
	
	print("eating")
	time.sleep(0.7)
	
	kuaizi_lock.release()
	print("%s put down the chopsticks" % (name))
	noodle_lock.release()
	print("%s put down the noodles" % (name))
	
def eat2(name):
	kuaizi_lock.acquire()
	print("%s got the chopsticks" % (name))
	noodle_lock.acquire()
	print("%s got the noodles" % (name))

	print("eating")
	time.sleep(0.7)
	
	noodle_lock.release()
	print("%s put down the noodles" % (name))
	kuaizi_lock.release()
	print("%s put down the chopsticks" % (name))

if __name__ == "__main__":
	name_list1 = ["马具强","熊卫华"]
	name_list2 = ["黄熊大","黄将用"]
	for name in name_list1:
		Thread(target=eat1,args=(name,)).start()
		
	for name in name_list2:
		Thread(target=eat2,args=(name,)).start()
Mutex

Syntactically, locks can be nested inside one another, but don't do it.
One acquire matched by one release of the same lock: that is a mutex.
Grabbing the noodles and grabbing the chopsticks happen together, so one lock around the whole action is enough; don't lock each item separately.
Avoid nesting locks whenever possible: nesting is what makes deadlock easy.

mylock = Lock()
def eat1(name):
	mylock.acquire()
	print("%s got the noodles" % (name))
	print("%s got the chopsticks" % (name))
	
	print("eating")
	time.sleep(0.7)	

	print("%s put down the chopsticks" % (name))	
	print("%s put down the noodles" % (name))
	mylock.release()
	
def eat2(name):
	mylock.acquire()
	print("%s got the chopsticks" % (name))
	print("%s got the noodles" % (name))

	print("eating")
	time.sleep(0.7)	

	print("%s put down the noodles" % (name))	
	print("%s put down the chopsticks" % (name))
	mylock.release()

if __name__ == "__main__":
	name_list1 = ["马具强","熊卫华"]
	name_list2 = ["黄熊大","黄将用"]
	for name in name_list1:
		Thread(target=eat1,args=(name,)).start()
		
	for name in name_list2:
		Thread(target=eat2,args=(name,)).start()
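
Besides the RLock stopgap and the single-mutex rewrite, the textbook fix for this deadlock is to make every thread acquire the locks in one fixed global order, so no circular wait can form. A minimal sketch under the same noodles/chopsticks scenario; eat and the names are illustrative:

from threading import Thread,Lock
import time

noodle_lock = Lock()
kuaizi_lock = Lock()

def eat(name):
	# every thread takes noodle_lock first and kuaizi_lock second;
	# with one global order there is no circular wait, hence no deadlock
	with noodle_lock:
		print("%s got the noodles" % (name))
		with kuaizi_lock:
			print("%s got the chopsticks" % (name))
			print("eating")
			time.sleep(0.7)

if __name__ == "__main__":
	for name in ["A","B","C","D"]:
		Thread(target=eat,args=(name,)).start()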
