并发系列——到底什么是重量级锁?

本章概述

很多人对锁的理解大概都只是一个概念,那到底什么是重量级锁?不了解重量级锁就很难看懂偏向锁、轻量级锁。JDK 1.6 之后对重量级锁的优化并不是因为它"重",主要还是因为业务场景:重量级锁存在的原因是它提供互斥,如果没有互斥的场景,就不需要重量级锁了;但是现在没有互斥并不代表将来没有互斥。这也就引出了线程同步的三种形式:单线程执行、线程交替执行、线程互斥执行。

所谓重量级锁就是通过os函数来实现的,加锁过程会由用户态转到内核态。那么可以说Java中的线程跟操作系统线程是对应的,而glibc中互斥锁的加锁是由pthread_mutex_lock来完成的。接下来我就通过修改pthread_mutex_lock.c文件,编译覆盖操作系统中的原文件来说明这个问题。如果执行我们的Java方法时调用到了我们修改后的pthread_mutex_lock函数,是不是就可以证明synchronized确实由os函数来实现,那么也就证明了重量级锁的存在。

版本说明

CentOS-7-x86_64-DVD-1708.iso

glibc-2.19.tar.gz

因为pthread_mutex_lock.c文件在glibc包下,所以会用到,需要注意的是版本问题,最好跟我的一致比较好,否则可能会存在一些兼容性的问题而导致一些错误。

下面正式开始,带着概述中的问题往下看:

解压glibc

028699f2b99fbaba8981b34fbbe8d906.png

进入解压后的目录,修改glibc源码中的pthread_mutex_lock()中的源码

b7533742cabe2ba3b4cf3cfb1aa0c5d4.png

修改pthread_mutex_lock文件

两个注意的地方:

1、#include 引入头文件,相当于Java中导包

2、fprintf(stderr,"msg tid=%lu",pthread_self()); 当调用加锁就会打印

3fe82bfba7e0317f9dcdd82cdc0c806d.png

添加打印信息

/* 修改后的文件,供大家参考。    可能有点长,可直接往下划,精彩在后面!!!Copyright (C) 2002-2014 Free Software Foundation, Inc.   This file is part of the GNU C Library.   Contributed by Ulrich Drepper , 2002.   The GNU C Library is free software; you can redistribute it and/or   modify it under the terms of the GNU Lesser General Public   License as published by the Free Software Foundation; either   version 2.1 of the License, or (at your option) any later version.   The GNU C Library is distributed in the hope that it will be useful,   but WITHOUT ANY WARRANTY; without even the implied warranty of   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU   Lesser General Public License for more details.   You should have received a copy of the GNU Lesser General Public   License along with the GNU C Library; if not, see   .  */#include #include #include #include #include #include #include "pthreadP.h"#include #include #ifndef lll_lock_elision#define lll_lock_elision(lock, try_lock, private)({       lll_lock (lock, private); 0; })#endif#ifndef lll_trylock_elision#define lll_trylock_elision(a,t) lll_trylock(a)#endif#ifndef LLL_MUTEX_LOCK# define LLL_MUTEX_LOCK(mutex)   lll_lock ((mutex)->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex))# define LLL_MUTEX_TRYLOCK(mutex)   lll_trylock ((mutex)->__data.__lock)# define LLL_ROBUST_MUTEX_LOCK(mutex, id)   lll_robust_lock ((mutex)->__data.__lock, id,    PTHREAD_ROBUST_MUTEX_PSHARED (mutex))# define LLL_MUTEX_LOCK_ELISION(mutex)   lll_lock_elision ((mutex)->__data.__lock, (mutex)->__data.__elision,    PTHREAD_MUTEX_PSHARED (mutex))# define LLL_MUTEX_TRYLOCK_ELISION(mutex)   lll_trylock_elision((mutex)->__data.__lock, (mutex)->__data.__elision,    PTHREAD_MUTEX_PSHARED (mutex))#endif#ifndef FORCE_ELISION#define FORCE_ELISION(m, s)#endifstatic int __pthread_mutex_lock_full (pthread_mutex_t *mutex)     __attribute_noinline__;int__pthread_mutex_lock (mutex)     pthread_mutex_t *mutex;{  assert (sizeof (mutex->__size) >= sizeof (mutex->__data));  
fprintf(stderr,"msg tid=%lu",pthread_self());  unsigned int type = PTHREAD_MUTEX_TYPE_ELISION (mutex);  LIBC_PROBE (mutex_entry, 1, mutex);  if (__builtin_expect (type & ~(PTHREAD_MUTEX_KIND_MASK_NP | PTHREAD_MUTEX_ELISION_FLAGS_NP), 0))    return __pthread_mutex_lock_full (mutex);  if (__builtin_expect (type == PTHREAD_MUTEX_TIMED_NP, 1))    {      FORCE_ELISION (mutex, goto elision);    simple:      /* Normal mutex.  */      LLL_MUTEX_LOCK (mutex);      assert (mutex->__data.__owner == 0);    }#ifdef HAVE_ELISION  else if (__builtin_expect (type == PTHREAD_MUTEX_TIMED_ELISION_NP, 1))    {  elision: __attribute__((unused))      /* This case can never happen on a system without elision,         as the mutex type initialization functions will not allow to set the elision flags.  */      /* Don't record owner or users for elision case.  This is a         tail call.  */      return LLL_MUTEX_LOCK_ELISION (mutex);    }#endif  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)     == PTHREAD_MUTEX_RECURSIVE_NP, 1))    {      /* Recursive mutex.  */      pid_t id = THREAD_GETMEM (THREAD_SELF, tid);      /* Check whether we already hold the mutex.  */      if (mutex->__data.__owner == id){  /* Just bump the counter.  */  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))    /* Overflow of the counter.  */    return EAGAIN;  ++mutex->__data.__count;  return 0;}      /* We have to get the mutex.  */      LLL_MUTEX_LOCK (mutex);      assert (mutex->__data.__owner == 0);      mutex->__data.__count = 1;    }  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)  == PTHREAD_MUTEX_ADAPTIVE_NP, 1))    {      if (! 
__is_smp)goto simple;      if (LLL_MUTEX_TRYLOCK (mutex) != 0){  int cnt = 0;  int max_cnt = MIN (MAX_ADAPTIVE_COUNT,     mutex->__data.__spins * 2 + 10);  do    {      if (cnt++ >= max_cnt){  LLL_MUTEX_LOCK (mutex);  break;}#ifdef BUSY_WAIT_NOP      BUSY_WAIT_NOP;#endif    }  while (LLL_MUTEX_TRYLOCK (mutex) != 0);  mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;}      assert (mutex->__data.__owner == 0);    }  else    {      pid_t id = THREAD_GETMEM (THREAD_SELF, tid);      assert (PTHREAD_MUTEX_TYPE (mutex) == PTHREAD_MUTEX_ERRORCHECK_NP);      /* Check whether we already hold the mutex.  */      if (__builtin_expect (mutex->__data.__owner == id, 0))return EDEADLK;      goto simple;    }  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);  /* Record the ownership.  */  mutex->__data.__owner = id;#ifndef NO_INCR  ++mutex->__data.__nusers;#endif  LIBC_PROBE (mutex_acquired, 1, mutex);  return 0;}static int__pthread_mutex_lock_full (pthread_mutex_t *mutex){  int oldval;  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);  switch (PTHREAD_MUTEX_TYPE (mutex))    {    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,     &mutex->__data.__list.__next);      oldval = mutex->__data.__lock;      do{again:  if ((oldval & FUTEX_OWNER_DIED) != 0)    {      /* The previous owner died.  Try locking the mutex.  */      int newval = id;#ifdef NO_INCR      newval |= FUTEX_WAITERS;#else      newval |= (oldval & FUTEX_WAITERS);#endif      newval= atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,       newval, oldval);      if (newval != oldval){  oldval = newval;  goto again;}      /* We got the mutex.  */      mutex->__data.__count = 1;      /* But it is inconsistent unless marked otherwise.  
*/      mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;      ENQUEUE_MUTEX (mutex);      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);      /* Note that we deliberately exit here.  If we fall through to the end of the function __nusers would be incremented which is not correct because the old owner has to be discounted.  If we are not supposed to increment __nusers we actually have to decrement it here.  */#ifdef NO_INCR      --mutex->__data.__nusers;#endif      return EOWNERDEAD;    }  /* Check whether we already hold the mutex.  */  if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))    {      int kind = PTHREAD_MUTEX_TYPE (mutex);      if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP){  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);  return EDEADLK;}      if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP){  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);  /* Just bump the counter.  */  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))    /* Overflow of the counter.  */    return EAGAIN;  ++mutex->__data.__count;  return 0;}    }  oldval = LLL_ROBUST_MUTEX_LOCK (mutex, id);  if (__builtin_expect (mutex->__data.__owner== PTHREAD_MUTEX_NOTRECOVERABLE, 0))    {      /* This mutex is now not recoverable.  
*/      mutex->__data.__count = 0;      lll_unlock (mutex->__data.__lock,  PTHREAD_ROBUST_MUTEX_PSHARED (mutex));      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);      return ENOTRECOVERABLE;    }}      while ((oldval & FUTEX_OWNER_DIED) != 0);      mutex->__data.__count = 1;      ENQUEUE_MUTEX (mutex);      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);      break;    case PTHREAD_MUTEX_PI_RECURSIVE_NP:    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:    case PTHREAD_MUTEX_PI_NORMAL_NP:    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:      {int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;if (robust)  /* Note: robust PI futexes are signaled by setting bit 0.  */  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, (void *) (((uintptr_t) &mutex->__data.__list.__next)   | 1));oldval = mutex->__data.__lock;/* Check whether we already hold the mutex.  */if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))  {    if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)      {THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);return EDEADLK;      }    if (kind == PTHREAD_MUTEX_RECURSIVE_NP)      {THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);/* Just bump the counter.  */if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))  /* Overflow of the counter.  */  return EAGAIN;++mutex->__data.__count;return 0;      }  }int newval = id;#ifdef NO_INCRnewval |= FUTEX_WAITERS;#endifoldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,      newval, 0);if (oldval != 0)  {    /* The mutex is locked.  The kernel will now take care of       everything.  */    int private = (robust   ? 
PTHREAD_ROBUST_MUTEX_PSHARED (mutex)   : PTHREAD_MUTEX_PSHARED (mutex));    INTERNAL_SYSCALL_DECL (__err);    int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,      __lll_private_flag (FUTEX_LOCK_PI,  private), 1, 0);    if (INTERNAL_SYSCALL_ERROR_P (e, __err)&& (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK))      {assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK|| (kind != PTHREAD_MUTEX_ERRORCHECK_NP    && kind != PTHREAD_MUTEX_RECURSIVE_NP));/* ESRCH can happen only for non-robust PI mutexes where   the owner of the lock died.  */assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH || !robust);/* Delay the thread indefinitely.  */while (1)  pause_not_cancel ();      }    oldval = mutex->__data.__lock;    assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);  }if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))  {    atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);    /* We got the mutex.  */    mutex->__data.__count = 1;    /* But it is inconsistent unless marked otherwise.  */    mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;    ENQUEUE_MUTEX_PI (mutex);    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);    /* Note that we deliberately exit here.  If we fall       through to the end of the function __nusers would be       incremented which is not correct because the old owner       has to be discounted.  If we are not supposed to       increment __nusers we actually have to decrement it here.  */#ifdef NO_INCR    --mutex->__data.__nusers;#endif    return EOWNERDEAD;  }if (robust    && __builtin_expect (mutex->__data.__owner == PTHREAD_MUTEX_NOTRECOVERABLE, 0))  {    /* This mutex is now not recoverable.  
*/    mutex->__data.__count = 0;    INTERNAL_SYSCALL_DECL (__err);    INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,      __lll_private_flag (FUTEX_UNLOCK_PI,  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),      0, 0);    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);    return ENOTRECOVERABLE;  }mutex->__data.__count = 1;if (robust)  {    ENQUEUE_MUTEX_PI (mutex);    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);  }      }      break;    case PTHREAD_MUTEX_PP_RECURSIVE_NP:    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:    case PTHREAD_MUTEX_PP_NORMAL_NP:    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:      {int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;oldval = mutex->__data.__lock;/* Check whether we already hold the mutex.  */if (mutex->__data.__owner == id)  {    if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)      return EDEADLK;    if (kind == PTHREAD_MUTEX_RECURSIVE_NP)      {/* Just bump the counter.  */if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))  /* Overflow of the counter.  
*/  return EAGAIN;++mutex->__data.__count;return 0;      }  }int oldprio = -1, ceilval;do  {    int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)  >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;    if (__pthread_current_priority () > ceiling)      {if (oldprio != -1)  __pthread_tpp_change_priority (oldprio, -1);return EINVAL;      }    int retval = __pthread_tpp_change_priority (oldprio, ceiling);    if (retval)      return retval;    ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;    oldprio = ceiling;    oldval      = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,#ifdef NO_INCR     ceilval | 2,#else     ceilval | 1,#endif     ceilval);    if (oldval == ceilval)      break;    do      {oldval  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock, ceilval | 2, ceilval | 1);if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)  break;if (oldval != ceilval)  lll_futex_wait (&mutex->__data.__lock, ceilval | 2,  PTHREAD_MUTEX_PSHARED (mutex));      }    while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,ceilval | 2, ceilval)   != ceilval);  }while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);assert (mutex->__data.__owner == 0);mutex->__data.__count = 1;      }      break;    default:      /* Correct code cannot set any other type.  */      return EINVAL;    }  /* Record the ownership.  */  mutex->__data.__owner = id;#ifndef NO_INCR  ++mutex->__data.__nusers;#endif  LIBC_PROBE (mutex_acquired, 1, mutex);  return 0;}#ifndef __pthread_mutex_lockstrong_alias (__pthread_mutex_lock, pthread_mutex_lock)hidden_def (__pthread_mutex_lock)#endif#ifdef NO_INCRvoid__pthread_mutex_cond_lock_adjust (mutex)     pthread_mutex_t *mutex;{  assert ((mutex->__data.__kind & PTHREAD_MUTEX_PRIO_INHERIT_NP) != 0);  assert ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0);  assert ((mutex->__data.__kind & PTHREAD_MUTEX_PSHARED_BIT) == 0);  /* Record the ownership.  
*/  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);  mutex->__data.__owner = id;  if (mutex->__data.__kind == PTHREAD_MUTEX_PI_RECURSIVE_NP)    ++mutex->__data.__count;}#endif

修改完之后把文件拖进解压目录覆盖原文件即可,先看一下当前系统的glibc版本

2f34d3416c495b349cbcc082916f8b0f.png

然后就是创建编译目录,开始编译我们修改后的文件,编译后系统默认使用的目录是/usr/lib,我们这里直接覆盖就行

../configure --prefix=/usr --disable-profile --enable-add-ons --with-headers=/usr/include --with-binutils=/usr/bin
ecdb053da03aa68d9a3d3e0d35d33f0f.png

如果过程中没有出现任何error就说明是验证成功了,注意是验证当前版本是否可用,不是编译成功

0b908049d868acc2c9208b41b4635f39.png

然后直接make开始编译,这个过程比较漫长,我电脑上大概运行了12分钟

0628b868c5e22d39222d206419900998.png

最后执行一下make install,编译成功之后就大功告成了,现在再查看一下glibc版本现在就是2.19了

892bdb22480f04361d05dd2c3f7b51e6.png

直接运行一下java命令,已经打印出了我们添加的打印信息

7fd046d5dc2261e9b14cd2a547ee598f.png

问题分析

1、正常运行Java程序会这样打印,因为后台还有很多其他的线程在运行,比如gc线程等

msg tid=139972453586688msg tid=139972453586688msg tid=139972453586688msg tid=139972453586688msg tid=139972453586688msg tid=139972453586688msg tid=139972453586688msg tid=139972453586688msg tid=139972453586688msg tid=139972453586688msg tid=139972453586688msg tid=139972453586688msg tid=139972453586688

2、synchronized膨胀为重量级锁后,底层会调用os函数pthread_mutex_lock来完成加锁(阻塞/唤醒由内核负责),而Java中的线程跟linux中的线程是对应的,所以当msg tid打印出的线程id跟我们调用synchronized时打印的线程id相同的话,是不是就可以说明这个问题。但是Java中Thread.currentThread().getId();获取的线程id是jvm虚拟机生成的线程id,所以我们需要通过在Java中定义的native函数来获取linux下的线程id。

3、那么当我们在Java中加锁的时候,同时打印出我们通过native打印的线程id,所以这两个线程id应该是成对打印的,这里申明一下:重量级锁是什么意思,就是他每次都会调用os函数,那么如果打印出来的效果像下面这样的话是不是就能说明重量级锁确实是存在的。

msg tid=139972135597824current tid:139972135597824-----msg tid=139972134545152msg tid=139972134545152msg tid=139972135597824current tid:139972135597824-----msg tid=139972134545152msg tid=139972134545152msg tid=139972135597824current tid:139972135597824-----msg tid=139972134545152msg tid=139972134545152msg tid=139972135597824current tid:139972135597824-----msg tid=139972134545152msg tid=139972134545152msg tid=139972135597824current tid:139972135597824-----

4、下面就来证明一下这个问题

测试代码

package org.xinhua.cbcloud.sync;public class Example4Start {    Object o= new Object();    static {        System.loadLibrary( "SyncThreadNative" );    }    public static void main(String[] args) {        System.out.println("xxxxxxxxxxxxxxxxxxxxxxxxxxxx");        Example4Start example4Start = new Example4Start();        example4Start.start();    }    public void start(){        Thread thread = new Thread(){            public void run() {                while (true){                    try {                        sync();                    } catch (InterruptedException e) {                    }                }            }        };        Thread thread2 = new Thread(){            @Override            public void run() {                while (true){                    try {                        sync();                    } catch (InterruptedException e) {                        e.printStackTrace();                    }                }            }        };        thread.setName("t1");        thread2.setName("t2");        thread.start();        thread2.start();    }    public native void tid();    public  void sync() throws InterruptedException {        synchronized(o) {            tid();        }    }}

上传测试代码到指定目录,然后javac 进行编译

43d4d2907f5fc9f54d2ccbb49ccfb1c9.png

生成.h头文件

javah org.xinhua.cbcloud.sync.Example4Start

注意:执行javac和javah的目录是不一样的

362e17323c91f4e12386025dbed5c2ad.png
0f1937b25bda5670428e3591a50e99ce.png

定义一个方法Java_Example4Start_tid来获取linux系统下的线程id

#include#include#include#include "org_xinhua_cbcloud_sync_Example4Start.h"JNIEXPORT void JNICALL Java_org_xinhua_cbcloud_sync_Example4Start_tid(JNIEnv *env, jobject c1){printf("current tid:%lu-----",pthread_self());usleep(700);} 

调用原理

1、System.loadLibrary( "SyncThreadNative" ); 在Java中加载类库。

2、将Java文件编译成 .h文件。

3、编写c文件,在c文件中引入上面的 .h文件,再调用Java文件中的native方法。

4、编译c文件生成对应的 .so类库文件。

5、把这个.so文件加入到path,这样java才能load到。

把这个getId.c编译成为一个动态链接库(.so),这样在java代码里才能被load到内存

libSyncThreadNative这个命名需要注意libxx,xx就等于你java那边写的字符串

gcc -fPIC -I /usr/lib/jvm/java-1.8.0-openjdk/include -I /usr/lib/jvm/java-1.8.0-openjdk/include/linux -shared -o libSyncThreadNative.so getId.c

直接执行会找不到getId.c文件的中的Example4Start.h文件,所以在当前目录下生成一下,然后再执行上面的命令

e60eebba038656a52e0d08908656c327.png
19da3894217c4a36629cce1581e6541c.png

做完这一系列事情之后需要把这个.so文件加入到path,这样java才能load到

export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/home/lyfeifei
e665392efcf4c341ba2fdf0003f8b279.png

运行结果

717d1b6053b7b376e29ef67156dd4626.png

本章总结

测试代码中两个线程是互斥执行的,存在竞争,这里肯定是重量级锁,所以每次加锁都会调用os函数,就像运行结果打印的这样。感兴趣的可以自行实践验证一下其他锁的情况。本章涉及的面比较广,建议收藏,今天看不懂不要紧,总会有看懂的那一天。

转载请注明出处。

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值