WebRTC M79 Audio/Video Synchronization

1 References

https://blog.csdn.net/sonysuqin/article/details/107297157

https://www.cnblogs.com/jiayayao/p/12649665.html

https://www.jianshu.com/p/3a4d24a71091

2 Relevant Code

The core objects in audio/video synchronization are AudioReceiveStream and VideoReceiveStream, both of which inherit from Syncable.

2.1 Initialization Related to Synchronization

namespace webrtc {

class Syncable {
 public:
  struct Info {
    int64_t latest_receive_time_ms = 0;
    uint32_t latest_received_capture_timestamp = 0;
    uint32_t capture_time_ntp_secs = 0;
    uint32_t capture_time_ntp_frac = 0;
    uint32_t capture_time_source_clock = 0;
    int current_delay_ms = 0;
  };

  virtual ~Syncable();

  virtual int id() const = 0;
  virtual absl::optional<Info> GetInfo() const = 0;
  virtual uint32_t GetPlayoutTimestamp() const = 0;
  virtual void SetMinimumPlayoutDelay(int delay_ms) = 0;
};
}  // namespace webrtc
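
The Info fields map one-to-one onto data sources shown later in this walkthrough: latest_receive_time_ms and latest_received_capture_timestamp come from the most recently received RTP packet (recorded in the OnRtpPacket handlers), capture_time_ntp_secs / capture_time_ntp_frac and capture_time_source_clock come from the NTP and RTP timestamps carried in the most recently received RTCP Sender Report, and current_delay_ms is the stream's current playout delay estimate.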

 

Creating a VideoReceiveStream and performing the related setup:

webrtc::VideoReceiveStream* Call::CreateVideoReceiveStream(
    webrtc::VideoReceiveStream::Config configuration) {
  TRACE_EVENT0("webrtc", "Call::CreateVideoReceiveStream");
  RTC_DCHECK_RUN_ON(&configuration_sequence_checker_);

  receive_side_cc_.SetSendPeriodicFeedback(
      SendPeriodicFeedback(configuration.rtp.extensions));

  RegisterRateObserver();

  VideoReceiveStream* receive_stream = new VideoReceiveStream(
      task_queue_factory_, &video_receiver_controller_, num_cpu_cores_,
      transport_send_ptr_->packet_router(), std::move(configuration),
      module_process_thread_.get(), call_stats_.get(), clock_);

  const webrtc::VideoReceiveStream::Config& config = receive_stream->config();
  {
    WriteLockScoped write_lock(*receive_crit_);
    if (config.rtp.rtx_ssrc) {
      // We record identical config for the rtx stream as for the main
      // stream. Since the transport_send_cc negotiation is per payload
      // type, we may get an incorrect value for the rtx stream, but
      // that is unlikely to matter in practice.
      receive_rtp_config_.emplace(config.rtp.rtx_ssrc,
                                  ReceiveRtpConfig(config));
    }
    receive_rtp_config_.emplace(config.rtp.remote_ssrc,
                                ReceiveRtpConfig(config));
    video_receive_streams_.insert(receive_stream);
    ConfigureSync(config.sync_group); // Audio/video synchronization setup.
  }
  receive_stream->SignalNetworkState(video_network_state_);
  UpdateAggregateNetworkState();
  event_log_->Log(std::make_unique<RtcEventVideoReceiveStreamConfig>(
      CreateRtcLogStreamConfig(config)));
  return receive_stream;
}


void Call::ConfigureSync(const std::string& sync_group) {
  // Set sync only if there was no previous one.
  if (sync_group.empty())
    return;

  AudioReceiveStream* sync_audio_stream = nullptr;
  // Find existing audio stream.
  const auto it = sync_stream_mapping_.find(sync_group);
  if (it != sync_stream_mapping_.end()) {
    sync_audio_stream = it->second;
  } else {
    // No configured audio stream, see if we can find one.
    for (AudioReceiveStream* stream : audio_receive_streams_) {
      if (stream->config().sync_group == sync_group) {
        if (sync_audio_stream != nullptr) {
          RTC_LOG(LS_WARNING)
              << "Attempting to sync more than one audio stream "
                 "within the same sync group. This is not "
                 "supported in the current implementation.";
          break;
        }
        sync_audio_stream = stream;
      }
    }
  }
  if (sync_audio_stream)
    sync_stream_mapping_[sync_group] = sync_audio_stream;
  size_t num_synced_streams = 0;
  for (VideoReceiveStream* video_stream : video_receive_streams_) {
    if (video_stream->config().sync_group != sync_group)
      continue;
    ++num_synced_streams;
    if (num_synced_streams > 1) {
      // TODO(pbos): Support synchronizing more than one A/V pair.
      // https://code.google.com/p/webrtc/issues/detail?id=4762
      RTC_LOG(LS_WARNING)
          << "Attempting to sync more than one audio/video pair "
             "within the same sync group. This is not supported in "
             "the current implementation.";
    }
    // Only sync the first A/V pair within this sync group.
    if (num_synced_streams == 1) {
      // sync_audio_stream may be null and that's ok.
      video_stream->SetSync(sync_audio_stream); // Bind sync_audio_stream to video_stream so the pair can be synchronized.
    } else {
      video_stream->SetSync(nullptr);
    }
  }
}
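
For context, sync_group is just a string carried in both receive-stream configs; streams with the same non-empty group form an A/V pair. A minimal sketch (the group value here is hypothetical):

webrtc::AudioReceiveStream::Config audio_config;
audio_config.sync_group = "stream_label";  // hypothetical group name
webrtc::VideoReceiveStream::Config video_config;
video_config.sync_group = "stream_label";  // same group => candidate A/V pair
// Call::CreateAudioReceiveStream and Call::CreateVideoReceiveStream both end
// up in Call::ConfigureSync("stream_label"), which pairs the first matching
// audio stream with the first matching video stream.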


void VideoReceiveStream::SetSync(Syncable* audio_syncable) {
  RTC_DCHECK_RUN_ON(&worker_sequence_checker_);
  rtp_stream_sync_.ConfigureSync(audio_syncable); // RtpStreamsSynchronizer::ConfigureSync
}


												VideoReceiveStream::VideoReceiveStream(
												    TaskQueueFactory* task_queue_factory,
												    RtpStreamReceiverControllerInterface* receiver_controller,
												    int num_cpu_cores,
												    PacketRouter* packet_router,
												    VideoReceiveStream::Config config,
												    ProcessThread* process_thread, // process_thread is the ProcessThread::Create("ModuleProcessThread") instance created in Call* Call::Create(const Call::Config& config)
												    CallStats* call_stats,
												    Clock* clock,
												    VCMTiming* timing)
												    : task_queue_factory_(task_queue_factory),
												      transport_adapter_(config.rtcp_send_transport),
												      config_(std::move(config)),
												      num_cpu_cores_(num_cpu_cores),
												      process_thread_(process_thread),
												      clock_(clock),
												      call_stats_(call_stats),
												      source_tracker_(clock_),
												      stats_proxy_(&config_, clock_),
												      rtp_receive_statistics_(ReceiveStatistics::Create(clock_)),
												      timing_(timing),
												      video_receiver_(clock_, timing_.get()),
												      rtp_video_stream_receiver_(clock_,
												                                 &transport_adapter_,
												                                 call_stats,
												                                 packet_router,
												                                 &config_,
												                                 rtp_receive_statistics_.get(),
												                                 &stats_proxy_,
												                                 process_thread_,
												                                 this,     // NackSender
												                                 nullptr,  // Use default KeyFrameRequestSender
												                                 this,     // OnCompleteFrameCallback
												                                 config_.frame_decryptor),
												      rtp_stream_sync_(this), // Passes the this pointer of webrtc::internal::VideoReceiveStream, i.e. syncable_video_ inside RtpStreamsSynchronizer actually points to this VideoReceiveStream
															process_thread_->RegisterModule(&rtp_stream_sync_, RTC_FROM_HERE); // Register rtp_stream_sync_ (a RtpStreamsSynchronizer) with the module process thread






void RtpStreamsSynchronizer::ConfigureSync(Syncable* syncable_audio) {
  rtc::CritScope lock(&crit_);
  if (syncable_audio == syncable_audio_) {
    // This prevents expensive no-ops.
    return;
  }

  syncable_audio_ = syncable_audio; // syncable_audio_ actually points to a webrtc::internal::AudioReceiveStream
  sync_.reset(nullptr);
  if (syncable_audio_) {
    sync_.reset(new StreamSynchronization(syncable_video_->id(),
                                          syncable_audio_->id()));
  }
}

Creating an AudioReceiveStream and performing the related setup:

webrtc::AudioReceiveStream* Call::CreateAudioReceiveStream(
    const webrtc::AudioReceiveStream::Config& config) {
  TRACE_EVENT0("webrtc", "Call::CreateAudioReceiveStream");
  RTC_DCHECK_RUN_ON(&configuration_sequence_checker_);
  RegisterRateObserver();
  event_log_->Log(std::make_unique<RtcEventAudioReceiveStreamConfig>(
      CreateRtcLogStreamConfig(config)));
  AudioReceiveStream* receive_stream = new AudioReceiveStream(
      clock_, &audio_receiver_controller_, transport_send_ptr_->packet_router(), // RtpStreamReceiverController audio_receiver_controller_;
      module_process_thread_.get(), config, config_.audio_state, event_log_); // note config_.audio_state
  {
    WriteLockScoped write_lock(*receive_crit_);
    receive_rtp_config_.emplace(config.rtp.remote_ssrc,
                                ReceiveRtpConfig(config));
    audio_receive_streams_.insert(receive_stream);

    ConfigureSync(config.sync_group); // Audio/video synchronization setup.
  }
  {
    ReadLockScoped read_lock(*send_crit_);
    auto it = audio_send_ssrcs_.find(config.rtp.local_ssrc);
    if (it != audio_send_ssrcs_.end()) {
      receive_stream->AssociateSendStream(it->second);
    }
  }
  UpdateAggregateNetworkState();
  return receive_stream;
}



AudioReceiveStream::AudioReceiveStream(
    Clock* clock,
    RtpStreamReceiverControllerInterface* receiver_controller, // receiver_controller is the address of audio_receiver_controller_ in webrtc::internal::Call
    PacketRouter* packet_router,
    ProcessThread* module_process_thread,
    const webrtc::AudioReceiveStream::Config& config,
    const rtc::scoped_refptr<webrtc::AudioState>& audio_state,
    webrtc::RtcEventLog* event_log)
    : AudioReceiveStream(clock,
                         receiver_controller,
                         packet_router,
                         config,
                         audio_state,
                         event_log,
                         CreateChannelReceive(clock, // see CreateChannelReceive below
                                              audio_state.get(),
                                              module_process_thread,
                                              config,
                                              event_log)) {}
                                              

											// The indentation below marks internal creation details.
											std::unique_ptr<voe::ChannelReceiveInterface> CreateChannelReceive(
											    Clock* clock,
											    webrtc::AudioState* audio_state,
											    ProcessThread* module_process_thread,
											    const webrtc::AudioReceiveStream::Config& config,
											    RtcEventLog* event_log) {
											  RTC_DCHECK(audio_state);
											  internal::AudioState* internal_audio_state =
											      static_cast<internal::AudioState*>(audio_state);
											  return voe::CreateChannelReceive( // see voe::CreateChannelReceive below
											      clock, module_process_thread, internal_audio_state->audio_device_module(),
											      config.media_transport_config, config.rtcp_send_transport, event_log, // config.rtcp_send_transport is the this pointer of WebRtcVoiceMediaChannel; see WebRtcVoiceMediaChannel::WebRtcAudioReceiveStream
											      config.rtp.local_ssrc, config.rtp.remote_ssrc,
											      config.jitter_buffer_max_packets, config.jitter_buffer_fast_accelerate,
											      config.jitter_buffer_min_delay_ms,
											      config.jitter_buffer_enable_rtx_handling, config.decoder_factory, // config.decoder_factory is the return value of CreateBuiltinAudioDecoderFactory()
											      config.codec_pair_id, config.frame_decryptor, config.crypto_options);
											}
											
											
											std::unique_ptr<ChannelReceiveInterface> CreateChannelReceive(
											    Clock* clock,
											    ProcessThread* module_process_thread,
											    AudioDeviceModule* audio_device_module,
											    const MediaTransportConfig& media_transport_config,
											    Transport* rtcp_send_transport,
											    RtcEventLog* rtc_event_log,
											    uint32_t local_ssrc,
											    uint32_t remote_ssrc,
											    size_t jitter_buffer_max_packets,
											    bool jitter_buffer_fast_playout,
											    int jitter_buffer_min_delay_ms,
											    bool jitter_buffer_enable_rtx_handling,
											    rtc::scoped_refptr<AudioDecoderFactory> decoder_factory, // the return value of CreateBuiltinAudioDecoderFactory()
											    absl::optional<AudioCodecPairId> codec_pair_id,
											    rtc::scoped_refptr<FrameDecryptorInterface> frame_decryptor,
											    const webrtc::CryptoOptions& crypto_options) {
											  return std::make_unique<ChannelReceive>(
											      clock, module_process_thread, audio_device_module, media_transport_config,
											      rtcp_send_transport, rtc_event_log, local_ssrc, remote_ssrc,
											      jitter_buffer_max_packets, jitter_buffer_fast_playout,
											      jitter_buffer_min_delay_ms, jitter_buffer_enable_rtx_handling,
											      decoder_factory, codec_pair_id, frame_decryptor, crypto_options); // decoder_factory: see note above
											}
											
											
											
											// webrtc::voe::ChannelReceive
											// class ChannelReceiveInterface : public RtpPacketSinkInterface
											// class ChannelReceive : public ChannelReceiveInterface,
											//                        public MediaTransportAudioSinkInterface
											// ===> key member:
											// acm2::AcmReceiver acm_receiver_;
											
											
											ChannelReceive::ChannelReceive(
											    Clock* clock,
											    ProcessThread* module_process_thread,
											    AudioDeviceModule* audio_device_module,
											    const MediaTransportConfig& media_transport_config,
											    Transport* rtcp_send_transport,
											    RtcEventLog* rtc_event_log,
											    uint32_t local_ssrc,
											    uint32_t remote_ssrc,
											    size_t jitter_buffer_max_packets,
											    bool jitter_buffer_fast_playout,
											    int jitter_buffer_min_delay_ms,
											    bool jitter_buffer_enable_rtx_handling,
											    rtc::scoped_refptr<AudioDecoderFactory> decoder_factory, // the return value of CreateBuiltinAudioDecoderFactory()
											    absl::optional<AudioCodecPairId> codec_pair_id,
											    rtc::scoped_refptr<FrameDecryptorInterface> frame_decryptor,
											    const webrtc::CryptoOptions& crypto_options)
											    : event_log_(rtc_event_log),
											      rtp_receive_statistics_(ReceiveStatistics::Create(clock)),
											      remote_ssrc_(remote_ssrc),
											      acm_receiver_(AcmConfig(decoder_factory, // decoder_factory comes from CreateBuiltinAudioDecoderFactory()
											                              codec_pair_id,
											                              jitter_buffer_max_packets,
											                              jitter_buffer_fast_playout)),
											      _outputAudioLevel(),
											      ntp_estimator_(clock),
											      playout_timestamp_rtp_(0),
											      playout_delay_ms_(0),
											      rtp_ts_wraparound_handler_(new rtc::TimestampWrapAroundHandler()),
											      capture_start_rtp_time_stamp_(-1),
											      capture_start_ntp_time_ms_(-1),
											      _moduleProcessThreadPtr(module_process_thread),
											      _audioDeviceModulePtr(audio_device_module),
											      _outputGain(1.0f),
											      associated_send_channel_(nullptr),
											      media_transport_config_(media_transport_config),
											      frame_decryptor_(frame_decryptor),
											      crypto_options_(crypto_options) {
											  // TODO(nisse): Use _moduleProcessThreadPtr instead?
											  module_process_thread_checker_.Detach();
											
											  RTC_DCHECK(module_process_thread);
											  RTC_DCHECK(audio_device_module);
											
											  acm_receiver_.ResetInitialDelay();
											  acm_receiver_.SetMinimumDelay(0);
											  acm_receiver_.SetMaximumDelay(0);
											  acm_receiver_.FlushBuffers();
											
											  _outputAudioLevel.ResetLevelFullRange();
											
											  rtp_receive_statistics_->EnableRetransmitDetection(remote_ssrc_, true);
											  RtpRtcp::Configuration configuration;
											  configuration.clock = clock;
											  configuration.audio = true;
											  configuration.receiver_only = true;
											  configuration.outgoing_transport = rtcp_send_transport;
											  configuration.receive_statistics = rtp_receive_statistics_.get();
											  configuration.event_log = event_log_;
											  configuration.local_media_ssrc = local_ssrc;
											
											  _rtpRtcpModule = RtpRtcp::Create(configuration);
											  _rtpRtcpModule->SetSendingMediaStatus(false);
											  _rtpRtcpModule->SetRemoteSSRC(remote_ssrc_);
											
											  _moduleProcessThreadPtr->RegisterModule(_rtpRtcpModule.get(), RTC_FROM_HERE);
											
											  // Ensure that RTCP is enabled for the created channel.
											  _rtpRtcpModule->SetRTCPStatus(RtcpMode::kCompound);
											
											  if (media_transport()) {
											    media_transport()->SetReceiveAudioSink(this);
											  }
											}
											
											
											AudioCodingModule::Config AcmConfig(
											    rtc::scoped_refptr<AudioDecoderFactory> decoder_factory, // the return value of CreateBuiltinAudioDecoderFactory()
											    absl::optional<AudioCodecPairId> codec_pair_id,
											    size_t jitter_buffer_max_packets,
											    bool jitter_buffer_fast_playout) {
											  AudioCodingModule::Config acm_config;
											  acm_config.decoder_factory = decoder_factory; // from CreateBuiltinAudioDecoderFactory()
											  acm_config.neteq_config.codec_pair_id = codec_pair_id;
											  acm_config.neteq_config.max_packets_in_buffer = jitter_buffer_max_packets;
											  acm_config.neteq_config.enable_fast_accelerate = jitter_buffer_fast_playout;
											  acm_config.neteq_config.enable_muted_state = true;
											
											  return acm_config;
											}
											
											
											AcmReceiver::AcmReceiver(const AudioCodingModule::Config& config)
											    : last_audio_buffer_(new int16_t[AudioFrame::kMaxDataSizeSamples]),
											      neteq_(NetEq::Create(config.neteq_config,
											                           config.clock,
											                           config.decoder_factory)), // config.decoder_factory comes from CreateBuiltinAudioDecoderFactory()
											      clock_(config.clock),
											      resampled_last_output_frame_(true) {
											  RTC_DCHECK(clock_);
											  memset(last_audio_buffer_.get(), 0,
											         sizeof(int16_t) * AudioFrame::kMaxDataSizeSamples);
											}
											
											
																					NetEq* NetEq::Create(
																					    const NetEq::Config& config,
																					    Clock* clock,
																					    const rtc::scoped_refptr<AudioDecoderFactory>& decoder_factory) { // from CreateBuiltinAudioDecoderFactory()
																					  return new NetEqImpl(config,
																					                       NetEqImpl::Dependencies(config, clock, decoder_factory));
																					}


																					NetEqImpl::Dependencies::Dependencies(
																					    const NetEq::Config& config,
																					    Clock* clock,
																					    const rtc::scoped_refptr<AudioDecoderFactory>& decoder_factory)
																					    : clock(clock),
																					      tick_timer(new TickTimer),
																					      stats(new StatisticsCalculator),
																					      buffer_level_filter(new BufferLevelFilter),
																					      decoder_database(
																					          new DecoderDatabase(decoder_factory, config.codec_pair_id)),
																					      delay_peak_detector(
																					          new DelayPeakDetector(tick_timer.get(), config.enable_rtx_handling)),
																					      delay_manager(DelayManager::Create(config.max_packets_in_buffer,
																					                                         config.min_delay_ms,
																					                                         config.enable_rtx_handling,
																					                                         delay_peak_detector.get(),
																					                                         tick_timer.get(),
																					                                         stats.get())),
																					      dtmf_buffer(new DtmfBuffer(config.sample_rate_hz)),
																					      dtmf_tone_generator(new DtmfToneGenerator),
																					      packet_buffer(
																					          new PacketBuffer(config.max_packets_in_buffer, tick_timer.get())),
																					      red_payload_splitter(new RedPayloadSplitter),
																					      timestamp_scaler(new TimestampScaler(*decoder_database)),
																					      accelerate_factory(new AccelerateFactory),
																					      expand_factory(new ExpandFactory),
																					      preemptive_expand_factory(new PreemptiveExpandFactory) {}



AudioReceiveStream::AudioReceiveStream(
    Clock* clock,
    RtpStreamReceiverControllerInterface* receiver_controller,
    PacketRouter* packet_router,
    const webrtc::AudioReceiveStream::Config& config,
    const rtc::scoped_refptr<webrtc::AudioState>& audio_state,
    webrtc::RtcEventLog* event_log,
    std::unique_ptr<voe::ChannelReceiveInterface> channel_receive) // channel_receive actually points to a webrtc::voe::ChannelReceive
    : audio_state_(audio_state),
      channel_receive_(std::move(channel_receive)), // after the move, channel_receive_ also points to that webrtc::voe::ChannelReceive
      source_tracker_(clock) {
  RTC_LOG(LS_INFO) << "AudioReceiveStream: " << config.rtp.remote_ssrc;
  RTC_DCHECK(config.decoder_factory);
  RTC_DCHECK(config.rtcp_send_transport);
  RTC_DCHECK(audio_state_);
  RTC_DCHECK(channel_receive_);

  module_process_thread_checker_.Detach();

  if (!config.media_transport_config.media_transport) {
    RTC_DCHECK(receiver_controller);
    RTC_DCHECK(packet_router);
    // Configure bandwidth estimation.
    channel_receive_->RegisterReceiverCongestionControlObjects(packet_router);

    // Register with transport.
    rtp_stream_receiver_ = receiver_controller->CreateReceiver( // RtpStreamReceiverController::CreateReceiver
        config.rtp.remote_ssrc, channel_receive_.get()); // channel_receive_ points to the webrtc::voe::ChannelReceive
  } // rtp_stream_receiver_ points to a webrtc::RtpStreamReceiverController::Receiver
  ConfigureStream(this, config, true);
}


// webrtc::voe::ChannelReceive
// class ChannelReceiveInterface : public RtpPacketSinkInterface
// class ChannelReceive : public ChannelReceiveInterface,
//                        public MediaTransportAudioSinkInterface

// class webrtc::RtpStreamReceiverController::Receiver : public RtpStreamReceiverInterface
std::unique_ptr<RtpStreamReceiverInterface>
RtpStreamReceiverController::CreateReceiver(uint32_t ssrc,
                                            RtpPacketSinkInterface* sink) {
  return std::make_unique<Receiver>(this, ssrc, sink); // webrtc::RtpStreamReceiverController::Receiver
}

 

Audio/video synchronization is carried out on the thread named ModuleProcessThread:

std::unique_ptr<ProcessThread> ProcessThread::Create(const char* thread_name) {
  return std::unique_ptr<ProcessThread>(new ProcessThreadImpl(thread_name));
}


ProcessThreadImpl::ProcessThreadImpl(const char* thread_name)
    : stop_(false), thread_name_(thread_name) {}
    
class ProcessThreadImpl : public ProcessThread {
 public:
  explicit ProcessThreadImpl(const char* thread_name);
  ~ProcessThreadImpl() override;

  void Start() override;
  void Stop() override;

  void WakeUp(Module* module) override;
  void PostTask(std::unique_ptr<QueuedTask> task) override;

  void RegisterModule(Module* module, const rtc::Location& from) override;
  void DeRegisterModule(Module* module) override;

 protected:
  static void Run(void* obj);
  bool Process();

 private:
  struct ModuleCallback {
    ModuleCallback() = delete;
    ModuleCallback(ModuleCallback&& cb) = default;
    ModuleCallback(const ModuleCallback& cb) = default;
    ModuleCallback(Module* module, const rtc::Location& location)
        : module(module), location(location) {}
    bool operator==(const ModuleCallback& cb) const {
      return cb.module == module;
    }

    Module* const module;
    int64_t next_callback = 0;  // Absolute timestamp.
    const rtc::Location location;

   private:
    ModuleCallback& operator=(ModuleCallback&);
  };

  typedef std::list<ModuleCallback> ModuleList;

  // Warning: For some reason, if |lock_| comes immediately before |modules_|
  // with the current class layout, we will  start to have mysterious crashes
  // on Mac 10.9 debug.  I (Tommi) suspect we're hitting some obscure alignemnt
  // issues, but I haven't figured out what they are, if there are alignment
  // requirements for mutexes on Mac or if there's something else to it.
  // So be careful with changing the layout.
  rtc::CriticalSection lock_;  // Used to guard modules_, tasks_ and stop_.

  rtc::ThreadChecker thread_checker_;
  rtc::Event wake_up_;
  // TODO(pbos): Remove unique_ptr and stop recreating the thread.
  std::unique_ptr<rtc::PlatformThread> thread_; // platform-specific thread

  ModuleList modules_;
  std::queue<QueuedTask*> queue_;
  bool stop_;
  const char* thread_name_;
};


void ProcessThreadImpl::Start() {
  RTC_DCHECK(thread_checker_.IsCurrent());
  RTC_DCHECK(!thread_.get());
  if (thread_.get())
    return;

  RTC_DCHECK(!stop_);

  for (ModuleCallback& m : modules_)
    m.module->ProcessThreadAttached(this);

  thread_.reset(
      new rtc::PlatformThread(&ProcessThreadImpl::Run, this, thread_name_));
  thread_->Start();
}


void ProcessThreadImpl::Run(void* obj) {
  ProcessThreadImpl* impl = static_cast<ProcessThreadImpl*>(obj);
  while (impl->Process()) {
  }
}

bool ProcessThreadImpl::Process() {
  TRACE_EVENT1("webrtc", "ProcessThreadImpl", "name", thread_name_);
  int64_t now = rtc::TimeMillis();
  int64_t next_checkpoint = now + (1000 * 60);

  {
    rtc::CritScope lock(&lock_);
    if (stop_)
      return false;
    for (ModuleCallback& m : modules_) {
      // TODO(tommi): Would be good to measure the time TimeUntilNextProcess
      // takes and dcheck if it takes too long (e.g. >=10ms).  Ideally this
      // operation should not require taking a lock, so querying all modules
      // should run in a matter of nanoseconds.
      if (m.next_callback == 0)
        m.next_callback = GetNextCallbackTime(m.module, now);

      if (m.next_callback <= now ||
          m.next_callback == kCallProcessImmediately) {
        {
          TRACE_EVENT2("webrtc", "ModuleProcess", "function",
                       m.location.function_name(), "file",
                       m.location.file_and_line());
          m.module->Process(); // process_thread_->RegisterModule(&rtp_stream_sync_, RTC_FROM_HERE) above registered rtp_stream_sync_, a RtpStreamsSynchronizer,
        } // so this call ends up running RtpStreamsSynchronizer::Process.
        // Use a new 'now' reference to calculate when the next callback
        // should occur.  We'll continue to use 'now' above for the baseline
        // of calculating how long we should wait, to reduce variance.
        int64_t new_now = rtc::TimeMillis();
        m.next_callback = GetNextCallbackTime(m.module, new_now);
      }

      if (m.next_callback < next_checkpoint)
        next_checkpoint = m.next_callback;
    }

    while (!queue_.empty()) {
      QueuedTask* task = queue_.front();
      queue_.pop();
      lock_.Leave();
      task->Run();
      delete task;
      lock_.Enter();
    }
  }

  int64_t time_to_wait = next_checkpoint - rtc::TimeMillis();
  if (time_to_wait > 0)
    wake_up_.Wait(static_cast<int>(time_to_wait));

  return true;
}
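
For reference, everything this loop drives just implements the Module interface (TimeUntilNextProcess/Process). Below is a minimal sketch of a hypothetical periodic module, assuming the m79 Module interface; RtpStreamsSynchronizer follows the same pattern:

#include "modules/include/module.h"
#include "rtc_base/time_utils.h"

class TickModule : public webrtc::Module {
 public:
  // Used by ProcessThreadImpl (via GetNextCallbackTime) to schedule the next
  // Process() call; returns milliseconds until it should run again.
  int64_t TimeUntilNextProcess() override {
    return last_process_ms_ + kIntervalMs - rtc::TimeMillis();
  }
  // Runs on the ModuleProcessThread once the deadline has passed.
  void Process() override {
    last_process_ms_ = rtc::TimeMillis();
    // ... periodic work, e.g. one A/V sync iteration ...
  }

 private:
  static constexpr int64_t kIntervalMs = 1000;  // hypothetical period
  int64_t last_process_ms_ = 0;
};

Registering it with process_thread->RegisterModule(&module, RTC_FROM_HERE) then makes Process() fire roughly every kIntervalMs.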




void RtpStreamsSynchronizer::Process() {
  RTC_DCHECK_RUN_ON(&process_thread_checker_);
  last_sync_time_ = rtc::TimeNanos();

  rtc::CritScope lock(&crit_);
  if (!syncable_audio_) {
    return;
  }
  RTC_DCHECK(sync_.get());

  absl::optional<Syncable::Info> audio_info = syncable_audio_->GetInfo(); // AudioReceiveStream::GetInfo
  if (!audio_info || !UpdateMeasurements(&audio_measurement_, *audio_info)) {
    return;
  }

  int64_t last_video_receive_ms = video_measurement_.latest_receive_time_ms;
  absl::optional<Syncable::Info> video_info = syncable_video_->GetInfo(); // VideoReceiveStream::GetInfo
  if (!video_info || !UpdateMeasurements(&video_measurement_, *video_info)) {
    return;
  }

  if (last_video_receive_ms == video_measurement_.latest_receive_time_ms) {
    // No new video packet has been received since last update.
    return;
  }

  int relative_delay_ms;
  // Calculate how much later or earlier the audio stream is compared to video.
  if (!sync_->ComputeRelativeDelay(audio_measurement_, video_measurement_, // StreamSynchronization::ComputeRelativeDelay
                                   &relative_delay_ms)) {
    return;
  }

  TRACE_COUNTER1("webrtc", "SyncCurrentVideoDelay",
                 video_info->current_delay_ms);
  TRACE_COUNTER1("webrtc", "SyncCurrentAudioDelay",
                 audio_info->current_delay_ms);
  TRACE_COUNTER1("webrtc", "SyncRelativeDelay", relative_delay_ms);
  int target_audio_delay_ms = 0;
  int target_video_delay_ms = video_info->current_delay_ms;
  // Calculate the necessary extra audio delay and desired total video
  // delay to get the streams in sync.
  if (!sync_->ComputeDelays(relative_delay_ms, audio_info->current_delay_ms, // StreamSynchronization::ComputeDelays
                            &target_audio_delay_ms, &target_video_delay_ms)) {
    return;
  }

  syncable_audio_->SetMinimumPlayoutDelay(target_audio_delay_ms); // AudioReceiveStream::SetMinimumPlayoutDelay
  syncable_video_->SetMinimumPlayoutDelay(target_video_delay_ms); // VideoReceiveStream::SetMinimumPlayoutDelay
}
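
The two SetMinimumPlayoutDelay calls at the end are where the computed targets take effect: the audio request ends up in NetEq (via ChannelReceive and AcmReceiver::SetMinimumDelay), stretching the jitter buffer, while the video request becomes VCMTiming's min_playout_delay_ms_, which feeds the TargetVideoDelay() calculation shown in section 4.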


// Compute the relative delay between the most recently received audio and video packets.
bool StreamSynchronization::ComputeRelativeDelay(
    const Measurements& audio_measurement,
    const Measurements& video_measurement,
    int* relative_delay_ms) {
  assert(relative_delay_ms);
  int64_t audio_last_capture_time_ms;
  if (!audio_measurement.rtp_to_ntp.Estimate(audio_measurement.latest_timestamp,
                                             &audio_last_capture_time_ms)) {
    return false;
  }
  int64_t video_last_capture_time_ms;
  if (!video_measurement.rtp_to_ntp.Estimate(video_measurement.latest_timestamp,
                                             &video_last_capture_time_ms)) {
    return false;
  }
  if (video_last_capture_time_ms < 0) {
    return false;
  }
  // Positive diff means that video_measurement is behind audio_measurement.
  *relative_delay_ms =
      video_measurement.latest_receive_time_ms -
      audio_measurement.latest_receive_time_ms -
      (video_last_capture_time_ms - audio_last_capture_time_ms);
  if (*relative_delay_ms > kMaxDeltaDelayMs ||
      *relative_delay_ms < -kMaxDeltaDelayMs) {
    return false;
  }
  return true;
}
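
A hypothetical worked example: the latest video packet arrived at local time 10500 ms carrying a frame captured (per the RTP-to-NTP estimate) at sender time 10000 ms, while the latest audio packet arrived at 10460 ms for audio captured at 10020 ms. Then relative_delay_ms = (10500 - 10460) - (10000 - 10020) = 40 + 20 = 60: video is arriving 60 ms later than audio, relative to capture time.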

// The core A/V synchronization logic.
// Reference: https://blog.csdn.net/sonysuqin/article/details/107297157
bool StreamSynchronization::ComputeDelays(int relative_delay_ms,
                                          int current_audio_delay_ms,
                                          int* total_audio_delay_target_ms,
                                          int* total_video_delay_target_ms) {
  assert(total_audio_delay_target_ms && total_video_delay_target_ms);

  int current_video_delay_ms = *total_video_delay_target_ms;
  RTC_LOG(LS_VERBOSE) << "Audio delay: " << current_audio_delay_ms
                      << " current diff: " << relative_delay_ms
                      << " for stream " << audio_stream_id_;
  // Calculate the difference between the lowest possible video delay and
  // the current audio delay.
  int current_diff_ms =
      current_video_delay_ms - current_audio_delay_ms + relative_delay_ms;

  avg_diff_ms_ =
      ((kFilterLength - 1) * avg_diff_ms_ + current_diff_ms) / kFilterLength;
  if (abs(avg_diff_ms_) < kMinDeltaMs) {
    // Don't adjust if the diff is within our margin.
    return false;
  }

  // Make sure we don't move too fast.
  int diff_ms = avg_diff_ms_ / 2;
  diff_ms = std::min(diff_ms, kMaxChangeMs);
  diff_ms = std::max(diff_ms, -kMaxChangeMs);

  // Reset the average after a move to prevent overshooting reaction.
  avg_diff_ms_ = 0;

  if (diff_ms > 0) {
    // The minimum video delay is longer than the current audio delay.
    // We need to decrease extra video delay, or add extra audio delay.
    if (channel_delay_.extra_video_delay_ms > base_target_delay_ms_) {
      // We have extra delay added to ViE. Reduce this delay before adding
      // extra delay to VoE.
      channel_delay_.extra_video_delay_ms -= diff_ms;
      channel_delay_.extra_audio_delay_ms = base_target_delay_ms_;
    } else {  // channel_delay_.extra_video_delay_ms > 0
      // We have no extra video delay to remove, increase the audio delay.
      channel_delay_.extra_audio_delay_ms += diff_ms;
      channel_delay_.extra_video_delay_ms = base_target_delay_ms_;
    }
  } else {  // if (diff_ms > 0)
    // The video delay is lower than the current audio delay.
    // We need to decrease extra audio delay, or add extra video delay.
    if (channel_delay_.extra_audio_delay_ms > base_target_delay_ms_) {
      // We have extra delay in VoiceEngine.
      // Start with decreasing the voice delay.
      // Note: diff_ms is negative; add the negative difference.
      channel_delay_.extra_audio_delay_ms += diff_ms;
      channel_delay_.extra_video_delay_ms = base_target_delay_ms_;
    } else {  // channel_delay_.extra_audio_delay_ms > base_target_delay_ms_
      // We have no extra delay in VoiceEngine, increase the video delay.
      // Note: diff_ms is negative; subtract the negative difference.
      channel_delay_.extra_video_delay_ms -= diff_ms;  // X - (-Y) = X + Y.
      channel_delay_.extra_audio_delay_ms = base_target_delay_ms_;
    }
  }

  // Make sure that video is never below our target.
  channel_delay_.extra_video_delay_ms =
      std::max(channel_delay_.extra_video_delay_ms, base_target_delay_ms_);

  int new_video_delay_ms;
  if (channel_delay_.extra_video_delay_ms > base_target_delay_ms_) {
    new_video_delay_ms = channel_delay_.extra_video_delay_ms;
  } else {
    // No change to the extra video delay. We are changing audio and we only
    // allow to change one at the time.
    new_video_delay_ms = channel_delay_.last_video_delay_ms;
  }

  // Make sure that we don't go below the extra video delay.
  new_video_delay_ms =
      std::max(new_video_delay_ms, channel_delay_.extra_video_delay_ms);

  // Verify we don't go above the maximum allowed video delay.
  new_video_delay_ms =
      std::min(new_video_delay_ms, base_target_delay_ms_ + kMaxDeltaDelayMs);

  int new_audio_delay_ms;
  if (channel_delay_.extra_audio_delay_ms > base_target_delay_ms_) {
    new_audio_delay_ms = channel_delay_.extra_audio_delay_ms;
  } else {
    // No change to the audio delay. We are changing video and we only
    // allow to change one at the time.
    new_audio_delay_ms = channel_delay_.last_audio_delay_ms;
  }

  // Make sure that we don't go below the extra audio delay.
  new_audio_delay_ms =
      std::max(new_audio_delay_ms, channel_delay_.extra_audio_delay_ms);

  // Verify we don't go above the maximum allowed audio delay.
  new_audio_delay_ms =
      std::min(new_audio_delay_ms, base_target_delay_ms_ + kMaxDeltaDelayMs);

  // Remember our last audio and video delays.
  channel_delay_.last_video_delay_ms = new_video_delay_ms;
  channel_delay_.last_audio_delay_ms = new_audio_delay_ms;

  RTC_LOG(LS_VERBOSE) << "Sync video delay " << new_video_delay_ms
                      << " for video stream " << video_stream_id_
                      << " and audio delay "
                      << channel_delay_.extra_audio_delay_ms
                      << " for audio stream " << audio_stream_id_;

  // Return values.
  *total_video_delay_target_ms = new_video_delay_ms;
  *total_audio_delay_target_ms = new_audio_delay_ms;
  return true;
}
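
To make the control loop concrete (assuming the m79 constants kFilterLength = 4, kMinDeltaMs = 30, kMaxChangeMs = 80): with a sustained current_diff_ms of +60, avg_diff_ms_ moves 0 -> 15 -> 26 -> 34 over three Process() iterations; only then does it exceed kMinDeltaMs, giving diff_ms = 34 / 2 = 17, and since there is no extra video delay to remove, 17 ms is added to extra_audio_delay_ms and the average is reset. The smoothing filter, the halving, and the +/-kMaxChangeMs clamp keep each correction gradual rather than jumping the full 60 ms at once.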

3 AudioReceiveStream::GetInfo 

absl::optional<Syncable::Info> AudioReceiveStream::GetInfo() const {
  RTC_DCHECK_RUN_ON(&module_process_thread_checker_);
  absl::optional<Syncable::Info> info = channel_receive_->GetSyncInfo(); // ChannelReceive::GetSyncInfo

  if (!info)
    return absl::nullopt;

  info->current_delay_ms = channel_receive_->GetDelayEstimate(); // ChannelReceive::GetDelayEstimate
  return info;
}


								ChannelReceive::ChannelReceive(
								    Clock* clock,
								    ProcessThread* module_process_thread, // the thread created via ProcessThread::Create("ModuleProcessThread") in Call::Create
								    AudioDeviceModule* audio_device_module,
								    const MediaTransportConfig& media_transport_config,
								    Transport* rtcp_send_transport,
								    RtcEventLog* rtc_event_log,
								    uint32_t local_ssrc,
								    uint32_t remote_ssrc,
								    size_t jitter_buffer_max_packets,
								    bool jitter_buffer_fast_playout,
								    int jitter_buffer_min_delay_ms,
								    bool jitter_buffer_enable_rtx_handling,
								    rtc::scoped_refptr<AudioDecoderFactory> decoder_factory,
								    absl::optional<AudioCodecPairId> codec_pair_id,
								    rtc::scoped_refptr<FrameDecryptorInterface> frame_decryptor,
								    const webrtc::CryptoOptions& crypto_options)
								    : event_log_(rtc_event_log),
								      rtp_receive_statistics_(ReceiveStatistics::Create(clock)),
								      remote_ssrc_(remote_ssrc),
								      acm_receiver_(AcmConfig(decoder_factory,
								                              codec_pair_id,
								                              jitter_buffer_max_packets,
								                              jitter_buffer_fast_playout)),
								      _outputAudioLevel(),
								      ntp_estimator_(clock),
								      playout_timestamp_rtp_(0),
								      playout_delay_ms_(0),
								      rtp_ts_wraparound_handler_(new rtc::TimestampWrapAroundHandler()),
								      capture_start_rtp_time_stamp_(-1),
								      capture_start_ntp_time_ms_(-1),
								      _moduleProcessThreadPtr(module_process_thread), // module_process_thread is the ProcessThread::Create("ModuleProcessThread") thread from Call::Create
								      _audioDeviceModulePtr(audio_device_module),
								      _outputGain(1.0f),
								      associated_send_channel_(nullptr),
								      media_transport_config_(media_transport_config),
								      frame_decryptor_(frame_decryptor),
								      crypto_options_(crypto_options) {
								  // TODO(nisse): Use _moduleProcessThreadPtr instead?
								  module_process_thread_checker_.Detach();
								
								  RTC_DCHECK(module_process_thread);
								  RTC_DCHECK(audio_device_module);
								
								  acm_receiver_.ResetInitialDelay();
								  acm_receiver_.SetMinimumDelay(0);
								  acm_receiver_.SetMaximumDelay(0);
								  acm_receiver_.FlushBuffers();
								
								  _outputAudioLevel.ResetLevelFullRange();
								
								  rtp_receive_statistics_->EnableRetransmitDetection(remote_ssrc_, true);
								  RtpRtcp::Configuration configuration;
								  configuration.clock = clock;
								  configuration.audio = true;
								  configuration.receiver_only = true;
								  configuration.outgoing_transport = rtcp_send_transport;
								  configuration.receive_statistics = rtp_receive_statistics_.get();
								  configuration.event_log = event_log_;
								  configuration.local_media_ssrc = local_ssrc;
								
								  _rtpRtcpModule = RtpRtcp::Create(configuration); // RtpRtcp::Create returns a webrtc::ModuleRtpRtcpImpl
								  _rtpRtcpModule->SetSendingMediaStatus(false);
								  _rtpRtcpModule->SetRemoteSSRC(remote_ssrc_);
								  
								  // _moduleProcessThreadPtr is the thread created via ProcessThread::Create("ModuleProcessThread") in Call::Create.
								  _moduleProcessThreadPtr->RegisterModule(_rtpRtcpModule.get(), RTC_FROM_HERE); // Register the module so that ModuleRtpRtcpImpl::Process runs on the ModuleProcessThread.
								
								  // Ensure that RTCP is enabled for the created channel.
								  _rtpRtcpModule->SetRTCPStatus(RtcpMode::kCompound);
								
								  if (media_transport()) {
								    media_transport()->SetReceiveAudioSink(this);
								  }
								}
								
								
								std::unique_ptr<RtpRtcp> RtpRtcp::Create(const Configuration& configuration) {
								  RTC_DCHECK(configuration.clock);
								  return std::make_unique<ModuleRtpRtcpImpl>(configuration);
								}



absl::optional<Syncable::Info> ChannelReceive::GetSyncInfo() const {
  RTC_DCHECK(module_process_thread_checker_.IsCurrent());
  Syncable::Info info;
  if (_rtpRtcpModule->RemoteNTP(&info.capture_time_ntp_secs, // webrtc::ModuleRtpRtcpImpl::RemoteNTP
                                &info.capture_time_ntp_frac, nullptr, nullptr,
                                &info.capture_time_source_clock) != 0) {
    return absl::nullopt;
  }
  {
    // last_received_rtp_timestamp_: RTP timestamp of the most recently received RTP packet; updated in ChannelReceive::OnRtpPacket.
    // last_received_rtp_system_time_ms_: local receive time (ms) of that packet; updated in ChannelReceive::OnRtpPacket.
    rtc::CritScope cs(&sync_info_lock_);
    if (!last_received_rtp_timestamp_ || !last_received_rtp_system_time_ms_) {
      return absl::nullopt;
    }
    info.latest_received_capture_timestamp = *last_received_rtp_timestamp_;
    info.latest_receive_time_ms = *last_received_rtp_system_time_ms_;
  }
  return info;
}


										int32_t ModuleRtpRtcpImpl::RemoteNTP(uint32_t* received_ntpsecs,
										                                     uint32_t* received_ntpfrac,
										                                     uint32_t* rtcp_arrival_time_secs,
										                                     uint32_t* rtcp_arrival_time_frac,
										                                     uint32_t* rtcp_timestamp) const {
										  return rtcp_receiver_.NTP(received_ntpsecs, received_ntpfrac, // RTCPReceiver::NTP
										                            rtcp_arrival_time_secs, rtcp_arrival_time_frac,
										                            rtcp_timestamp)
										             ? 0
										             : -1;
										}
										
										// received_ntp_secs: seconds part of the NTP time in the most recently received SR
										// received_ntp_frac: fractional part (units of 1/2^32 s) of the NTP time in the most recently received SR
										// rtcp_arrival_time_secs: seconds part of the local NTP time when that SR was received
										// rtcp_arrival_time_frac: fractional part (units of 1/2^32 s) of the local NTP time when that SR was received
										// rtcp_timestamp: RTP timestamp in the most recently received SR
										bool RTCPReceiver::NTP(uint32_t* received_ntp_secs,
										                       uint32_t* received_ntp_frac,
										                       uint32_t* rtcp_arrival_time_secs,
										                       uint32_t* rtcp_arrival_time_frac,
										                       uint32_t* rtcp_timestamp) const {
										  rtc::CritScope lock(&rtcp_receiver_lock_);
										  if (!last_received_sr_ntp_.Valid())
										    return false;
										
										  // NTP from incoming SenderReport.
										  if (received_ntp_secs)
										    *received_ntp_secs = remote_sender_ntp_time_.seconds();
										  if (received_ntp_frac)
										    *received_ntp_frac = remote_sender_ntp_time_.fractions();
										
										  // Rtp time from incoming SenderReport.
										  if (rtcp_timestamp)
										    *rtcp_timestamp = remote_sender_rtp_time_;
										
										  // Local NTP time when we received a RTCP packet with a send block.
										  if (rtcp_arrival_time_secs)
										    *rtcp_arrival_time_secs = last_received_sr_ntp_.seconds();
										  if (rtcp_arrival_time_frac)
										    *rtcp_arrival_time_frac = last_received_sr_ntp_.fractions();
										
										  return true;
										}
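
The (secs, frac) pair is a 64-bit NTP timestamp: secs holds whole seconds and frac counts units of 1/2^32 second. A minimal sketch of the conversion to milliseconds, mirroring what webrtc::NtpTime::ToMs does:

#include <cstdint>

int64_t NtpToMs(uint32_t secs, uint32_t frac) {
  // 2^32 fractional units per second => 2^32 / 1000 units per millisecond.
  const double kNtpFracPerMs = 4.294967296E6;
  return 1000 * static_cast<int64_t>(secs) +
         static_cast<int64_t>(static_cast<double>(frac) / kNtpFracPerMs + 0.5);
}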



uint32_t ChannelReceive::GetDelayEstimate() const {
  RTC_DCHECK(worker_thread_checker_.IsCurrent() ||
             module_process_thread_checker_.IsCurrent());
  rtc::CritScope lock(&video_sync_lock_);
  return acm_receiver_.FilteredCurrentDelayMs() + playout_delay_ms_; // AcmReceiver::FilteredCurrentDelayMs
}


int AcmReceiver::FilteredCurrentDelayMs() const {
  return neteq_->FilteredCurrentDelayMs(); // NetEqImpl::FilteredCurrentDelayMs
}

int NetEqImpl::FilteredCurrentDelayMs() const {
  rtc::CritScope lock(&crit_sect_);
  // Sum up the filtered packet buffer level with the future length of the sync
  // buffer.
  const int delay_samples = buffer_level_filter_->filtered_current_level() +
                            sync_buffer_->FutureLength();
  // The division below will truncate. The return value is in ms.
  return delay_samples / rtc::CheckedDivExact(fs_hz_, 1000);
}
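
For example, at fs_hz_ = 48000 the divisor is 48 samples per millisecond, so a filtered buffer level plus sync-buffer future length totalling 4800 samples reports a delay of 100 ms.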

// Updating the measurements:
RtpStreamsSynchronizer::Process
===>
UpdateMeasurements(&audio_measurement_, *audio_info)


bool UpdateMeasurements(StreamSynchronization::Measurements* stream,
                        const Syncable::Info& info) {
  RTC_DCHECK(stream);
  stream->latest_timestamp = info.latest_received_capture_timestamp;
  stream->latest_receive_time_ms = info.latest_receive_time_ms;
  bool new_rtcp_sr = false;
  if (!stream->rtp_to_ntp.UpdateMeasurements( // RtpToNtpEstimator::UpdateMeasurements
          info.capture_time_ntp_secs, info.capture_time_ntp_frac,
          info.capture_time_source_clock, &new_rtcp_sr)) {
    return false;
  }
  return true;
}

// From the call above:
// ntp_secs: seconds part of the NTP time in the most recently received SR, as observed at the receiver
// ntp_frac: fractional part (units of 1/2^32 s) of that NTP time
// rtp_timestamp: RTP timestamp in the most recently received SR
bool RtpToNtpEstimator::UpdateMeasurements(uint32_t ntp_secs,
                                           uint32_t ntp_frac,
                                           uint32_t rtp_timestamp,
                                           bool* new_rtcp_sr) {
  *new_rtcp_sr = false;

  int64_t unwrapped_rtp_timestamp = unwrapper_.Unwrap(rtp_timestamp);

  RtcpMeasurement new_measurement(ntp_secs, ntp_frac, unwrapped_rtp_timestamp);

  if (Contains(measurements_, new_measurement)) {
    // RTCP SR report already added.
    return true;
  }

  if (!new_measurement.ntp_time.Valid())
    return false;

  int64_t ntp_ms_new = new_measurement.ntp_time.ToMs();
  bool invalid_sample = false;
  if (!measurements_.empty()) {
    int64_t old_rtp_timestamp = measurements_.front().unwrapped_rtp_timestamp;
    int64_t old_ntp_ms = measurements_.front().ntp_time.ToMs();
    if (ntp_ms_new <= old_ntp_ms ||
        ntp_ms_new > old_ntp_ms + kMaxAllowedRtcpNtpIntervalMs) {
      invalid_sample = true;
    } else if (unwrapped_rtp_timestamp <= old_rtp_timestamp) {
      RTC_LOG(LS_WARNING)
          << "Newer RTCP SR report with older RTP timestamp, dropping";
      invalid_sample = true;
    } else if (unwrapped_rtp_timestamp - old_rtp_timestamp > (1 << 25)) {
      // Sanity check. No jumps too far into the future in rtp.
      invalid_sample = true;
    }
  }

  if (invalid_sample) {
    ++consecutive_invalid_samples_;
    if (consecutive_invalid_samples_ < kMaxInvalidSamples) {
      return false;
    }
    RTC_LOG(LS_WARNING) << "Multiple consecutively invalid RTCP SR reports, "
                           "clearing measurements.";
    measurements_.clear();
    params_ = absl::nullopt;
  }
  consecutive_invalid_samples_ = 0;

  // Insert new RTCP SR report.
  if (measurements_.size() == kNumRtcpReportsToUse)
    measurements_.pop_back();

  measurements_.push_front(new_measurement);
  *new_rtcp_sr = true;

  // List updated, calculate new parameters.
  UpdateParameters();
  return true;
}
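
These stored measurements feed UpdateParameters(), which fits a linear RTP-to-NTP mapping; ComputeRelativeDelay() then calls rtp_to_ntp.Estimate() on the latest RTP timestamp. Conceptually the estimate looks like this (a sketch, not the exact m79 code; frequency_khz and offset_ms stand for the fitted parameters):

#include <cstdint>

// ntp_ms ~= unwrapped_rtp / frequency_khz + offset_ms, where frequency_khz is
// the estimated RTP clock rate (e.g. ~90 for 90 kHz video RTP clocks) and
// offset_ms anchors the RTP timeline to the sender's NTP clock.
bool EstimateNtpMs(int64_t unwrapped_rtp, double frequency_khz,
                   double offset_ms, int64_t* ntp_ms) {
  if (frequency_khz <= 0.0)
    return false;
  *ntp_ms = static_cast<int64_t>(
      static_cast<double>(unwrapped_rtp) / frequency_khz + offset_ms + 0.5);
  return true;
}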

 

4 VideoReceiveStream::GetInfo 

absl::optional<Syncable::Info> VideoReceiveStream::GetInfo() const {
  RTC_DCHECK_RUN_ON(&module_process_sequence_checker_);
  absl::optional<Syncable::Info> info =
      rtp_video_stream_receiver_.GetSyncInfo(); // RtpVideoStreamReceiver::GetSyncInfo

  if (!info)
    return absl::nullopt;

  info->current_delay_ms = timing_->TargetVideoDelay(); // VCMTiming::TargetVideoDelay
  return info;
}


absl::optional<Syncable::Info> RtpVideoStreamReceiver::GetSyncInfo() const {
  Syncable::Info info;
  if (rtp_rtcp_->RemoteNTP(&info.capture_time_ntp_secs, // ModuleRtpRtcpImpl::RemoteNTP
                           &info.capture_time_ntp_frac, nullptr, nullptr,
                           &info.capture_time_source_clock) != 0) {
    return absl::nullopt;
  }
  {
    // last_received_rtp_timestamp_: RTP timestamp of the most recently received RTP packet; updated in RtpVideoStreamReceiver::OnRtpPacket.
    // last_received_rtp_system_time_ms_: local receive time (ms) of that packet; updated in RtpVideoStreamReceiver::OnRtpPacket.
    rtc::CritScope lock(&sync_info_lock_);
    if (!last_received_rtp_timestamp_ || !last_received_rtp_system_time_ms_) {
      return absl::nullopt;
    }
    info.latest_received_capture_timestamp = *last_received_rtp_timestamp_;
    info.latest_receive_time_ms = *last_received_rtp_system_time_ms_;
  }

  // Leaves info.current_delay_ms uninitialized.
  return info;
}



															// ModuleRtpRtcpImpl::RemoteNTP and RTCPReceiver::NTP are the same functions
															// already shown in section 3 above.



int VCMTiming::TargetVideoDelay() const {
  rtc::CritScope cs(&crit_sect_);
  return TargetDelayInternal();
}


int VCMTiming::TargetDelayInternal() const {
  return std::max(min_playout_delay_ms_,
                  jitter_delay_ms_ + RequiredDecodeTimeMs() + render_delay_ms_);
}
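
For example, if the synchronizer has requested min_playout_delay_ms_ = 180 via SetMinimumPlayoutDelay while jitter_delay_ms_ + RequiredDecodeTimeMs() + render_delay_ms_ only sums to 70 ms, the target becomes max(180, 70) = 180 ms; this minimum-delay lever is exactly how ComputeDelays() stretches video (or audio) until the pair lines up.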


// Updating the measurements:
RtpStreamsSynchronizer::Process
===>
UpdateMeasurements(&video_measurement_, *video_info)


// UpdateMeasurements and RtpToNtpEstimator::UpdateMeasurements are the same
// functions already shown in section 3 above.

5 Related Class Diagrams

(Class diagram images omitted.)
