WebRTC M79: the video stream receive path

This walkthrough traces a received video RTP packet bottom-up through the WebRTC M79 sources, from the socket read event all the way to insertion into the frame buffer. The inline comments note where each signal along the chain is connected.

void SocketDispatcher::OnEvent(uint32_t ff, int err) {
#if defined(WEBRTC_USE_EPOLL)
  // Remember currently enabled events so we can combine multiple changes
  // into one update call later.
  // The signal handlers might re-enable events disabled here, so we can't
  // keep a list of events to disable at the end of the method. This list
  // would not be updated with the events enabled by the signal handlers.
  StartBatchedEventUpdates();
#endif
  // Make sure we deliver connect/accept first. Otherwise, consumers may see
  // something like a READ followed by a CONNECT, which would be odd.
  if ((ff & DE_CONNECT) != 0) {
    DisableEvents(DE_CONNECT);
    SignalConnectEvent(this);
  }
  if ((ff & DE_ACCEPT) != 0) {
    DisableEvents(DE_ACCEPT);
    SignalReadEvent(this); 
  } 
  if ((ff & DE_READ) != 0) {
    DisableEvents(DE_READ);
    // BasicPacketSocketFactory::CreateUdpSocket hands this SocketDispatcher
    // to an AsyncUDPSocket, whose constructor connects SignalReadEvent and
    // SignalWriteEvent to AsyncUDPSocket::OnReadEvent and
    // AsyncUDPSocket::OnWriteEvent respectively.
    SignalReadEvent(this);
  }
  if ((ff & DE_WRITE) != 0) {
    DisableEvents(DE_WRITE);
    SignalWriteEvent(this);
  }
  if ((ff & DE_CLOSE) != 0) {
    // The socket is now dead to us, so stop checking it.
    SetEnabledEvents(0);
    SignalCloseEvent(this, err);
  }
#if defined(WEBRTC_USE_EPOLL)
  FinishBatchedEventUpdates();
#endif
}
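
For reference, the wiring described in the DE_READ comment above. A condensed sketch of the AsyncUDPSocket constructor (paraphrased from rtc_base/async_udp_socket.cc), showing where SignalReadEvent gets bound:

// |socket| is the SocketDispatcher created in
// BasicPacketSocketFactory::CreateUdpSocket.
AsyncUDPSocket::AsyncUDPSocket(AsyncSocket* socket) : socket_(socket) {
  size_ = BUF_SIZE;
  buf_ = new char[size_];
  // The socket should start out readable but not writable.
  socket_->SignalReadEvent.connect(this, &AsyncUDPSocket::OnReadEvent);
  socket_->SignalWriteEvent.connect(this, &AsyncUDPSocket::OnWriteEvent);
}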


void AsyncUDPSocket::OnReadEvent(AsyncSocket* socket) {
  RTC_DCHECK(socket_.get() == socket);

  SocketAddress remote_addr;
  int64_t timestamp;
  int len = socket_->RecvFrom(buf_, size_, &remote_addr, &timestamp);
  if (len < 0) {
    // An error here typically means we got an ICMP error in response to our
    // send datagram, indicating the remote address was unreachable.
    // When doing ICE, this kind of thing will often happen.
    // TODO: Do something better like forwarding the error to the user.
    SocketAddress local_addr = socket_->GetLocalAddress();
    RTC_LOG(LS_INFO) << "AsyncUDPSocket[" << local_addr.ToSensitiveString()
                     << "] receive failed with error " << socket_->GetError();
    return;
  }

  // TODO: Make sure that we got all of the packet.
  // If we did not, then we should resize our buffer to be large enough.
  // SignalReadPacket is inherited from the parent class AsyncPacketSocket; it
  // is connected in AllocationSequence::Init, which binds it to
  // AllocationSequence::OnReadPacket.
  SignalReadPacket(this, buf_, static_cast<size_t>(len), remote_addr,
                   (timestamp > -1 ? timestamp : TimeMicros()));
}
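
Where that binding happens: a condensed sketch of AllocationSequence::Init (paraphrased from p2p/client/basic_port_allocator.cc; only the shared-socket path is shown):

void AllocationSequence::Init() {
  if (IsFlagSet(PORTALLOCATOR_ENABLE_SHARED_SOCKET)) {
    udp_socket_.reset(session_->socket_factory()->CreateUdpSocket(
        rtc::SocketAddress(network_->GetBestIP(), 0),
        session_->allocator()->min_port(),
        session_->allocator()->max_port()));
    if (udp_socket_) {
      udp_socket_->SignalReadPacket.connect(this,
                                            &AllocationSequence::OnReadPacket);
    }
    // Candidate gathering continues even if the shared socket could not be
    // created; TCP-based ports are still available.
  }
}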


void AllocationSequence::OnReadPacket(rtc::AsyncPacketSocket* socket,
                                      const char* data,
                                      size_t size,
                                      const rtc::SocketAddress& remote_addr,
                                      const int64_t& packet_time_us) {
  RTC_DCHECK(socket == udp_socket_.get());  // udp_socket_ is an AsyncPacketSocket* that actually points to the AsyncUDPSocket.

  bool turn_port_found = false;

  // Try to find the TurnPort that matches the remote address. Note that the
  // message could be a STUN binding response if the TURN server is also used as
  // a STUN server. We don't want to parse every message here to check if it is
  // a STUN binding response, so we pass the message to TurnPort regardless of
  // the message type. The TurnPort will just ignore the message since it will
  // not find any request by transaction ID.
  for (auto* port : relay_ports_) {
    if (port->CanHandleIncomingPacketsFrom(remote_addr)) {
      if (port->HandleIncomingPacket(socket, data, size, remote_addr,
                                     packet_time_us)) {
        return;
      }
      turn_port_found = true;
    }
  }

  if (udp_port_) {  // udp_port_ is a UDPPort*; UDPPort holds an rtc::AsyncPacketSocket* that in fact points to the AsyncUDPSocket.
    const ServerAddresses& stun_servers = udp_port_->server_addresses();

    // Pass the packet to the UdpPort if there is no matching TurnPort, or if
    // the TURN server is also a STUN server.
    if (!turn_port_found ||
        stun_servers.find(remote_addr) != stun_servers.end()) {
      RTC_DCHECK(udp_port_->SharedSocket());
      udp_port_->HandleIncomingPacket(socket, data, size, remote_addr,
                                      packet_time_us); // UDPPort::HandleIncomingPacket
    }
  }
}



bool UDPPort::HandleIncomingPacket(rtc::AsyncPacketSocket* socket,
                                   const char* data,
                                   size_t size,
                                   const rtc::SocketAddress& remote_addr,
                                   int64_t packet_time_us) {
  // All packets given to UDP port will be consumed.
  OnReadPacket(socket, data, size, remote_addr, packet_time_us);  
  return true;
}


void UDPPort::OnReadPacket(rtc::AsyncPacketSocket* socket,
                           const char* data,
                           size_t size,
                           const rtc::SocketAddress& remote_addr,
                           const int64_t& packet_time_us) {
  RTC_DCHECK(socket == socket_);
  RTC_DCHECK(!remote_addr.IsUnresolvedIP());

  // Look for a response from the STUN server.
  // Even if the response doesn't match one of our outstanding requests, we
  // will eat it because it might be a response to a retransmitted packet, and
  // we already cleared the request when we got the first response.
  if (server_addresses_.find(remote_addr) != server_addresses_.end()) {
    requests_.CheckResponse(data, size);
    return;
  }

  if (Connection* conn = GetConnection(remote_addr)) {
    conn->OnReadPacket(data, size, packet_time_us);
  } else {
    Port::OnReadPacket(data, size, remote_addr, PROTO_UDP);
  }
}


void Connection::OnReadPacket(const char* data,
                              size_t size,
                              int64_t packet_time_us) {
  std::unique_ptr<IceMessage> msg;
  std::string remote_ufrag;
  const rtc::SocketAddress& addr(remote_candidate_.address());
  if (!port_->GetStunMessage(data, size, addr, &msg, &remote_ufrag)) {
    // The packet did not parse as a valid STUN message
    // This is a data packet, pass it along.
    last_data_received_ = rtc::TimeMillis();
    UpdateReceiving(last_data_received_);
    recv_rate_tracker_.AddSamples(size);
    // SignalReadPacket is connected in P2PTransportChannel::AddConnection:
    //   connection->SignalReadPacket.connect(this,
    //                                        &P2PTransportChannel::OnReadPacket);
    SignalReadPacket(this, data, size, packet_time_us);

    // If timed out sending writability checks, start up again
    if (!pruned_ && (write_state_ == STATE_WRITE_TIMEOUT)) {
      RTC_LOG(LS_WARNING)
          << "Received a data packet on a timed-out Connection. "
             "Resetting state to STATE_WRITE_INIT.";
      set_write_state(STATE_WRITE_INIT);
    }
  } else if (!msg) {
    // The packet was STUN, but failed a check and was handled internally.
  } else {
    // The packet is STUN and passed the Port checks.
    // Perform our own checks to ensure this packet is valid.
    // If this is a STUN request, then update the receiving bit and respond.
    // If this is a STUN response, then update the writable bit.
    // Log at LS_INFO if we receive a ping on an unwritable connection.
    rtc::LoggingSeverity sev = (!writable() ? rtc::LS_INFO : rtc::LS_VERBOSE);
    switch (msg->type()) {
      case STUN_BINDING_REQUEST:
        RTC_LOG_V(sev) << ToString() << ": Received STUN ping, id="
                       << rtc::hex_encode(msg->transaction_id());

        if (remote_ufrag == remote_candidate_.username()) {
          HandleBindingRequest(msg.get());
        } else {
          // The packet had the right local username, but the remote username
          // was not the right one for the remote address.
          RTC_LOG(LS_ERROR)
              << ToString()
              << ": Received STUN request with bad remote username "
              << remote_ufrag;
          port_->SendBindingErrorResponse(msg.get(), addr,
                                          STUN_ERROR_UNAUTHORIZED,
                                          STUN_ERROR_REASON_UNAUTHORIZED);
        }
        break;

      // Response from remote peer. Does it match request sent?
      // This doesn't just check, it makes callbacks if transaction
      // id's match.
      case STUN_BINDING_RESPONSE:
      case STUN_BINDING_ERROR_RESPONSE:
        if (msg->ValidateMessageIntegrity(data, size,
                                          remote_candidate().password())) {
          requests_.CheckResponse(msg.get());
        }
        // Otherwise silently discard the response message.
        break;

      // Remote end point sent a STUN indication instead of a regular binding
      // request. In this case |last_ping_received_| will be updated but no
      // response will be sent.
      case STUN_BINDING_INDICATION:
        ReceivedPing(msg->transaction_id());
        break;

      default:
        RTC_NOTREACHED();
        break;
    }
  }
}
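
Where the SignalReadPacket connection noted above is made: a trimmed sketch of P2PTransportChannel::AddConnection (paraphrased; some per-connection signal hookups are abridged):

void P2PTransportChannel::AddConnection(Connection* connection) {
  RTC_DCHECK_RUN_ON(network_thread_);
  connections_.push_back(connection);
  unpinged_connections_.insert(connection);
  connection->set_remote_ice_mode(remote_ice_mode_);
  connection->SignalReadPacket.connect(this,
                                       &P2PTransportChannel::OnReadPacket);
  connection->SignalReadyToSend.connect(this,
                                        &P2PTransportChannel::OnReadyToSend);
  connection->SignalStateChange.connect(
      this, &P2PTransportChannel::OnConnectionStateChange);
  connection->SignalDestroyed.connect(
      this, &P2PTransportChannel::OnConnectionDestroyed);
  had_connection_ = true;
}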


void P2PTransportChannel::OnReadPacket(Connection* connection,
                                       const char* data,
                                       size_t len,
                                       int64_t packet_time_us) {
  RTC_DCHECK_RUN_ON(network_thread_);

  // Do not deliver, if packet doesn't belong to the correct transport channel.
  if (!FindConnection(connection))
    return;

  // Let the client know of an incoming packet.
  // SignalReadPacket is connected in DtlsTransport::ConnectToIceTransport:
  //   ice_transport_->SignalReadPacket.connect(this, &DtlsTransport::OnReadPacket);
  SignalReadPacket(this, data, len, packet_time_us, 0);

  // May need to switch the sending connection based on the receiving media path
  // if this is the controlled side.
  if (ice_role_ == ICEROLE_CONTROLLED) {
    MaybeSwitchSelectedConnection(connection, "data received");
  }
}
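
The next hop is wired in DtlsTransport::ConnectToIceTransport, which hands ICE-level packets to the DtlsTransport::OnReadPacket shown below. A trimmed sketch (paraphrased, signal list abridged):

void DtlsTransport::ConnectToIceTransport() {
  RTC_DCHECK(ice_transport_);
  ice_transport_->SignalWritableState.connect(this,
                                              &DtlsTransport::OnWritableState);
  ice_transport_->SignalReadPacket.connect(this, &DtlsTransport::OnReadPacket);
  ice_transport_->SignalSentPacket.connect(this, &DtlsTransport::OnSentPacket);
  ice_transport_->SignalReadyToSend.connect(this,
                                            &DtlsTransport::OnReadyToSend);
  ice_transport_->SignalReceivingState.connect(
      this, &DtlsTransport::OnReceivingState);
}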


void DtlsTransport::OnReadPacket(rtc::PacketTransportInternal* transport,
                                 const char* data,
                                 size_t size,
                                 const int64_t& packet_time_us,
                                 int flags) {
  RTC_DCHECK_RUN_ON(&thread_checker_);
  RTC_DCHECK(transport == ice_transport_);
  RTC_DCHECK(flags == 0);

  if (!dtls_active_) {
    // Not doing DTLS.
    SignalReadPacket(this, data, size, packet_time_us, 0);
    return;
  }

  switch (dtls_state()) {
    case DTLS_TRANSPORT_NEW:
      if (dtls_) {
        RTC_LOG(LS_INFO) << ToString()
                         << ": Packet received before DTLS started.";
      } else {
        RTC_LOG(LS_WARNING) << ToString()
                            << ": Packet received before we know if we are "
                               "doing DTLS or not.";
      }
      // Cache a client hello packet received before DTLS has actually started.
      if (IsDtlsClientHelloPacket(data, size)) {
        RTC_LOG(LS_INFO) << ToString()
                         << ": Caching DTLS ClientHello packet until DTLS is "
                            "started.";
        cached_client_hello_.SetData(data, size);
        // If we haven't started setting up DTLS yet (because we don't have a
        // remote fingerprint/role), we can use the client hello as a clue that
        // the peer has chosen the client role, and proceed with the handshake.
        // The fingerprint will be verified when it's set.
        if (!dtls_ && local_certificate_) {
          SetDtlsRole(rtc::SSL_SERVER);
          SetupDtls();
        }
      } else {
        RTC_LOG(LS_INFO) << ToString()
                         << ": Not a DTLS ClientHello packet; dropping.";
      }
      break;

    case DTLS_TRANSPORT_CONNECTING:
    case DTLS_TRANSPORT_CONNECTED:
      // We should only get DTLS or SRTP packets; STUN's already been demuxed.
      // Is this potentially a DTLS packet?
      if (IsDtlsPacket(data, size)) {
        if (!HandleDtlsPacket(data, size)) {
          RTC_LOG(LS_ERROR) << ToString() << ": Failed to handle DTLS packet.";
          return;
        }
      } else {
        // Not a DTLS packet; our handshake should be complete by now.
        if (dtls_state() != DTLS_TRANSPORT_CONNECTED) {
          RTC_LOG(LS_ERROR) << ToString()
                            << ": Received non-DTLS packet before DTLS "
                               "complete.";
          return;
        }

        // And it had better be a SRTP packet.
        if (!IsRtpPacket(data, size)) {
          RTC_LOG(LS_ERROR)
              << ToString() << ": Received unexpected non-DTLS packet.";
          return;
        }

        // Sanity check.
        RTC_DCHECK(!srtp_ciphers_.empty());

        // Signal this upwards as a bypass packet.
        // SignalReadPacket is connected in RtpTransport::SetRtpPacketTransport:
        //   new_packet_transport->SignalReadPacket.connect(
        //       this, &RtpTransport::OnReadPacket);
        // where |this| is in fact the webrtc::DtlsSrtpTransport instance.
        SignalReadPacket(this, data, size, packet_time_us, PF_SRTP_BYPASS);
      }
      break;
    case DTLS_TRANSPORT_FAILED:
    case DTLS_TRANSPORT_CLOSED:
      // This shouldn't be happening. Drop the packet.
      break;
  }
}


// The inheritance chain of webrtc::DtlsSrtpTransport:
class webrtc::RtpTransport : public webrtc::RtpTransportInternal
class webrtc::SrtpTransport : public webrtc::RtpTransport
class webrtc::DtlsSrtpTransport : public webrtc::SrtpTransport
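
This hierarchy is what makes the OnRtpPacketReceived call at the end of RtpTransport::OnReadPacket below polymorphic: the handler is virtual in RtpTransport and overridden in SrtpTransport, and the live object is a DtlsSrtpTransport. A minimal sketch of the relevant declarations (simplified, other members omitted):

class RtpTransport : public RtpTransportInternal {
 protected:
  // Overridable so that subclasses can decrypt before demuxing.
  virtual void OnRtpPacketReceived(rtc::CopyOnWriteBuffer packet,
                                   int64_t packet_time_us);
};

class SrtpTransport : public RtpTransport {
 protected:
  // Runs SRTP unprotect, then hands the plain RTP packet to DemuxPacket.
  void OnRtpPacketReceived(rtc::CopyOnWriteBuffer packet,
                           int64_t packet_time_us) override;
};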


void RtpTransport::OnReadPacket(rtc::PacketTransportInternal* transport,
                                const char* data,
                                size_t len,
                                const int64_t& packet_time_us,
                                int flags) {
  TRACE_EVENT0("webrtc", "RtpTransport::OnReadPacket");

  // When using RTCP multiplexing we might get RTCP packets on the RTP
  // transport. We check the RTP payload type to determine if it is RTCP.
  auto array_view = rtc::MakeArrayView(data, len);
  cricket::RtpPacketType packet_type = cricket::InferRtpPacketType(array_view);
  // Filter out the packet that is neither RTP nor RTCP.
  if (packet_type == cricket::RtpPacketType::kUnknown) {
    return;
  }

  // Protect ourselves against crazy data.
  if (!cricket::IsValidRtpPacketSize(packet_type, len)) {
    RTC_LOG(LS_ERROR) << "Dropping incoming "
                      << cricket::RtpPacketTypeToString(packet_type)
                      << " packet: wrong size=" << len;
    return;
  }

  rtc::CopyOnWriteBuffer packet(data, len);
  if (packet_type == cricket::RtpPacketType::kRtcp) {
    OnRtcpPacketReceived(std::move(packet), packet_time_us); 
  } else {
    OnRtpPacketReceived(std::move(packet), packet_time_us);  // Virtual dispatch: lands in SrtpTransport::OnRtpPacketReceived.
  }
}
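
For context on the RTP/RTCP split above: with RTCP muxing (RFC 5761) both kinds of packets arrive on the same transport, and cricket::InferRtpPacketType tells them apart by the payload-type byte. A self-contained illustration of the rule (my own sketch, not the WebRTC helper itself):

#include <cstddef>
#include <cstdint>

// RFC 5761: RTCP packet types 192..223 collide with RTP payload types
// 64..95 (the RTP marker bit aliases the high bit of the RTCP packet type),
// so that payload-type range is reserved for RTCP when RTP and RTCP share a
// port.
bool LooksLikeRtcp(const uint8_t* data, size_t len) {
  if (len < 2)
    return false;
  const uint8_t payload_type = data[1] & 0x7F;  // Marker bit masked off.
  return payload_type >= 64 && payload_type <= 95;
}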



void SrtpTransport::OnRtpPacketReceived(rtc::CopyOnWriteBuffer packet,
                                        int64_t packet_time_us) {
  if (!IsSrtpActive()) {
    RTC_LOG(LS_WARNING)
        << "Inactive SRTP transport received an RTP packet. Drop it.";
    return;
  }
  TRACE_EVENT0("webrtc", "SRTP Decode");
  char* data = packet.data<char>();
  int len = rtc::checked_cast<int>(packet.size());
  if (!UnprotectRtp(data, len, &len)) { 
    int seq_num = -1;
    uint32_t ssrc = 0;
    cricket::GetRtpSeqNum(data, len, &seq_num);
    cricket::GetRtpSsrc(data, len, &ssrc);

    // Limit the error logging to avoid excessive logs when there are lots of
    // bad packets.
    const int kFailureLogThrottleCount = 100;
    if (decryption_failure_count_ % kFailureLogThrottleCount == 0) {
      RTC_LOG(LS_ERROR) << "Failed to unprotect RTP packet: size=" << len
                        << ", seqnum=" << seq_num << ", SSRC=" << ssrc
                        << ", previous failure count: "
                        << decryption_failure_count_;
    }
    ++decryption_failure_count_;
    return;
  }
  packet.SetSize(len);
  DemuxPacket(std::move(packet), packet_time_us); 
}


void RtpTransport::DemuxPacket(rtc::CopyOnWriteBuffer packet,
                               int64_t packet_time_us) {
  webrtc::RtpPacketReceived parsed_packet(&header_extension_map_);
  if (!parsed_packet.Parse(std::move(packet))) {
    RTC_LOG(LS_ERROR)
        << "Failed to parse the incoming RTP packet before demuxing. Drop it.";
    return;
  }

  if (packet_time_us != -1) {
    parsed_packet.set_arrival_time_ms((packet_time_us + 500) / 1000);
  }
  if (!rtp_demuxer_.OnRtpPacket(parsed_packet)) {
    RTC_LOG(LS_WARNING) << "Failed to demux RTP packet: "
                        << RtpDemuxer::DescribePacket(parsed_packet);
  }
}


bool RtpDemuxer::OnRtpPacket(const RtpPacketReceived& packet) {
  RtpPacketSinkInterface* sink = ResolveSink(packet);
  if (sink != nullptr) {
    sink->OnRtpPacket(packet);  // Reference: https://blog.csdn.net/zhengbin6072/article/details/108411342
    return true;
  }
  return false;
}
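
The sink resolved at this level is the cricket::BaseChannel, which registered itself on the RtpTransport together with its demuxing criteria. A hedged sketch of that registration (paraphrased from pc/channel.cc; demuxer_criteria_ carries the channel's MID, payload types and SSRCs):

bool BaseChannel::RegisterRtpDemuxerSink() {
  RTC_DCHECK(rtp_transport_);
  return network_thread_->Invoke<bool>(RTC_FROM_HERE, [this] {
    // |this| implements webrtc::RtpPacketSinkInterface::OnRtpPacket.
    return rtp_transport_->RegisterRtpDemuxerSink(demuxer_criteria_, this);
  });
}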

void BaseChannel::OnRtpPacket(const webrtc::RtpPacketReceived& parsed_packet) {
  // Take packet time from the |parsed_packet|.
  // RtpPacketReceived.arrival_time_ms = (timestamp_us + 500) / 1000;
  int64_t packet_time_us = -1;
  if (parsed_packet.arrival_time_ms() > 0) {
    packet_time_us = parsed_packet.arrival_time_ms() * 1000;
  }

  if (!has_received_packet_) {
    has_received_packet_ = true;
    signaling_thread()->Post(RTC_FROM_HERE, this, MSG_FIRSTPACKETRECEIVED);
  }

  if (!srtp_active() && srtp_required_) {
    // Our session description indicates that SRTP is required, but we got a
    // packet before our SRTP filter is active. This means either that
    // a) we got SRTP packets before we received the SDES keys, in which case
    //    we can't decrypt it anyway, or
    // b) we got SRTP packets before DTLS completed on both the RTP and RTCP
    //    transports, so we haven't yet extracted keys, even if DTLS did
    //    complete on the transport that the packets are being sent on. It's
    //    really good practice to wait for both RTP and RTCP to be good to go
    //    before sending  media, to prevent weird failure modes, so it's fine
    //    for us to just eat packets here. This is all sidestepped if RTCP mux
    //    is used anyway.
    RTC_LOG(LS_WARNING) << "Can't process incoming RTP packet when "
                           "SRTP is inactive and crypto is required";
    return;
  }

  auto packet_buffer = parsed_packet.Buffer();

  invoker_.AsyncInvoke<void>(
      RTC_FROM_HERE, worker_thread_, [this, packet_buffer, packet_time_us] {
        RTC_DCHECK(worker_thread_->IsCurrent());
        media_channel_->OnPacketReceived(packet_buffer, packet_time_us);  // Taking video as the example: WebRtcVideoChannel::OnPacketReceived.
      });
}


void WebRtcVideoChannel::OnPacketReceived(rtc::CopyOnWriteBuffer packet,
                                          int64_t packet_time_us) {
  RTC_DCHECK_RUN_ON(&thread_checker_);
  const webrtc::PacketReceiver::DeliveryStatus delivery_result =
      call_->Receiver()->DeliverPacket(webrtc::MediaType::VIDEO, packet,
                                       packet_time_us);
  switch (delivery_result) {
    case webrtc::PacketReceiver::DELIVERY_OK:
      return;
    case webrtc::PacketReceiver::DELIVERY_PACKET_ERROR:
      return;
    case webrtc::PacketReceiver::DELIVERY_UNKNOWN_SSRC:
      break;
  }

  uint32_t ssrc = 0;
  if (!GetRtpSsrc(packet.cdata(), packet.size(), &ssrc)) {
    return;
  }

  if (unknown_ssrc_packet_buffer_) {
    unknown_ssrc_packet_buffer_->AddPacket(ssrc, packet_time_us, packet);
    return;
  }

  if (discard_unknown_ssrc_packets_) {
    return;
  }

  int payload_type = 0;
  if (!GetRtpPayloadType(packet.cdata(), packet.size(), &payload_type)) {
    return;
  }

  // See if this payload_type is registered as one that usually gets its own
  // SSRC (RTX) or at least is safe to drop either way (FEC). If it is, and
  // it wasn't handled above by DeliverPacket, that means we don't know what
  // stream it associates with, and we shouldn't ever create an implicit channel
  // for these.
  for (auto& codec : recv_codecs_) {
    if (payload_type == codec.rtx_payload_type ||
        payload_type == codec.ulpfec.red_rtx_payload_type ||
        payload_type == codec.ulpfec.ulpfec_payload_type) {
      return;
    }
  }
  if (payload_type == recv_flexfec_payload_type_) {
    return;
  }

  switch (unsignalled_ssrc_handler_->OnUnsignalledSsrc(this, ssrc)) {
    case UnsignalledSsrcHandler::kDropPacket:
      return;
    case UnsignalledSsrcHandler::kDeliverPacket:
      break;
  }

  if (call_->Receiver()->DeliverPacket(webrtc::MediaType::VIDEO, packet,
                                       packet_time_us) !=
      webrtc::PacketReceiver::DELIVERY_OK) {
    RTC_LOG(LS_WARNING) << "Failed to deliver RTP packet on re-delivery.";
    return;
  }
}


PacketReceiver::DeliveryStatus Call::DeliverRtp(MediaType media_type,
                                                rtc::CopyOnWriteBuffer packet,
                                                int64_t packet_time_us) {
  TRACE_EVENT0("webrtc", "Call::DeliverRtp");

  RtpPacketReceived parsed_packet;
  if (!parsed_packet.Parse(std::move(packet)))
    return DELIVERY_PACKET_ERROR;

  if (packet_time_us != -1) {
    if (receive_time_calculator_) {
      // Repair packet_time_us for clock resets by comparing a new read of
      // the same clock (TimeUTCMicros) to a monotonic clock reading.
      packet_time_us = receive_time_calculator_->ReconcileReceiveTimes(
          packet_time_us, rtc::TimeUTCMicros(), clock_->TimeInMicroseconds());
    }
    parsed_packet.set_arrival_time_ms((packet_time_us + 500) / 1000);
  } else {
    parsed_packet.set_arrival_time_ms(clock_->TimeInMilliseconds());
  }

  // We might get RTP keep-alive packets in accordance with RFC6263 section 4.6.
  // These are empty (zero length payload) RTP packets with an unsignaled
  // payload type.
  const bool is_keep_alive_packet = parsed_packet.payload_size() == 0;

  RTC_DCHECK(media_type == MediaType::AUDIO || media_type == MediaType::VIDEO ||
             is_keep_alive_packet);

  ReadLockScoped read_lock(*receive_crit_);
  auto it = receive_rtp_config_.find(parsed_packet.Ssrc());
  if (it == receive_rtp_config_.end()) {
    RTC_LOG(LS_ERROR) << "receive_rtp_config_ lookup failed for ssrc "
                      << parsed_packet.Ssrc();
    // Destruction of the receive stream, including deregistering from the
    // RtpDemuxer, is not protected by the |receive_crit_| lock. But
    // deregistering in the |receive_rtp_config_| map is protected by that lock.
    // So by not passing the packet on to demuxing in this case, we prevent
    // incoming packets to be passed on via the demuxer to a receive stream
    // which is being torn down.
    return DELIVERY_UNKNOWN_SSRC;
  }

  parsed_packet.IdentifyExtensions(it->second.extensions);

  NotifyBweOfReceivedPacket(parsed_packet, media_type);

  // RateCounters expect input parameter as int, save it as int,
  // instead of converting each time it is passed to RateCounter::Add below.
  int length = static_cast<int>(parsed_packet.size());
  if (media_type == MediaType::AUDIO) {
    if (audio_receiver_controller_.OnRtpPacket(parsed_packet)) {
      received_bytes_per_second_counter_.Add(length);
      received_audio_bytes_per_second_counter_.Add(length);
      event_log_->Log(
          std::make_unique<RtcEventRtpPacketIncoming>(parsed_packet));
      const int64_t arrival_time_ms = parsed_packet.arrival_time_ms();
      if (!first_received_rtp_audio_ms_) {
        first_received_rtp_audio_ms_.emplace(arrival_time_ms);
      }
      last_received_rtp_audio_ms_.emplace(arrival_time_ms);
      return DELIVERY_OK;
    }
  } else if (media_type == MediaType::VIDEO) {
    parsed_packet.set_payload_type_frequency(kVideoPayloadTypeFrequency);
    if (video_receiver_controller_.OnRtpPacket(parsed_packet)) {
      received_bytes_per_second_counter_.Add(length);
      received_video_bytes_per_second_counter_.Add(length);
      event_log_->Log(
          std::make_unique<RtcEventRtpPacketIncoming>(parsed_packet));
      const int64_t arrival_time_ms = parsed_packet.arrival_time_ms();
      if (!first_received_rtp_video_ms_) {
        first_received_rtp_video_ms_.emplace(arrival_time_ms);
      }
      last_received_rtp_video_ms_.emplace(arrival_time_ms);
      return DELIVERY_OK;
    }
  }
  return DELIVERY_UNKNOWN_SSRC;
}
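
One detail worth calling out in DeliverRtp above: set_arrival_time_ms((packet_time_us + 500) / 1000) is round-to-nearest, not truncation. A quick self-contained check:

// 1234567 us = 1234.567 ms; adding 500 before the integer divide rounds
// to the nearest millisecond instead of truncating.
static_assert((1234567 + 500) / 1000 == 1235, "rounds to nearest ms");
static_assert(1234567 / 1000 == 1234, "plain division would truncate");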


webrtc::VideoReceiveStream* Call::CreateVideoReceiveStream(
    webrtc::VideoReceiveStream::Config configuration) {
  TRACE_EVENT0("webrtc", "Call::CreateVideoReceiveStream");
  RTC_DCHECK_RUN_ON(&configuration_sequence_checker_);

  receive_side_cc_.SetSendPeriodicFeedback(
      SendPeriodicFeedback(configuration.rtp.extensions));

  RegisterRateObserver();

  // video_receiver_controller_ is handed to the stream here; it is the
  // RtpStreamReceiverController that Call::DeliverRtp dispatches to.
  VideoReceiveStream* receive_stream = new VideoReceiveStream(
      task_queue_factory_, &video_receiver_controller_, num_cpu_cores_,
      transport_send_ptr_->packet_router(), std::move(configuration),
      module_process_thread_.get(), call_stats_.get(), clock_);

  const webrtc::VideoReceiveStream::Config& config = receive_stream->config();
  {
    WriteLockScoped write_lock(*receive_crit_);
    if (config.rtp.rtx_ssrc) {
      // We record identical config for the rtx stream as for the main
      // stream. Since the transport_send_cc negotiation is per payload
      // type, we may get an incorrect value for the rtx stream, but
      // that is unlikely to matter in practice.
      receive_rtp_config_.emplace(config.rtp.rtx_ssrc,
                                  ReceiveRtpConfig(config));
    }
    receive_rtp_config_.emplace(config.rtp.remote_ssrc,
                                ReceiveRtpConfig(config));
    video_receive_streams_.insert(receive_stream);
    ConfigureSync(config.sync_group);
  }
  receive_stream->SignalNetworkState(video_network_state_);
  UpdateAggregateNetworkState();
  event_log_->Log(std::make_unique<RtcEventVideoReceiveStreamConfig>(
      CreateRtcLogStreamConfig(config)));
  return receive_stream;
}



bool RtpStreamReceiverController::OnRtpPacket(const RtpPacketReceived& packet) {
  rtc::CritScope cs(&lock_);
  return demuxer_.OnRtpPacket(packet);
}


bool RtpDemuxer::OnRtpPacket(const RtpPacketReceived& packet) {
  RtpPacketSinkInterface* sink = ResolveSink(packet);
  if (sink != nullptr) {
    sink->OnRtpPacket(packet);  // Here the sink is in fact the webrtc::RtpVideoStreamReceiver instance.
    return true;
  }
  return false;
}


How the RtpVideoStreamReceiver becomes that sink:

VideoReceiveStream::VideoReceiveStream
===>
    media_receiver_ = receiver_controller->CreateReceiver(
        config_.rtp.remote_ssrc,
        &rtp_video_stream_receiver_);  // rtp_video_stream_receiver_ is the webrtc::RtpVideoStreamReceiver.

std::unique_ptr<RtpStreamReceiverInterface>
RtpStreamReceiverController::CreateReceiver(uint32_t ssrc,
                                            RtpPacketSinkInterface* sink) {
  return std::make_unique<Receiver>(this, ssrc, sink);
}

RtpStreamReceiverController::Receiver::Receiver(
    RtpStreamReceiverController* controller,
    uint32_t ssrc,
    RtpPacketSinkInterface* sink)
    : controller_(controller), sink_(sink) {
  // |sink| is the webrtc::RtpVideoStreamReceiver.
  const bool sink_added = controller_->AddSink(ssrc, sink_);
  if (!sink_added) {
    RTC_LOG(LS_ERROR)
        << "RtpStreamReceiverController::Receiver::Receiver: Sink "
        << "could not be added for SSRC=" << ssrc << ".";
  }
}


void RtpVideoStreamReceiver::OnRtpPacket(const RtpPacketReceived& packet) {
  RTC_DCHECK_RUN_ON(&worker_task_checker_);

  if (!receiving_) {
    return;
  }

  if (!packet.recovered()) {
    // TODO(nisse): Exclude out-of-order packets?
    int64_t now_ms = clock_->TimeInMilliseconds();
    {
      rtc::CritScope cs(&sync_info_lock_);
      last_received_rtp_timestamp_ = packet.Timestamp();
      last_received_rtp_system_time_ms_ = now_ms;
    }
    // Periodically log the RTP header of incoming packets.
    if (now_ms - last_packet_log_ms_ > kPacketLogIntervalMs) {
      rtc::StringBuilder ss;
      ss << "Packet received on SSRC: " << packet.Ssrc()
         << " with payload type: " << static_cast<int>(packet.PayloadType())
         << ", timestamp: " << packet.Timestamp()
         << ", sequence number: " << packet.SequenceNumber()
         << ", arrival time: " << packet.arrival_time_ms();
      int32_t time_offset;
      if (packet.GetExtension<TransmissionOffset>(&time_offset)) {
        ss << ", toffset: " << time_offset;
      }
      uint32_t send_time;
      if (packet.GetExtension<AbsoluteSendTime>(&send_time)) {
        ss << ", abs send time: " << send_time;
      }
      RTC_LOG(LS_INFO) << ss.str();
      last_packet_log_ms_ = now_ms;
    }
  }

  ReceivePacket(packet); 

  // Update receive statistics after ReceivePacket.
  // Receive statistics will be reset if the payload type changes (make sure
  // that the first packet is included in the stats).
  if (!packet.recovered()) {
    rtp_receive_statistics_->OnRtpPacket(packet);
  }

  for (RtpPacketSinkInterface* secondary_sink : secondary_sinks_) {
    secondary_sink->OnRtpPacket(packet); 
  }
}


void RtpVideoStreamReceiver::ReceivePacket(const RtpPacketReceived& packet) {
  if (packet.payload_size() == 0) {
    // Padding or keep-alive packet.
    // TODO(nisse): Could drop empty packets earlier, but need to figure out how
    // they should be counted in stats.
    NotifyReceiverOfEmptyPacket(packet.SequenceNumber());
    return;
  }
  if (packet.PayloadType() == config_.rtp.red_payload_type) {
    ParseAndHandleEncapsulatingHeader(packet);  // RED/FEC handling and packet recovery.
    return;
  }

  const auto type_it = payload_type_map_.find(packet.PayloadType());
  if (type_it == payload_type_map_.end()) {
    return;
  }
  // Create the depacketizer matching this payload type's video codec.
  auto depacketizer =
      absl::WrapUnique(RtpDepacketizer::Create(type_it->second));

  if (!depacketizer) {
    RTC_LOG(LS_ERROR) << "Failed to create depacketizer.";
    return;
  }
  RtpDepacketizer::ParsedPayload parsed_payload;
  // Dispatches to e.g. RtpDepacketizerH264::Parse or RtpDepacketizerVp8::Parse.
  if (!depacketizer->Parse(&parsed_payload, packet.payload().data(),
                           packet.payload().size())) {
    RTC_LOG(LS_WARNING) << "Failed parsing payload.";
    return;
  }

  RTPHeader rtp_header;
  packet.GetHeader(&rtp_header);
  RTPVideoHeader video_header = parsed_payload.video_header();
  video_header.rotation = kVideoRotation_0;
  video_header.content_type = VideoContentType::UNSPECIFIED;
  video_header.video_timing.flags = VideoSendTiming::kInvalid;
  video_header.is_last_packet_in_frame = rtp_header.markerBit;
  video_header.frame_marking.temporal_id = kNoTemporalIdx;

  if (parsed_payload.video_header().codec == kVideoCodecVP9) {
    const RTPVideoHeaderVP9& codec_header = absl::get<RTPVideoHeaderVP9>(
        parsed_payload.video_header().video_type_header);
    video_header.is_last_packet_in_frame |= codec_header.end_of_frame;
    video_header.is_first_packet_in_frame |= codec_header.beginning_of_frame;
  }

  packet.GetExtension<VideoOrientation>(&video_header.rotation);
  packet.GetExtension<VideoContentTypeExtension>(&video_header.content_type);
  packet.GetExtension<VideoTimingExtension>(&video_header.video_timing);
  packet.GetExtension<PlayoutDelayLimits>(&video_header.playout_delay);
  packet.GetExtension<FrameMarkingExtension>(&video_header.frame_marking);

  // Color space should only be transmitted in the last packet of a frame,
  // therefore, neglect it otherwise so that last_color_space_ is not reset by
  // mistake.
  if (video_header.is_last_packet_in_frame) {
    video_header.color_space = packet.GetExtension<ColorSpaceExtension>();
    if (video_header.color_space ||
        video_header.frame_type == VideoFrameType::kVideoFrameKey) {
      // Store color space since it's only transmitted when changed or for key
      // frames. Color space will be cleared if a key frame is transmitted
      // without color space information.
      last_color_space_ = video_header.color_space;
    } else if (last_color_space_) {
      video_header.color_space = last_color_space_;
    }
  }

  absl::optional<RtpGenericFrameDescriptor> generic_descriptor_wire;
  generic_descriptor_wire.emplace();
  const bool generic_descriptor_v00 =
      packet.GetExtension<RtpGenericFrameDescriptorExtension00>(
          &generic_descriptor_wire.value());
  const bool generic_descriptor_v01 =
      packet.GetExtension<RtpGenericFrameDescriptorExtension01>(
          &generic_descriptor_wire.value());
  if (generic_descriptor_v00 && generic_descriptor_v01) {
    RTC_LOG(LS_WARNING) << "RTP packet had two different GFD versions.";
    return;
  }

  if (generic_descriptor_v00 || generic_descriptor_v01) {
    if (generic_descriptor_v00) {
      generic_descriptor_wire->SetByteRepresentation(
          packet.GetRawExtension<RtpGenericFrameDescriptorExtension00>());
    } else {
      generic_descriptor_wire->SetByteRepresentation(
          packet.GetRawExtension<RtpGenericFrameDescriptorExtension01>());
    }

    video_header.is_first_packet_in_frame =
        generic_descriptor_wire->FirstPacketInSubFrame();
    video_header.is_last_packet_in_frame =
        rtp_header.markerBit || generic_descriptor_wire->LastPacketInSubFrame();

    if (generic_descriptor_wire->FirstPacketInSubFrame()) {
      video_header.frame_type =
          generic_descriptor_wire->FrameDependenciesDiffs().empty()
              ? VideoFrameType::kVideoFrameKey
              : VideoFrameType::kVideoFrameDelta;
    }

    video_header.width = generic_descriptor_wire->Width();
    video_header.height = generic_descriptor_wire->Height();
  } else {
    generic_descriptor_wire.reset();
  }

  OnReceivedPayloadData(parsed_payload.payload, parsed_payload.payload_length,
                        rtp_header, video_header, generic_descriptor_wire,
                        packet.recovered());
}



int32_t RtpVideoStreamReceiver::OnReceivedPayloadData(
    const uint8_t* payload_data,
    size_t payload_size,
    const RTPHeader& rtp_header,
    const RTPVideoHeader& video_header,
    const absl::optional<RtpGenericFrameDescriptor>& generic_descriptor,
    bool is_recovered) {
  // RemoteNtpTimeEstimator::Estimate converts the RTP timestamp to NTP time.
  VCMPacket packet(payload_data, payload_size, rtp_header, video_header,
                   ntp_estimator_.Estimate(rtp_header.timestamp),
                   clock_->TimeInMilliseconds());
  packet.generic_descriptor = generic_descriptor;

  if (loss_notification_controller_) {
    if (is_recovered) {
      // TODO(bugs.webrtc.org/10336): Implement support for reordering.
      RTC_LOG(LS_INFO)
          << "LossNotificationController does not support reordering.";
    } else if (!generic_descriptor) {
      RTC_LOG(LS_WARNING) << "LossNotificationController requires generic "
                             "frame descriptor, but it is missing.";
    } else {
      loss_notification_controller_->OnReceivedPacket(rtp_header.sequenceNumber,
                                                      *generic_descriptor);
    }
  }

  if (nack_module_) {
    const bool is_keyframe =
        video_header.is_first_packet_in_frame &&
        video_header.frame_type == VideoFrameType::kVideoFrameKey;

    packet.timesNacked = nack_module_->OnReceivedPacket(
        rtp_header.sequenceNumber, is_keyframe, is_recovered);
  } else {
    packet.timesNacked = -1;
  }

  if (packet.sizeBytes == 0) {
    NotifyReceiverOfEmptyPacket(packet.seqNum);
    rtcp_feedback_buffer_.SendBufferedRtcpFeedback();
    return 0;
  }

  if (packet.codec() == kVideoCodecH264) {
    // Only when we start to receive packets will we know what payload type
    // that will be used. When we know the payload type insert the correct
    // sps/pps into the tracker.
    if (packet.payloadType != last_payload_type_) {
      last_payload_type_ = packet.payloadType;
      InsertSpsPpsIntoTracker(packet.payloadType);
    }

    switch (tracker_.CopyAndFixBitstream(&packet)) {
      case video_coding::H264SpsPpsTracker::kRequestKeyframe:
        rtcp_feedback_buffer_.RequestKeyFrame();
        rtcp_feedback_buffer_.SendBufferedRtcpFeedback();
        RTC_FALLTHROUGH();
      case video_coding::H264SpsPpsTracker::kDrop:
        return 0;
      case video_coding::H264SpsPpsTracker::kInsert:
        break;
    }

  } else {
    uint8_t* data = new uint8_t[packet.sizeBytes];
    memcpy(data, packet.dataPtr, packet.sizeBytes);
    packet.dataPtr = data;
  }

  rtcp_feedback_buffer_.SendBufferedRtcpFeedback();
  if (!packet_buffer_.InsertPacket(&packet)) { // video_coding::PacketBuffer packet_buffer_;
    RequestKeyFrame();
  }
  return 0;
}


bool PacketBuffer::InsertPacket(VCMPacket* packet) {
  std::vector<std::unique_ptr<RtpFrameObject>> found_frames;
  {
    rtc::CritScope lock(&crit_);

    OnTimestampReceived(packet->timestamp);

    uint16_t seq_num = packet->seqNum;
    size_t index = seq_num % size_;

    if (!first_packet_received_) {
      first_seq_num_ = seq_num;
      first_packet_received_ = true;
    } else if (AheadOf(first_seq_num_, seq_num)) {
      // If we have explicitly cleared past this packet then it's old,
      // don't insert it, just silently ignore it.
      if (is_cleared_to_first_seq_num_) {
        delete[] packet->dataPtr;
        packet->dataPtr = nullptr;
        return true;
      }

      first_seq_num_ = seq_num;
    }

    if (sequence_buffer_[index].used) {
      // Duplicate packet, just delete the payload.
      if (data_buffer_[index].seqNum == packet->seqNum) {
        delete[] packet->dataPtr;
        packet->dataPtr = nullptr;
        return true;
      }

      // The packet buffer is full, try to expand the buffer.
      while (ExpandBufferSize() && sequence_buffer_[seq_num % size_].used) {
      }
      index = seq_num % size_;

      // Packet buffer is still full since we were unable to expand the buffer.
      if (sequence_buffer_[index].used) {
        // Clear the buffer, delete payload, and return false to signal that a
        // new keyframe is needed.
        RTC_LOG(LS_WARNING) << "Clear PacketBuffer and request key frame.";
        Clear();
        delete[] packet->dataPtr;
        packet->dataPtr = nullptr;
        return false;
      }
    }

    sequence_buffer_[index].frame_begin = packet->is_first_packet_in_frame();
    sequence_buffer_[index].frame_end = packet->is_last_packet_in_frame();
    sequence_buffer_[index].seq_num = packet->seqNum;
    sequence_buffer_[index].continuous = false;
    sequence_buffer_[index].frame_created = false;
    sequence_buffer_[index].used = true;
    data_buffer_[index] = *packet;
    packet->dataPtr = nullptr;

    UpdateMissingPackets(packet->seqNum); 

    int64_t now_ms = clock_->TimeInMilliseconds();
    last_received_packet_ms_ = now_ms;
    if (packet->video_header.frame_type == VideoFrameType::kVideoFrameKey)
      last_received_keyframe_packet_ms_ = now_ms;

    found_frames = FindFrames(seq_num);  // Reference: https://blog.csdn.net/sonysuqin/article/details/106629343
  }

  for (std::unique_ptr<RtpFrameObject>& frame : found_frames)
    assembled_frame_callback_->OnAssembledFrame(std::move(frame)); 

  return true;
}
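
A note on the AheadOf comparisons used throughout the buffer: sequence numbers are 16 bits and wrap around, so ordering is decided by forward distance modulo 2^16. A simplified self-contained version (the real helper lives in rtc_base/numerics/sequence_number_util.h and additionally tie-breaks the exact half-distance case):

#include <cstdint>

// True if |a| is ahead of |b| in wrap-around order, i.e. the forward
// distance from b to a, modulo 2^16, is less than half the space.
bool AheadOf16(uint16_t a, uint16_t b) {
  return a != b && static_cast<uint16_t>(a - b) < 0x8000;
}

// Example: after the stream wraps from 65534 to 2, AheadOf16(2, 65534) is
// true, so sequence number 2 counts as newer than 65534.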


std::vector<std::unique_ptr<RtpFrameObject>> PacketBuffer::FindFrames(
    uint16_t seq_num) {
  std::vector<std::unique_ptr<RtpFrameObject>> found_frames;
  for (size_t i = 0; i < size_ && PotentialNewFrame(seq_num); ++i) {  
    size_t index = seq_num % size_;
    sequence_buffer_[index].continuous = true;

    // If all packets of the frame is continuous, find the first packet of the
    // frame and create an RtpFrameObject.
    if (sequence_buffer_[index].frame_end) {
      size_t frame_size = 0;
      int max_nack_count = -1;
      uint16_t start_seq_num = seq_num;
      int64_t min_recv_time = data_buffer_[index].packet_info.receive_time_ms();
      int64_t max_recv_time = data_buffer_[index].packet_info.receive_time_ms();
      RtpPacketInfos::vector_type packet_infos;

      // Find the start index by searching backward until the packet with
      // the |frame_begin| flag is set.
      int start_index = index;
      size_t tested_packets = 0;
      int64_t frame_timestamp = data_buffer_[start_index].timestamp;

      // Identify H.264 keyframes by means of SPS, PPS, and IDR.
      bool is_h264 = data_buffer_[start_index].codec() == kVideoCodecH264;
      bool has_h264_sps = false;
      bool has_h264_pps = false;
      bool has_h264_idr = false;
      bool is_h264_keyframe = false;

      while (true) {
        ++tested_packets;
        frame_size += data_buffer_[start_index].sizeBytes;
        max_nack_count =
            std::max(max_nack_count, data_buffer_[start_index].timesNacked);
        sequence_buffer_[start_index].frame_created = true;

        min_recv_time =
            std::min(min_recv_time,
                     data_buffer_[start_index].packet_info.receive_time_ms());
        max_recv_time =
            std::max(max_recv_time,
                     data_buffer_[start_index].packet_info.receive_time_ms());

        // Should use |push_front()| since the loop traverses backwards. But
        // it's too inefficient to do so on a vector so we'll instead fix the
        // order afterwards.
        packet_infos.push_back(data_buffer_[start_index].packet_info);

        if (!is_h264 && sequence_buffer_[start_index].frame_begin)
          break;

        if (is_h264) {
          const auto* h264_header = absl::get_if<RTPVideoHeaderH264>(
              &data_buffer_[start_index].video_header.video_type_header);
          if (!h264_header || h264_header->nalus_length >= kMaxNalusPerPacket)
            return found_frames;

          for (size_t j = 0; j < h264_header->nalus_length; ++j) {
            if (h264_header->nalus[j].type == H264::NaluType::kSps) {
              has_h264_sps = true;
            } else if (h264_header->nalus[j].type == H264::NaluType::kPps) {
              has_h264_pps = true;
            } else if (h264_header->nalus[j].type == H264::NaluType::kIdr) {
              has_h264_idr = true;
            }
          }
          if ((sps_pps_idr_is_h264_keyframe_ && has_h264_idr && has_h264_sps &&
               has_h264_pps) ||
              (!sps_pps_idr_is_h264_keyframe_ && has_h264_idr)) {
            is_h264_keyframe = true;
          }
        }

        if (tested_packets == size_)
          break;

        start_index = start_index > 0 ? start_index - 1 : size_ - 1;

        // In the case of H264 we don't have a frame_begin bit (yes,
        // |frame_begin| might be set to true but that is a lie). So instead
        // we traverse backwards as long as we have a previous packet and
        // the timestamp of that packet is the same as this one. This may cause
        // the PacketBuffer to hand out incomplete frames.
        // See: https://bugs.chromium.org/p/webrtc/issues/detail?id=7106
        if (is_h264 &&
            (!sequence_buffer_[start_index].used ||
             data_buffer_[start_index].timestamp != frame_timestamp)) {
          break;
        }

        --start_seq_num;
      }

      // Fix the order since the packet-finding loop traverses backwards.
      std::reverse(packet_infos.begin(), packet_infos.end());

      if (is_h264) {
        // Warn if this is an unsafe frame.
        if (has_h264_idr && (!has_h264_sps || !has_h264_pps)) {
          RTC_LOG(LS_WARNING)
              << "Received H.264-IDR frame "
              << "(SPS: " << has_h264_sps << ", PPS: " << has_h264_pps
              << "). Treating as "
              << (sps_pps_idr_is_h264_keyframe_ ? "delta" : "key")
              << " frame since WebRTC-SpsPpsIdrIsH264Keyframe is "
              << (sps_pps_idr_is_h264_keyframe_ ? "enabled." : "disabled");
        }

        // Now that we have decided whether to treat this frame as a key frame
        // or delta frame in the frame buffer, we update the field that
        // determines if the RtpFrameObject is a key frame or delta frame.
        const size_t first_packet_index = start_seq_num % size_;
        RTC_CHECK_LT(first_packet_index, size_);
        if (is_h264_keyframe) {
          data_buffer_[first_packet_index].video_header.frame_type =
              VideoFrameType::kVideoFrameKey;
        } else {
          data_buffer_[first_packet_index].video_header.frame_type =
              VideoFrameType::kVideoFrameDelta;
        }

        // With IPPP, if this is not a keyframe, make sure there are no gaps
        // in the packet sequence numbers up until this point.
        const uint8_t h264tid =
            data_buffer_[start_index].video_header.frame_marking.temporal_id;
        if (h264tid == kNoTemporalIdx && !is_h264_keyframe &&
            missing_packets_.upper_bound(start_seq_num) !=
                missing_packets_.begin()) {
          uint16_t stop_index = (index + 1) % size_;
          while (start_index != stop_index) {
            sequence_buffer_[start_index].frame_created = false;
            start_index = (start_index + 1) % size_;
          }

          return found_frames;
        }
      }

      missing_packets_.erase(missing_packets_.begin(),
                             missing_packets_.upper_bound(seq_num));

      const VCMPacket* first_packet = GetPacket(start_seq_num);
      const VCMPacket* last_packet = GetPacket(seq_num);
      auto frame = std::make_unique<RtpFrameObject>(
          start_seq_num, seq_num, last_packet->markerBit, max_nack_count,
          min_recv_time, max_recv_time, first_packet->timestamp,
          first_packet->ntp_time_ms_, last_packet->video_header.video_timing,
          first_packet->payloadType, first_packet->codec(),
          last_packet->video_header.rotation,
          last_packet->video_header.content_type, first_packet->video_header,
          last_packet->video_header.color_space,
          first_packet->generic_descriptor,
          RtpPacketInfos(std::move(packet_infos)),
          GetEncodedImageBuffer(frame_size, start_seq_num, seq_num));

      found_frames.emplace_back(std::move(frame));

      ClearInterval(start_seq_num, seq_num);
    }
    ++seq_num;
  }
  return found_frames;
}



void RtpVideoStreamReceiver::OnAssembledFrame(
    std::unique_ptr<video_coding::RtpFrameObject> frame) {
  RTC_DCHECK_RUN_ON(&network_tc_);
  RTC_DCHECK(frame);

  absl::optional<RtpGenericFrameDescriptor> descriptor =
      frame->GetGenericFrameDescriptor();

  if (loss_notification_controller_ && descriptor) {
    loss_notification_controller_->OnAssembledFrame(
        frame->first_seq_num(), descriptor->FrameId(),
        descriptor->Discardable().value_or(false),
        descriptor->FrameDependenciesDiffs());
  }

  // If frames arrive before a key frame, they would not be decodable.
  // In that case, request a key frame ASAP.
  if (!has_received_frame_) {
    if (frame->FrameType() != VideoFrameType::kVideoFrameKey) {
      // |loss_notification_controller_|, if present, would have already
      // requested a key frame when the first packet for the non-key frame
      // had arrived, so no need to replicate the request.
      if (!loss_notification_controller_) {
        RequestKeyFrame();
      }
    }
    has_received_frame_ = true;
  }

  rtc::CritScope lock(&reference_finder_lock_);
  // Reset |reference_finder_| if |frame| is new and the codec have changed.
  if (current_codec_) {
    bool frame_is_newer =
        AheadOf(frame->Timestamp(), last_assembled_frame_rtp_timestamp_);

    if (frame->codec_type() != current_codec_) {
      if (frame_is_newer) {
        // When we reset the |reference_finder_| we don't want new picture ids
        // to overlap with old picture ids. To ensure that doesn't happen we
        // start from the |last_completed_picture_id_| and add an offset in case
        // of reordering.
        reference_finder_ =
            std::make_unique<video_coding::RtpFrameReferenceFinder>(
                this, last_completed_picture_id_ +
                          std::numeric_limits<uint16_t>::max());
        current_codec_ = frame->codec_type();
      } else {
        // Old frame from before the codec switch, discard it.
        return;
      }
    }

    if (frame_is_newer) {
      last_assembled_frame_rtp_timestamp_ = frame->Timestamp();
    }
  } else {
    current_codec_ = frame->codec_type();
    last_assembled_frame_rtp_timestamp_ = frame->Timestamp();
  }

  if (buffered_frame_decryptor_ == nullptr) {
    reference_finder_->ManageFrame(std::move(frame));  // RtpFrameReferenceFinder::ManageFrame
  } else {
    buffered_frame_decryptor_->ManageEncryptedFrame(std::move(frame));
  }
}


void RtpFrameReferenceFinder::ManageFrame(
    std::unique_ptr<RtpFrameObject> frame) {
  // If we have cleared past this frame, drop it.
  if (cleared_to_seq_num_ != -1 &&
      AheadOf<uint16_t>(cleared_to_seq_num_, frame->first_seq_num())) {
    return;
  }

  FrameDecision decision = ManageFrameInternal(frame.get());

  switch (decision) {
    case kStash:
      if (stashed_frames_.size() > kMaxStashedFrames)
        stashed_frames_.pop_back();
      stashed_frames_.push_front(std::move(frame));
      break;
    case kHandOff:
      HandOffFrame(std::move(frame));
      RetryStashedFrames();
      break;
    case kDrop:
      break;
  }
}



void RtpFrameReferenceFinder::HandOffFrame(
    std::unique_ptr<RtpFrameObject> frame) {
  frame->id.picture_id += picture_id_offset_;
  for (size_t i = 0; i < frame->num_references; ++i) {
    frame->references[i] += picture_id_offset_;
  }

  frame_callback_->OnCompleteFrame(std::move(frame));  // RtpVideoStreamReceiver::OnCompleteFrame
}



void RtpVideoStreamReceiver::OnCompleteFrame(
    std::unique_ptr<video_coding::EncodedFrame> frame) {
  {
    rtc::CritScope lock(&last_seq_num_cs_);
    video_coding::RtpFrameObject* rtp_frame =
        static_cast<video_coding::RtpFrameObject*>(frame.get());
    last_seq_num_for_pic_id_[rtp_frame->id.picture_id] =
        rtp_frame->last_seq_num();
  }
  last_completed_picture_id_ =
      std::max(last_completed_picture_id_, frame->id.picture_id);
  // complete_frame_callback_ is passed in through RtpVideoStreamReceiver's
  // constructor (see the VideoReceiveStream constructor); it is in fact the
  // VideoReceiveStream's this pointer.
  complete_frame_callback_->OnCompleteFrame(std::move(frame));
}


void VideoReceiveStream::OnCompleteFrame(
    std::unique_ptr<video_coding::EncodedFrame> frame) {
  RTC_DCHECK_RUN_ON(&network_sequence_checker_);
  // TODO(https://bugs.webrtc.org/9974): Consider removing this workaround.
  int64_t time_now_ms = rtc::TimeMillis();
  if (last_complete_frame_time_ms_ > 0 &&
      time_now_ms - last_complete_frame_time_ms_ > kInactiveStreamThresholdMs) {
    frame_buffer_->Clear();
  }
  last_complete_frame_time_ms_ = time_now_ms;

  const PlayoutDelay& playout_delay = frame->EncodedImage().playout_delay_;
  if (playout_delay.min_ms >= 0) {
    rtc::CritScope cs(&playout_delay_lock_);
    frame_minimum_playout_delay_ms_ = playout_delay.min_ms;
    UpdatePlayoutDelays();
  }

  if (playout_delay.max_ms >= 0) {
    rtc::CritScope cs(&playout_delay_lock_);
    frame_maximum_playout_delay_ms_ = playout_delay.max_ms;
    UpdatePlayoutDelays();
  }

  // frame_buffer_ is a std::unique_ptr<video_coding::FrameBuffer>.
  int64_t last_continuous_pid = frame_buffer_->InsertFrame(std::move(frame));
  if (last_continuous_pid != -1)
    rtp_video_stream_receiver_.FrameContinuous(last_continuous_pid);
}


int64_t FrameBuffer::InsertFrame(std::unique_ptr<EncodedFrame> frame) {
  TRACE_EVENT0("webrtc", "FrameBuffer::InsertFrame");
  RTC_DCHECK(frame);

  rtc::CritScope lock(&crit_);

  const VideoLayerFrameId& id = frame->id;
  int64_t last_continuous_picture_id =
      !last_continuous_frame_ ? -1 : last_continuous_frame_->picture_id;

  if (!ValidReferences(*frame)) {
    RTC_LOG(LS_WARNING) << "Frame with (picture_id:spatial_id) ("
                        << id.picture_id << ":"
                        << static_cast<int>(id.spatial_layer)
                        << ") has invalid frame references, dropping frame.";
    return last_continuous_picture_id;
  }

  if (frames_.size() >= kMaxFramesBuffered) {
    if (frame->is_keyframe()) {
      RTC_LOG(LS_WARNING) << "Inserting keyframe (picture_id:spatial_id) ("
                          << id.picture_id << ":"
                          << static_cast<int>(id.spatial_layer)
                          << ") but buffer is full, clearing"
                          << " buffer and inserting the frame.";
      ClearFramesAndHistory();
    } else {
      RTC_LOG(LS_WARNING) << "Frame with (picture_id:spatial_id) ("
                          << id.picture_id << ":"
                          << static_cast<int>(id.spatial_layer)
                          << ") could not be inserted due to the frame "
                          << "buffer being full, dropping frame.";
      return last_continuous_picture_id;
    }
  }

  auto last_decoded_frame = decoded_frames_history_.GetLastDecodedFrameId();
  auto last_decoded_frame_timestamp =
      decoded_frames_history_.GetLastDecodedFrameTimestamp();
  if (last_decoded_frame && id <= *last_decoded_frame) {
    if (AheadOf(frame->Timestamp(), *last_decoded_frame_timestamp) &&
        frame->is_keyframe()) {
      // If this frame has a newer timestamp but an earlier picture id then we
      // assume there has been a jump in the picture id due to some encoder
      // reconfiguration or some other reason. Even though this is not according
      // to spec we can still continue to decode from this frame if it is a
      // keyframe.
      RTC_LOG(LS_WARNING)
          << "A jump in picture id was detected, clearing buffer.";
      ClearFramesAndHistory();
      last_continuous_picture_id = -1;
    } else {
      RTC_LOG(LS_WARNING) << "Frame with (picture_id:spatial_id) ("
                          << id.picture_id << ":"
                          << static_cast<int>(id.spatial_layer)
                          << ") inserted after frame ("
                          << last_decoded_frame->picture_id << ":"
                          << static_cast<int>(last_decoded_frame->spatial_layer)
                          << ") was handed off for decoding, dropping frame.";
      return last_continuous_picture_id;
    }
  }

  // Test if inserting this frame would cause the order of the frames to become
  // ambiguous (covering more than half the interval of 2^16). This can happen
  // when the picture id make large jumps mid stream.
  if (!frames_.empty() && id < frames_.begin()->first &&
      frames_.rbegin()->first < id) {
    RTC_LOG(LS_WARNING)
        << "A jump in picture id was detected, clearing buffer.";
    ClearFramesAndHistory();
    last_continuous_picture_id = -1;
  }

  auto info = frames_.emplace(id, FrameInfo()).first;

  if (info->second.frame) {
    RTC_LOG(LS_WARNING) << "Frame with (picture_id:spatial_id) ("
                        << id.picture_id << ":"
                        << static_cast<int>(id.spatial_layer)
                        << ") already inserted, dropping frame.";
    return last_continuous_picture_id;
  }

  if (!UpdateFrameInfoWithIncomingFrame(*frame, info))
    return last_continuous_picture_id;

  if (!frame->delayed_by_retransmission())
    timing_->IncomingTimestamp(frame->Timestamp(), frame->ReceivedTime());

  if (stats_callback_ && IsCompleteSuperFrame(*frame)) {
    stats_callback_->OnCompleteFrame(frame->is_keyframe(), frame->size(),
                                     frame->contentType());
  }

  info->second.frame = std::move(frame);

  if (info->second.num_missing_continuous == 0) {
    info->second.continuous = true;
    PropagateContinuity(info);
    last_continuous_picture_id = last_continuous_frame_->picture_id;

    // Since we now have new continuous frames there might be a better frame
    // to return from NextFrame.
    new_continuous_frame_event_.Set();  

    if (callback_queue_) {
      callback_queue_->PostTask([this] {
        rtc::CritScope lock(&crit_);
        if (!callback_task_.Running())
          return;
        RTC_CHECK(frame_handler_);
        callback_task_.Stop();
        StartWaitForNextFrameOnQueue();
      });
    }
  }

  return last_continuous_picture_id;
}

 
