1. app_main函数启动rtsp服务器
// Application entry point (ESP-IDF): hands control to the RTSP server,
// which loops forever accepting client connections and never returns.
void app_main(void)
{
rtsp_server();
}
2.rtsp_server启动并监听client socket
void rtsp_server(void)
{
初始化server socket
初始化摄像头
while循环内监听client socket连接,有client socket连接上来以后,启动任务处理改client。(对每个连接要启动一个任务来处理)
// loop forever to accept client connections
while (true)
{
printf(“Minimum free heap size: %d bytes\n”, esp_get_minimum_free_heap_size());
client_socket = accept(server_socket, (struct sockaddr*)&client_addr, &client_addr_len);
printf("Client connected from: %s\n", inet_ntoa(client_addr.sin_addr));
xTaskCreate(client_worker, "client_worker", 3584, (void*)client_socket, tskIDLE_PRIORITY, &xHandle);
}
}
3.client_worker任务会根据帧率定时发送摄像头图片,在两张图片间隔时间内会进行休眠,让出cpu资源给其他任务。
/**
 * Per-client task: serves one RTSP client until it disconnects or sends
 * TEARDOWN, pacing camera frames at CONFIG_CAM_FRAMERATE and yielding the
 * CPU between frames.
 *
 * @param client the accepted client socket, passed through the void* task
 *               parameter by rtsp_server().
 */
void client_worker(void * client)
{
// Streamer: pulls JPEG frames from the camera and packetizes them as RTP.
OV2640Streamer * streamer = new OV2640Streamer((SOCKET)client, cam);
// Session: parses RTSP requests on this socket and drives the streamer.
CRtspSession * session = new CRtspSession((SOCKET)client, streamer);
unsigned long lastFrameTime = 0;
const unsigned long msecPerFrame = (1000 / CONFIG_CAM_FRAMERATE);
while (session->m_stopped == false)
{
// Handle any pending client request (0 = don't block waiting for one).
session->handleRequests(0);
unsigned long now = millis();
// Send a frame once the frame interval has elapsed, or when millis()
// wrapped around (now < lastFrameTime) so we don't stall after rollover.
if ((now > (lastFrameTime + msecPerFrame)) || (now < lastFrameTime))
{
session->broadcastCurrentFrame(now);
lastFrameTime = now;
}
else
{
//let the system do something else for a bit
vTaskDelay(1);
}
}
//shut ourselves down: client gone or TEARDOWN received
delete streamer;
delete session;
vTaskDelete(NULL);
}
4.session->handleRequests函数,解析client的请求,设置两个程序运行控制变量。
RTSP_OPTIONS,RTSP_DESCRIBE,RTSP_SETUP,RTSP_PLAY,RTSP_TEARDOWN,RTSP_UNKNOWN
根据这些请求类型设置m_streaming和m_stopped两个逻辑变量。
/**
   Read from our socket, parsing commands as possible.

   Accumulates incoming bytes into RecvBuf and, once a complete RTSP header
   (terminated by an empty CRLF line) is present, dispatches it through
   Handle_RtspRequest(). Session state side effects: PLAY sets
   m_streaming = true; TEARDOWN, or the peer closing the socket, sets
   m_stopped = true.

   @param readTimeoutMs how long tcpsocketread() may wait for data.
   @return false when the session is already stopped, on a read timeout, or
           after rejecting a malformed request; true otherwise (including
           "header incomplete, call again" and "peer closed the socket").
*/
bool CRtspSession::handleRequests( uint32_t readTimeoutMs )
{
if ( m_stopped )
return false; // Already closed down
// NOTE(review): bufPos/state are reset unconditionally here, which makes the
// guard just below always true and defeats the "glue split requests" purpose
// described in the next comment — verify whether these two lines belong here.
bufPos = 0; // current position into receiving buffer. used to glue split requests.
state = hdrStateUnknown;
if ( bufPos == 0 || bufPos >= RTSP_BUFFER_SIZE - 1 ) // in case of bad client
{
memset( RecvBuf, 0x00, RTSP_BUFFER_SIZE );
bufPos = 0;
state = hdrStateUnknown;
}
// we always read 1 byte less than the buffer length, so all string ops here will not panic
int res = tcpsocketread( m_RtspClient, RecvBuf + bufPos, RTSP_BUFFER_SIZE - bufPos - 1, readTimeoutMs );
if ( res > 0 )
{
bufPos += res;
RecvBuf[ bufPos ] = '\0'; // keep the buffer NUL-terminated for the str* calls below
DEBUG_PRINT( "+ read %d bytes\n", res );
if ( state == hdrStateUnknown && bufPos >= 6 ) // we need at least 4-letter at the line start with optional heading CRLF
{
if( NULL != strstr( RecvBuf, "\r\n" ) ) // got a full line
{
char *s = RecvBuf;
if ( *s == '\r' && *(s + 1) == '\n' ) // skip allowed empty line at front
s += 2;
newCommandInit();
// find out the command type from the first word of the request line
m_RtspCmdType = RTSP_UNKNOWN;
if ( strncmp( s, "OPTIONS ", 8 ) == 0 ) m_RtspCmdType = RTSP_OPTIONS;
else if ( strncmp( s, "DESCRIBE ", 9 ) == 0 ) m_RtspCmdType = RTSP_DESCRIBE;
else if ( strncmp( s, "SETUP ", 6 ) == 0 ) m_RtspCmdType = RTSP_SETUP;
else if ( strncmp( s, "PLAY ", 5 ) == 0 ) m_RtspCmdType = RTSP_PLAY;
else if ( strncmp( s, "TEARDOWN ", 9 ) == 0 ) m_RtspCmdType = RTSP_TEARDOWN;
if( m_RtspCmdType != RTSP_UNKNOWN ) // got some
state = hdrStateGotMethod;
else
state = hdrStateInvalid;
}
} // if state == hdrStateUnknown
if ( state != hdrStateUnknown ) // in all cases we need to slurp the whole header before answering
{
// per https://tools.ietf.org/html/rfc2326 we need to look for an empty line
// to be sure that we got the correctly formed header. Also starting CRLF should be ignored.
char *s = strstr( bufPos > 4 ? RecvBuf + bufPos - 4 : RecvBuf, "\r\n\r\n" ); // try to save cycles by searching in the new data only
if ( s == NULL ) // no end of header seen yet
return true;
if ( state == hdrStateInvalid ) // tossing some immediate answer, so client don't fall into endless stupor
{
// not sure which code is more appropriate and if CSeq is needed here?
int l = snprintf( RecvBuf, RTSP_BUFFER_SIZE, "RTSP/1.0 400 Bad Request\r\nCSeq: %u\r\n\r\n", m_CSeq );
tcpsocketsend( m_RtspClient, RecvBuf, l );
bufPos = 0;
return false;
}
}
// Full header received: parse and answer it. NOTE(review): res (bytes from
// this read) equals bufPos only because bufPos was reset above — confirm.
RTSP_CMD_TYPES C = Handle_RtspRequest( RecvBuf, res );
if ( C == RTSP_PLAY )
m_streaming = true;
else if ( C == RTSP_TEARDOWN )
{
m_stopped = true;
}
// cleaning up for the next request
state = hdrStateUnknown;
bufPos = 0;
return true;
} // res > 0
else if ( res == 0 )
{
DEBUG_PRINT("client closed socket, exiting\n");
m_stopped = true;
return true;
}
else
{
// Timeout on read
return false;
}
}
5. broadcastCurrentFrame根据m_streaming和m_stopped两个逻辑变量,判断是否继续发送图片。
/**
 * Sends the current camera frame to the client, but only while the session
 * is actively streaming (PLAY received) and not stopped (no TEARDOWN yet).
 *
 * @param curMsec current time in milliseconds, used for the RTP timestamp.
 */
void CRtspSession::broadcastCurrentFrame(uint32_t curMsec) {
// Send a frame only when the client asked for one and the session is alive.
if (m_streaming && !m_stopped) {
DEBUG_PRINT("serving a frame\n");
m_Streamer->streamImage(curMsec);
}
}
6.在streamImage函数中调用streamFrame发送摄像头图片:
/// Grab one JPEG frame from the OV2640 camera and push it to the client
/// as an RTP/JPEG frame via streamFrame().
void OV2640Streamer::streamImage(uint32_t curMsec)
{
// Kick off the camera capture so a fresh frame is queued for next time.
m_cam.run();
BufPtr frame = m_cam.getfb();
uint32_t frameLen = m_cam.getSize();
streamFrame(frame, frameLen, curMsec);
// Release the frame buffer back to the camera driver.
m_cam.done();
}
7.streamFrame函数对摄像头图片进行格式转换后,调用SendRtpPacket函数,发送rtp数据包。
void CStreamer::streamFrame(unsigned const char *data, uint32_t dataLen, uint32_t curMsec)
{
if(m_prevMsec == 0) // first frame init our timestamp
m_prevMsec = curMsec;
// compute deltat (being careful to handle clock rollover with a little lie)
uint32_t deltams = (curMsec >= m_prevMsec) ? curMsec - m_prevMsec : 100;
m_prevMsec = curMsec;
// locate quant tables if possible
BufPtr qtable0, qtable1;
if(!decodeJPEGfile(&data, &dataLen, &qtable0, &qtable1)) {
ERROR_PRINT("can't decode jpeg data\n");
return;
}
int offset = 0;
do {
offset = SendRtpPacket(data, dataLen, offset, qtable0, qtable1);
} while(offset != 0);
// Increment ONLY after a full frame
uint32_t units = 90000; // Hz per RFC 2435
m_Timestamp += (units * deltams / 1000); // fixed timestamp increment for a frame rate of 25fps
m_SendIdx++;
if (m_SendIdx > 1) m_SendIdx = 0;
}
8.SendRtpPacket是整个rtp报文的核心数据处理函数。
/**
 * Builds and sends one RTP/JPEG packet (RFC 2435) carrying a fragment of the
 * given JPEG scan data, over TCP (RTP-over-RTSP interleaved) or UDP.
 *
 * @param jpeg           pointer to the JPEG scan data (headers already stripped)
 * @param jpegLen        total scan data length in bytes
 * @param fragmentOffset byte offset of this fragment within the scan data
 * @param quant0tbl      luma quant table, or NULL if none to send in-band
 * @param quant1tbl      chroma quant table, or NULL if none to send in-band
 * @return 0 when this was the last fragment of the frame, otherwise the
 *         offset at which the next fragment starts.
 */
int CStreamer::SendRtpPacket(unsigned const char * jpeg, int jpegLen, int fragmentOffset, BufPtr quant0tbl, BufPtr quant1tbl)
{
#define KRtpHeaderSize 12 // size of the RTP header
#define KJpegHeaderSize 8 // size of the special JPEG payload header
#define MAX_FRAGMENT_SIZE 1300 // anything larger will blow out a payload if quant tables are included
int fragmentLen = MAX_FRAGMENT_SIZE;
if(fragmentLen + fragmentOffset > jpegLen) // Shrink last fragment if needed
fragmentLen = jpegLen - fragmentOffset;
bool isLastFragment = (fragmentOffset + fragmentLen) == jpegLen;
// Do we have custom quant tables? If so include them per RFC (first fragment only)
bool includeQuantTbl = quant0tbl && quant1tbl && fragmentOffset == 0;
// Q >= 128 signals in-band quant tables per RFC 2435; 0x5e otherwise
uint8_t q = includeQuantTbl ? 128 : 0x5e;
// NOTE(review): assumes RTPBUF_SIZE >= 4 + headers + quant tables +
// MAX_FRAGMENT_SIZE — no bounds check here, confirm against RTPBUF_SIZE.
int RtpPacketSize = fragmentLen + KRtpHeaderSize + KJpegHeaderSize + (includeQuantTbl ? (4 + 64 * 2) : 0);
memset(RtpBuf,0x00,RTPBUF_SIZE);
// Prepare the first 4 byte of the packet. This is the Rtp over Rtsp header in case of TCP based transport
RtpBuf[0] = '$'; // magic number
RtpBuf[1] = 0; // number of the multiplexed subchannel on the RTSP connection - here the RTP channel
RtpBuf[2] = (RtpPacketSize & 0x0000FF00) >> 8;
RtpBuf[3] = (RtpPacketSize & 0x000000FF);
// Prepare the 12 byte RTP header
RtpBuf[4] = 0x80; // RTP version
RtpBuf[5] = 0x1a | (isLastFragment ? 0x80 : 0x00); // JPEG payload (26) and marker bit
RtpBuf[7] = m_SequenceNumber & 0x0FF; // each packet is counted with a sequence counter
RtpBuf[6] = m_SequenceNumber >> 8;
RtpBuf[8] = (m_Timestamp & 0xFF000000) >> 24; // each image gets a timestamp
RtpBuf[9] = (m_Timestamp & 0x00FF0000) >> 16;
RtpBuf[10] = (m_Timestamp & 0x0000FF00) >> 8;
RtpBuf[11] = (m_Timestamp & 0x000000FF);
RtpBuf[12] = 0x13; // 4 byte SSRC (synchronization source identifier)
RtpBuf[13] = 0xf9; // we just use an arbitrary number here to keep it simple
RtpBuf[14] = 0x7e;
RtpBuf[15] = 0x67;
// Prepare the 8 byte payload JPEG header
RtpBuf[16] = 0x00; // type specific
RtpBuf[17] = (fragmentOffset & 0x00FF0000) >> 16; // 3 byte fragmentation offset for fragmented images
RtpBuf[18] = (fragmentOffset & 0x0000FF00) >> 8;
RtpBuf[19] = (fragmentOffset & 0x000000FF);
/* These sampling factors indicate that the chrominance components of
type 0 video is downsampled horizontally by 2 (often called 4:2:2)
while the chrominance components of type 1 video are downsampled both
horizontally and vertically by 2 (often called 4:2:0). */
RtpBuf[20] = 0x00; // type (fixme might be wrong for camera data) https://tools.ietf.org/html/rfc2435
RtpBuf[21] = q; // quality scale factor was 0x5e
RtpBuf[22] = m_width / 8; // width / 8
RtpBuf[23] = m_height / 8; // height / 8
int headerLen = 24; // Including jpeg header but not quant table header
if(includeQuantTbl) { // we need a quant header - but only in first packet of the frame
DEBUG_PRINT("inserting quanttbl\n");
RtpBuf[24] = 0; // MBZ
RtpBuf[25] = 0; // 8 bit precision
RtpBuf[26] = 0; // MSB of length
int numQantBytes = 64; // Two 64 byte tables
RtpBuf[27] = 2 * numQantBytes; // LSB of length
headerLen += 4;
memcpy(RtpBuf + headerLen, quant0tbl, numQantBytes);
headerLen += numQantBytes;
memcpy(RtpBuf + headerLen, quant1tbl, numQantBytes);
headerLen += numQantBytes;
}
DEBUG_PRINT("Sending timestamp %d, seq %d, fragoff %d, fraglen %d, jpegLen %d\n", m_Timestamp, m_SequenceNumber, fragmentOffset, fragmentLen, jpegLen);
// append the JPEG scan data to the RTP buffer
memcpy(RtpBuf + headerLen,jpeg + fragmentOffset, fragmentLen);
fragmentOffset += fragmentLen;
m_SequenceNumber++; // prepare the packet counter for the next packet
IPADDRESS otherip;
IPPORT otherport;
// RTP marker bit must be set on last fragment
if (m_TCPTransport) // RTP over RTSP - we send the buffer + 4 byte additional header
tcpsocketsend(m_Client,RtpBuf,RtpPacketSize + 4);
else // UDP - we send just the buffer by skipping the 4 byte RTP over RTSP header
{
// NOTE(review): otherport is filled in but m_RtpClientPort (the SETUP-
// negotiated client RTP port) is used as the destination — confirm intent.
socketpeeraddr(m_Client, &otherip, &otherport);
udpsocketsend(m_RtpSocket,&RtpBuf[4],RtpPacketSize, otherip, m_RtpClientPort);
}
return isLastFragment ? 0 : fragmentOffset;
}