iOS VideoToolbox H.264 硬解码

// Decodes one raw Annex-B H.264 buffer with VideoToolbox.
//
// The buffer is expected to be either:
//   * SPS + PPS + IDR slice concatenated, each prefixed by a 00 00 00 01
//     start code (the SPS path below assumes the first start code is at
//     offset 0), or
//   * a single non-IDR slice (NALU type 1).
//
// The slice is converted to AVCC format (4-byte big-endian length header in
// place of the start code), wrapped in a CMSampleBuffer, and decoded
// synchronously. The decompression callback is expected to write the decoded
// pixel buffer through the sourceFrameRefCon pointer (&outputPixelBuffer).
//
// @param frame     pointer to the Annex-B NALU data
// @param frameSize number of valid bytes in |frame|
// @return the decoded pixel buffer, or nil on any failure. NOTE(review):
//         ownership of the returned pixel buffer depends on whether the
//         decompression callback retains it — confirm against didDecompress.
-(CVPixelBufferRef) receivedRawVideoFrame:(uint8_t *)frame withSize:(uint32_t)frameSize
{
    // BUGFIX: |status| was previously left uninitialized and then compared
    // against noErr before the decoder-session check for non-parameter-set
    // frames; give it a defined starting value.
    OSStatus status = noErr;

    uint8_t *data = NULL;

    // I know what my H.264 data source's NALUs look like so I know the start
    // code index is always 0. If you don't know where it starts, scan for it
    // the same way the 2nd and 3rd start codes are found below.
    int startCodeIndex = 0;
    int secondStartCodeIndex = 0;
    int thirdStartCodeIndex = 0;

    long blockLength = 0;
    CMBlockBufferRef blockBuffer = NULL;

    // Guard: need at least the 4-byte start code plus the NALU header byte.
    if (frame == NULL || frameSize < 5)
    {
        NSLog(@"Video error: frame too small to contain a NALU");
        return nil;
    }

    int nalu_type = (frame[startCodeIndex + 4] & 0x1F);

    // If we haven't already set up our format description with our SPS/PPS
    // parameters, we can't process any frame except type 7 (SPS), which
    // carries those parameters.
    if (nalu_type != 7 && videoFormatDescription == NULL)
    {
        NSLog(@"Video error: Frame is not an I Frame and format description is null");
        return nil;
    }

    // NALU type 7 is the SPS parameter NALU.
    if (nalu_type == 7)
    {
        // Find where the second (PPS) start code begins (the 0x00 00 00 01
        // code); its index is also the length of the SPS NALU including the
        // 4-byte start-code header. The scan window is bounded by frameSize
        // to avoid reading past the buffer.
        for (int i = startCodeIndex + 4; i < startCodeIndex + 40 && i + 3 < (int)frameSize; i++)
        {
            if (frame[i] == 0x00 && frame[i+1] == 0x00 && frame[i+2] == 0x00 && frame[i+3] == 0x01)
            {
                secondStartCodeIndex = i;
                spsSize = secondStartCodeIndex;   // includes the header in the size
                break;
            }
        }

        // BUGFIX: previously a missed start code left the index at 0 and the
        // code below indexed and malloc'd with bogus sizes.
        if (secondStartCodeIndex == 0)
        {
            NSLog(@"Video error: could not locate PPS start code after SPS");
            return nil;
        }

        // Find what the second NALU type is.
        nalu_type = (frame[secondStartCodeIndex + 4] & 0x1F);
    }

    // Type 8 is the PPS parameter NALU.
    if (nalu_type == 8)
    {
        // Find where the NALU after this one starts so we know how long the
        // PPS parameter is (again bounded by frameSize).
        for (int i = spsSize + 4; i < spsSize + 30 && i + 3 < (int)frameSize; i++)
        {
            if (frame[i] == 0x00 && frame[i+1] == 0x00 && frame[i+2] == 0x00 && frame[i+3] == 0x01)
            {
                thirdStartCodeIndex = i;
                ppsSize = thirdStartCodeIndex - spsSize;
                break;
            }
        }

        if (thirdStartCodeIndex == 0)
        {
            NSLog(@"Video error: could not locate slice start code after PPS");
            return nil;
        }

        // BUGFIX: release parameter sets kept from a previous call before
        // reallocating (ivars start out zeroed, so free(NULL) is safe).
        free(spsData);
        free(ppsData);

        // Allocate enough space for the SPS and PPS payloads. VTD doesn't
        // want the 4-byte start-code header included, hence the "- 4".
        spsData = malloc(spsSize - 4);
        ppsData = malloc(ppsSize - 4);

        // Copy in the actual SPS and PPS values, again skipping the header.
        memcpy(spsData, &frame[4], spsSize - 4);
        memcpy(ppsData, &frame[spsSize + 4], ppsSize - 4);

        // Now we set our H.264 parameters.
        uint8_t *parameterSetPointers[2] = {spsData, ppsData};
        size_t parameterSetSizes[2] = {spsSize - 4, ppsSize - 4};

        status = CMVideoFormatDescriptionCreateFromH264ParameterSets(kCFAllocatorDefault, 2,
                                                                     (const uint8_t *const *)parameterSetPointers,
                                                                     parameterSetSizes, 4, // 4-byte NALU length headers
                                                                     &videoFormatDescription);

        NSLog(@"\t\t Creation of CMVideoFormatDescription: %@", (status == noErr) ? @"successful!" : @"failed...");
        if (status != noErr) NSLog(@"\t\t Format Description ERROR type: %d", (int)status);

        nalu_type = (frame[thirdStartCodeIndex + 4] & 0x1F);
    }

    // Create our VTDecompressionSession lazily. This isn't necessary if you
    // choose to use AVSampleBufferDisplayLayer instead.
    if ((status == noErr) && (decoderSession == NULL))
    {
        [self createDecompSession];
    }

    // Type 5 is an IDR slice (it follows the SPS/PPS NALUs, so skip past
    // them); type 1 is a non-IDR (P-frame) slice with no such offset. The
    // two cases were previously duplicated verbatim — merged here.
    if (nalu_type == 5 || nalu_type == 1)
    {
        int offset = (nalu_type == 5) ? (spsSize + ppsSize) : 0;
        blockLength = frameSize - offset;
        data = malloc(blockLength);
        memcpy(data, &frame[offset], blockLength);

        // AVCC format requires the start-code header to be replaced with the
        // payload size. htonl converts from host to network byte order
        // (AVCC lengths are big-endian).
        uint32_t dataLength32 = htonl((uint32_t)(blockLength - 4));
        memcpy(data, &dataLength32, sizeof(uint32_t));

        // Create a block buffer that wraps (does not copy or own) |data|;
        // kCFAllocatorNull means we stay responsible for freeing it.
        status = CMBlockBufferCreateWithMemoryBlock(NULL, data,
                                                    blockLength,      // total length of the memory block
                                                    kCFAllocatorNull, NULL,
                                                    0,                // offsetToData
                                                    blockLength,      // dataLength of relevant bytes
                                                    0, &blockBuffer);

        NSLog(@"\t\t BlockBufferCreation: \t %@", (status == kCMBlockBufferNoErr) ? @"successful!" : @"failed...");

        if (status == kCMBlockBufferNoErr)
        {
            CVPixelBufferRef outputPixelBuffer = NULL;
            CMSampleBufferRef sampleBuffer = NULL;
            // BUGFIX: the sample size must match the block buffer's length,
            // not the original frameSize (they differ for IDR frames, where
            // the SPS/PPS prefix has been stripped).
            const size_t sampleSizeArray[] = {(size_t)blockLength};
            status = CMSampleBufferCreateReady(kCFAllocatorDefault,
                                               blockBuffer,
                                               videoFormatDescription,
                                               1, 0, NULL, 1, sampleSizeArray,
                                               &sampleBuffer);

            if (status == noErr && sampleBuffer)
            {
                // Synchronous decode (no async flag); the callback receives
                // &outputPixelBuffer as sourceFrameRefCon and is expected to
                // store the decoded image through it before this returns.
                VTDecodeFrameFlags flags = 0;
                VTDecodeInfoFlags flagOut = 0;
                OSStatus decodeStatus = VTDecompressionSessionDecodeFrame(decoderSession,
                                                                          sampleBuffer,
                                                                          flags,
                                                                          &outputPixelBuffer,
                                                                          &flagOut);
                // BUGFIX: the sample buffer was previously leaked on the
                // success path (early return skipped the CFRelease).
                CFRelease(sampleBuffer);

                if (decodeStatus == noErr)
                {
                    // BUGFIX: release the block buffer and free the backing
                    // memory before returning; both were leaked on success.
                    // Safe here because the decode above is synchronous.
                    CFRelease(blockBuffer);
                    free(data);
                    return outputPixelBuffer;
                }
                else if (decodeStatus == kVTInvalidSessionErr)
                {
                    NSLog(@"IOS8VT: Invalid session, reset decoder session");
                }
                else if (decodeStatus == kVTVideoDecoderBadDataErr)
                {
                    NSLog(@"IOS8VT: decode failed status=%d(Bad data)", (int)decodeStatus);
                }
                else
                {
                    NSLog(@"IOS8VT: decode failed status=%d", (int)decodeStatus);
                }
            }
            CFRelease(blockBuffer);
        }
    }

    // Free memory to avoid a leak on every failure path.
    if (NULL != data)
    {
        free(data);
        data = NULL;
    }
    return nil;
}


// Creates (or recreates) the VTDecompressionSession from the current
// videoFormatDescription. Call only after the format description exists;
// decoded frames are delivered to the C callback |didDecompress|.
-(void) createDecompSession
{
    // BUGFIX: actually destroy the old VTD session. Previously the pointer
    // was just set to NULL, which leaked any live session.
    if (decoderSession != NULL)
    {
        VTDecompressionSessionInvalidate(decoderSession);
        CFRelease(decoderSession);
        decoderSession = NULL;
    }

    VTDecompressionOutputCallbackRecord callBackRecord;
    callBackRecord.decompressionOutputCallback = didDecompress;

    // This is necessary if you need to make calls to Objective-C "self" from
    // within the C callback (it arrives there as decompressionOutputRefCon).
    callBackRecord.decompressionOutputRefCon = (__bridge void *)self;

    // You can set desired attributes for the destination pixel buffer. Not
    // used here; if you need them, pass the dictionary as the 4th argument
    // of VTDecompressionSessionCreate (see the commented cast below).
    NSDictionary *destinationImageBufferAttributes = @{
        (id)kCVPixelBufferOpenGLESCompatibilityKey : @YES
    };
    (void)destinationImageBufferAttributes; // silence unused-variable warning

    OSStatus status = VTDecompressionSessionCreate(NULL, videoFormatDescription, NULL,
                                                   NULL, // (__bridge CFDictionaryRef)(destinationImageBufferAttributes)
                                                   &callBackRecord, &decoderSession);
    NSLog(@"Video Decompression Session Create: \t %@", (status == noErr) ? @"successful!" : @"failed...");
    if (status != noErr) NSLog(@"\t\t VTD ERROR type: %d", (int)status);
}
