Re: Deinterlace video (was: Replacing aging VDR for DVB-S2)

--- On Wed, 19/1/11, Niko Mikkilä <nm@xxxxxxxx> wrote:

> From: Niko Mikkilä <nm@xxxxxxxx>
> Subject: Re: Deinterlace video (was: Replacing aging VDR for DVB-S2)
> To: "VDR Mailing List" <vdr@xxxxxxxxxxx>
> Date: Wednesday, 19 January, 2011, 10:48
> On Wed, 2011-01-19 at 10:18 +0000, Stuart Morris wrote:
> > My experience with an nVidia GT220 has been less than perfect. It can
> > perform temporal+spatial+inverse_telecine on HD video fast enough, but
> > my PC gets hot and it truly sucks at 2:2 pulldown detection. The result
> > of this is that when viewing progressive video encoded as interlaced
> > field pairs (2:2 pulldown), deinterlacing keeps cutting in and out
> > every second or so, ruining the picture quality.
> 
> I think VDPAU's inverse telecine is only meant for non-even cadences
> like 3:2. Motion-adaptive deinterlacing handles 2:2 pullup perfectly
> well, so try without IVTC.
> 
> 
> > IMHO the best way to go for a low-power HTPC is to decode in hardware
> > (e.g. VDPAU, VAAPI), but output interlaced video to your TV and let
> > the TV sort out deinterlacing and inverse telecine.
> 
> Well, flat panel TVs have deinterlacing algorithms similar to what
> VDPAU provides, but it would certainly be a nice alternative.
> 
> > These are the key requirements to achieve interlaced output:
> > 
> > Get the right modelines for your video card and TV. Draw interlaced
> > fields to your frame buffer at field rate and in the correct order
> > (top field first or bottom field first). When drawing a field to the
> > frame buffer, do not overwrite the previous field still in the frame
> > buffer. Maintain 1:1 vertical scaling (no vertical scaling), so you
> > will need to switch the video output mode to match the source video
> > height (480i, 576i or 1080i). Display the frame buffer at field rate,
> > synchronised to the graphics card's vertical sync. Finally, there is
> > NO requirement to synchronise fields: fields are always displayed in
> > the order they are written to the frame buffer, even if fields are
> > occasionally dropped.
> 
> Interesting. Could you perhaps write full instructions to some suitable
> wiki and post the code that you used to do this? I'm sure others would
> like to try it too.

I can provide the simple bit of source code I have used to demonstrate the basic principle. Please see attached.
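
As an example of the modeline requirement: the display has to be driven at
a native interlaced timing that matches the source height. A standard
CEA-861 timing for 1080i at 50 Hz would look like this in xorg.conf (shown
only as an illustration; check the numbers against your own TV's EDID):

    Modeline "1920x1080i_50" 74.25 1920 2448 2492 2640 1080 1084 1094 1125 interlace +hsync +vsync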



#include <stdio.h>
#include <stdlib.h>

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>

#include <SDL/SDL.h>
#include <GL/gl.h>

#ifdef __MINGW32__
#undef main /* Prevents SDL from overriding main() */
#endif

int main(int argc, char *argv[])
{
    AVFormatContext *pFormatCtx;
    int             i, j, k, videoStream;
    AVCodecContext  *pCodecCtx;
    AVCodec         *pCodec;
    AVFrame         *pFrame;
    AVPacket        packet;
    int             frameFinished;
    struct SwsContext *img_convert_ctx;
    float           aspect_ratio;
    int             frmcnt = 0;
    int             DispList;

    SDL_Surface     *screen;
    SDL_Event       event;

    if (argc < 2)
    {
        fprintf(stderr, "Usage: test <file>\n");
        exit(1);
    }
    // Register all formats and codecs
    av_register_all();

    if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER))
    {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    // Open video file
    if (av_open_input_file(&pFormatCtx, argv[1], NULL, 0, NULL)!=0)
        return -1; // Couldn't open file

    // Retrieve stream information
    if (av_find_stream_info(pFormatCtx)<0)
        return -1; // Couldn't find stream information

    // Dump information about file onto standard error
    dump_format(pFormatCtx, 0, argv[1], 0);

    // Find the first video stream
    videoStream=-1;
    for (i=0; i<pFormatCtx->nb_streams; i++)
        if (pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO)
        {
            videoStream=i;
            break;
        }
    if (videoStream==-1)
        return -1; // Didn't find a video stream

    // Get a pointer to the codec context for the video stream
    pCodecCtx=pFormatCtx->streams[videoStream]->codec;

    // Find the decoder for the video stream
    pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
    if (pCodec==NULL)
    {
        fprintf(stderr, "Unsupported codec!\n");
        return -1; // Codec not found
    }

    // Open codec
    pCodecCtx->thread_count = 2; // Use 2 processor cores if available
    if (avcodec_open(pCodecCtx, pCodec)<0)
        return -1; // Could not open codec

    // Allocate video frame
    pFrame=avcodec_alloc_frame();

    // Calculate aspect ratio
    if (pCodecCtx->sample_aspect_ratio.num == 0) aspect_ratio = 0.0;
    else aspect_ratio = av_q2d(pCodecCtx->sample_aspect_ratio) * pCodecCtx->width / pCodecCtx->height;
    if (aspect_ratio <= 0.0) aspect_ratio = (float)pCodecCtx->width / (float)pCodecCtx->height;
    fprintf(stderr, "Aspect ratio = %f by %d\n", pCodecCtx->height*aspect_ratio, pCodecCtx->height);

    int disp_height = pCodecCtx->height; // Must match the source height for 1:1 vertical scaling
    int disp_width = disp_height*aspect_ratio;
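
    // Ask the driver for vsynced buffer swaps before the window is created;
    // the field timing here depends on SDL_GL_SwapBuffers() blocking on
    // vertical retrace (SDL >= 1.2.10 attribute; some drivers override it)
    SDL_GL_SetAttribute(SDL_GL_SWAP_CONTROL, 1);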

    // Make a screen to put our video
#ifndef __DARWIN__
    screen = SDL_SetVideoMode(disp_width, disp_height, 0, SDL_OPENGL);
#else
    screen = SDL_SetVideoMode(disp_width, disp_height, 24, 0);
#endif
    if (!screen)
    {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        exit(1);
    }
    glViewport(0, 0, disp_width, disp_height);
    // Vertices below are specified in normalized device coordinates (-1..1),
    // so the default identity projection is used and no glOrtho() is needed
    // (the original glOrtho() call targeted the modelview matrix and was
    // wiped by the glLoadIdentity() in the frame loop anyway)
//    fprintf(stderr, "OpenGL extensions:\n%s\n", glGetString(GL_EXTENSIONS));
/*
    // Find maximum supported texture size
    for (i=0x400;i<=0x08000000;i+=0x400)
    {
        glTexImage1D(GL_PROXY_TEXTURE_1D, 0, 3, i, 0, GL_RGB, GL_UNSIGNED_BYTE, NULL);
        glGetTexLevelParameteriv(GL_PROXY_TEXTURE_1D, 0, GL_TEXTURE_WIDTH, &j);
        if (j==0)
        {
            fprintf(stderr, "Max 1D texture size = %d\n", i);
            break;
        }
    }
*/
    // Clear screen
    glClearColor(0.0f, 0.3f, 0.0f, 0.0f);
    glClear(GL_COLOR_BUFFER_BIT);
    glDisable(GL_DEPTH_TEST);

    // Allocate buffers to put our video fields
    AVPicture fyuv, frgb;
    // Determine required buffer size and allocate buffer
    // Field of YUV
    int numBytes=avpicture_get_size(pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height/2); // Size of 1 field of YUV
    avpicture_fill(&fyuv, (uint8_t *)av_malloc(numBytes*sizeof(uint8_t)),
                        pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height/2);
    // Field of RGB24
    numBytes=avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height/2); // Size of 1 field of RGB24
    avpicture_fill(&frgb, (uint8_t *)av_malloc(numBytes*sizeof(uint8_t)),
                        PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height/2);
    // Convert from 1 field of YUV to 1 field of RGB24
    img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height/2, pCodecCtx->pix_fmt,
                                     pCodecCtx->width, pCodecCtx->height/2, PIX_FMT_RGB24, (SWS_CPU_CAPS_MMX2 | SWS_POINT), NULL, NULL, NULL);
    if (img_convert_ctx == NULL)
    {
        fprintf(stderr, "Cannot initialize the conversion context!\n");
        return -1;
    }

    // Setup for texture method (support required for non power-of-2 texture maps)
    glEnable(GL_TEXTURE_2D);
    glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE);
    // Linear interpolation is wanted on the horizontal axis only, so the
    // vertical texture coordinates must be positioned carefully
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    // Texture is 1 rgb video field
    glTexImage2D(GL_TEXTURE_2D, 0, 3, pCodecCtx->width, pCodecCtx->height/2, 0, GL_RGB, GL_UNSIGNED_BYTE, frgb.data[0]);
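
    // The display lists below draw each texel row of the field texture as a
    // one-pixel GL_LINES span on alternate screen rows. The v texture
    // coordinates are placed on texel centres ((2k+1)/disp_height for a
    // texture of height disp_height/2), so GL_LINEAR does not blend
    // adjacent field lines vertically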

    DispList = glGenLists(2); // Reserve space for two display lists (top and bottom field)

    // Top field scan line display list
    int linecnt = 0;
    glNewList(DispList+0, GL_COMPILE);
        glBegin(GL_LINES);
        for (i=0; i<disp_height-1; i+=2)
        {
            glTexCoord2f(0.0, (i+1)/((float)disp_height));
            glVertex2f(-1.0, (disp_height-2*i-1)/((float)disp_height));
            glTexCoord2f(1.0, (i+1)/((float)disp_height));
            glVertex2f(1.0, (disp_height-2*i-1)/((float)disp_height));
            linecnt++;
        }
        glEnd();
    glEndList();
    fprintf(stderr, "top field lines = %d\n", linecnt);
    // Bottom field scan line display list
    linecnt = 0;
    glNewList(DispList+1, GL_COMPILE);
        glBegin(GL_LINES);
        for (i=0; i<disp_height-1; i+=2)
        {
            glTexCoord2f(0.0, (i+1)/((float)disp_height));
            glVertex2f(-1.0, (disp_height-2*(i+1)-1)/((float)disp_height));
            glTexCoord2f(1.0, (i+1)/((float)disp_height));
            glVertex2f(1.0, (disp_height-2*(i+1)-1)/((float)disp_height));
            linecnt++;
        }
        glEnd();
    glEndList();
    fprintf(stderr, "bot field lines = %d\n", linecnt);

    // Read frames
    while (av_read_frame(pFormatCtx, &packet)>=0)
    {
        // Is this a packet from the video stream?
        if (packet.stream_index==videoStream)
        {
            // Decode video frame
            avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);

            // Did we get a video frame?
            if (frameFinished)
            {
                frmcnt++;
                AVPicture psrc, pdst;
                // Display fields one at a time
                for (j=0; j<2; j++) // Assumes top field first; swap the order for bottom-field-first sources
                {
                    // Point to decoded frame (source) and yuv field buffer (destination)
                    for (i=0; i<3;i++)
                    {
                        if (j == 0) psrc.data[i] = pFrame->data[i];
                        else psrc.data[i] = pFrame->data[i] + pFrame->linesize[i];
                        psrc.linesize[i] = pFrame->linesize[i];
                        pdst.data[i] = fyuv.data[i];
                        pdst.linesize[i] = fyuv.linesize[i];
                    }
                    // Extract 1 video field
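                    // In 4:2:0 content the fields interleave line by line in
                    // every plane: per step, copy two lines of Y and one line
                    // each of U and V, skipping the other field's lines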
                    for (i=j; i<pCodecCtx->height; i+=4)
                    {
                        // 2 lines of Y (copy only the destination stride;
                        // the decoder's linesize may include padding)
                        memcpy(pdst.data[0], psrc.data[0], pdst.linesize[0]);
                        pdst.data[0]+=pdst.linesize[0]; psrc.data[0]+=(psrc.linesize[0] << 1);
                        memcpy(pdst.data[0], psrc.data[0], pdst.linesize[0]);
                        pdst.data[0]+=pdst.linesize[0]; psrc.data[0]+=(psrc.linesize[0] << 1);
                        // 1 line of U (corresponds to 2 lines of Y)
                        memcpy(pdst.data[1], psrc.data[1], pdst.linesize[1]);
                        pdst.data[1]+=pdst.linesize[1]; psrc.data[1]+=(psrc.linesize[1] << 1);
                        // 1 line of V (corresponds to 2 lines of Y)
                        memcpy(pdst.data[2], psrc.data[2], pdst.linesize[2]);
                        pdst.data[2]+=pdst.linesize[2]; psrc.data[2]+=(psrc.linesize[2] << 1);
                    }

                    // Convert the field into RGB format that OpenGL uses
                    sws_scale(img_convert_ctx, (const uint8_t**)fyuv.data, fyuv.linesize, 0,
                            pCodecCtx->height/2,  frgb.data, frgb.linesize);
                    // Draw white markers on the top field so the field order can be verified visually
                    if (j==0)
                    {
                        for (i=0; i<pCodecCtx->height/2; i+=8)
                        {
                            for(k=0; k<30;k++)
                            {
                                *(frgb.data[0]+frgb.linesize[0]*i+k) = 255;
                            }
                        }
                    }

                    glLoadIdentity();

                    // Display using texture map method
                    glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0 , pCodecCtx->width,
                                pCodecCtx->height/2, GL_RGB, GL_UNSIGNED_BYTE, frgb.data[0]);

                    glCallList(DispList+j);

                    // Display the framebuffer
                    SDL_GL_SwapBuffers();
//                    SDL_Delay(500);
                }
//                    SDL_Delay(2000);
            }
        }

        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);
        // Drain pending events; quit cleanly on window close
        // (the original read event.type even when no event was pending)
        while (SDL_PollEvent(&event))
        {
            if (event.type == SDL_QUIT)
            {
                SDL_Quit();
                exit(0);
            }
        }

    }

    fprintf(stderr,"Frame count %d\n", frmcnt);

    // Free the scaler context and field buffers
    sws_freeContext(img_convert_ctx);
    av_free(fyuv.data[0]);
    av_free(frgb.data[0]);

    // Free the decoded frame
    av_free(pFrame);

    // Close the codec
    avcodec_close(pCodecCtx);

    // Close the video file
    av_close_input_file(pFormatCtx);

    return 0;
}
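
For reference, a build line along these lines should work with the FFmpeg
and SDL 1.2 libraries of that era (the source file name is just an
example):

    gcc -O2 -o fieldtest fieldtest.c $(pkg-config --cflags --libs libavformat libavcodec libswscale) $(sdl-config --cflags --libs) -lGL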
_______________________________________________
vdr mailing list
vdr@xxxxxxxxxxx
http://www.linuxtv.org/cgi-bin/mailman/listinfo/vdr
