[gegl] Call avcodec_receive_*() functions in a loop to fetch all frames/packets



commit 6edbc5c3ff164341ba534def6fa99cd795d50092
Author: Behnam Momeni <sbmomeni gmail com>
Date:   Mon Mar 21 15:06:03 2022 +0330

    Call avcodec_receive_*() functions in a loop to fetch all frames/packets

 operations/external/ff-load.c | 201 +++++++++++++++++++++-------------------
 operations/external/ff-save.c | 208 +++++++++++++++++++++---------------------
 2 files changed, 211 insertions(+), 198 deletions(-)
---
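For context, the patch moves both files to FFmpeg's send/receive API as it is meant to be used: a single avcodec_send_packet() call may produce zero, one or several frames, so avcodec_receive_frame() has to be polled in a loop until it reports AVERROR(EAGAIN) (feed the next packet) or AVERROR_EOF (decoder drained). A minimal sketch of that pattern follows; decode_packet() is a hypothetical helper used only for illustration and is not part of this commit:

  #include <libavcodec/avcodec.h>

  /* Hypothetical helper (not in this commit) showing the send/receive
   * decode pattern: one packet may yield several frames. */
  static int
  decode_packet (AVCodecContext *ctx, const AVPacket *pkt, AVFrame *frame)
  {
    int ret = avcodec_send_packet (ctx, pkt);
    if (ret < 0)
      return ret;                    /* could not queue the packet */

    while (ret == 0)
      {
        ret = avcodec_receive_frame (ctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
          return 0;                  /* no more frames for this packet */
        else if (ret < 0)
          return ret;                /* real decoding error */

        /* ... consume the frame here (copy samples or pixels out) ... */
        av_frame_unref (frame);
      }
    return ret;
  }

The loops added to decode_audio() and decode_frame() below are this pattern inlined, with the per-frame work (copying audio samples, tracking pts) in place of the placeholder comment.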
diff --git a/operations/external/ff-load.c b/operations/external/ff-load.c
index c8889f342..2f658dd8f 100644
--- a/operations/external/ff-load.c
+++ b/operations/external/ff-load.c
@@ -211,7 +211,7 @@ decode_audio (GeglOperation *operation,
   while (p->prevapts <= pts2)
     {
       AVPacket  pkt = {0,};
-      int       decoded_bytes;
+      int       ret;
 
       if (av_read_frame (p->audio_fcontext, &pkt) < 0)
          {
@@ -221,86 +221,93 @@ decode_audio (GeglOperation *operation,
       if (pkt.stream_index==p->audio_index && p->audio_stream)
         {
           static AVFrame frame;
-          int got_frame;
 
-          decoded_bytes = avcodec_send_packet (p->audio_ctx, &pkt);
-          if (decoded_bytes < 0)
+          ret = avcodec_send_packet (p->audio_ctx, &pkt);
+          if (ret < 0)
             {
               fprintf (stderr, "avcodec_send_packet failed for %s\n",
                                 o->path);
             }
-          else
+          while (ret == 0)
             {
-              decoded_bytes = avcodec_receive_frame (p->audio_ctx, &frame);
-              if (decoded_bytes < 0)
+              ret = avcodec_receive_frame (p->audio_ctx, &frame);
+              if (ret == AVERROR(EAGAIN))
+                {
+                  // no more frames; should send the next packet now
+                  ret = 0;
+                  break;
+                }
+              else if (ret < 0)
                 {
                   fprintf (stderr, "avcodec_receive_frame failed for %s\n",
                                     o->path);
-                }
-              else
-                got_frame = 1;
-            }
-
-          if (got_frame) {
-            int samples_left = frame.nb_samples;
-            int si = 0;
-
-            while (samples_left)
-            {
-               int sample_count = samples_left;
-               int channels = MIN(p->audio_stream->codecpar->channels, GEGL_MAX_AUDIO_CHANNELS);
-               GeglAudioFragment *af = gegl_audio_fragment_new (o->audio_sample_rate, channels,
-                          AV_CH_LAYOUT_STEREO, samples_left);
-//);
-               switch (p->audio_ctx->sample_fmt)
-               {
-                 case AV_SAMPLE_FMT_FLT:
-                   for (gint i = 0; i < sample_count; i++)
-                     for (gint c = 0; c < channels; c++)
-                       af->data[c][i] = ((int16_t *)frame.data[0])[(i + si) * channels + c];
-                   break;
-                 case AV_SAMPLE_FMT_FLTP:
-                   for (gint i = 0; i < sample_count; i++)
-                     for (gint c = 0; c < channels; c++)
-                       {
-                         af->data[c][i] = ((float *)frame.data[c])[i + si];
-                       }
-                   break;
-                 case AV_SAMPLE_FMT_S16:
-                   for (gint i = 0; i < sample_count; i++)
-                     for (gint c = 0; c < channels; c++)
-                       af->data[c][i] = ((int16_t *)frame.data[0])[(i + si) * channels + c] / 32768.0;
-                   break;
-                 case AV_SAMPLE_FMT_S16P:
-                   for (gint i = 0; i < sample_count; i++)
-                     for (gint c = 0; c < channels; c++)
-                       af->data[c][i] = ((int16_t *)frame.data[c])[i + si] / 32768.0;
-                   break;
-                 case AV_SAMPLE_FMT_S32:
-                   for (gint i = 0; i < sample_count; i++)
-                     for (gint c = 0; c < channels; c++)
-                       af->data[c][i] = ((int32_t *)frame.data[0])[(i + si) * channels + c] / 2147483648.0;
-                  break;
-                case AV_SAMPLE_FMT_S32P:
-                   for (gint i = 0; i < sample_count; i++)
-                    for (gint c = 0; c < channels; c++)
-                      af->data[c][i] = ((int32_t *)frame.data[c])[i + si] / 2147483648.0;
                   break;
-                default:
-                  g_warning ("undealt with sample format\n");
                 }
-                gegl_audio_fragment_set_sample_count (af, sample_count);
-                gegl_audio_fragment_set_pos (af, 
-  (long int)av_rescale_q ((pkt.pts), p->audio_stream->time_base, AV_TIME_BASE_Q) * o->audio_sample_rate /AV_TIME_BASE);
-
-                p->audio_pos += sample_count;
-                p->audio_track = g_list_append (p->audio_track, af);
-
-                samples_left -= sample_count;
-                si += sample_count;
-              }
-            p->prevapts = pkt.pts * av_q2d (p->audio_stream->time_base);
-          }
+              int samples_left = frame.nb_samples;
+              int si = 0;
+
+              while (samples_left)
+              {
+                 int sample_count = samples_left;
+                 int channels = MIN(p->audio_stream->codecpar->channels, GEGL_MAX_AUDIO_CHANNELS);
+                 GeglAudioFragment *af = gegl_audio_fragment_new (o->audio_sample_rate, channels,
+                            AV_CH_LAYOUT_STEREO, samples_left);
+  //);
+                 switch (p->audio_ctx->sample_fmt)
+                 {
+                   case AV_SAMPLE_FMT_FLT:
+                     for (gint i = 0; i < sample_count; i++)
+                       for (gint c = 0; c < channels; c++)
+                         af->data[c][i] = ((int16_t *)frame.data[0])[(i + si) * channels + c];
+                     break;
+                   case AV_SAMPLE_FMT_FLTP:
+                     for (gint i = 0; i < sample_count; i++)
+                       for (gint c = 0; c < channels; c++)
+                         {
+                           af->data[c][i] = ((float *)frame.data[c])[i + si];
+                         }
+                     break;
+                   case AV_SAMPLE_FMT_S16:
+                     for (gint i = 0; i < sample_count; i++)
+                       for (gint c = 0; c < channels; c++)
+                         af->data[c][i] = ((int16_t *)frame.data[0])[(i + si) * channels + c] / 32768.0;
+                     break;
+                   case AV_SAMPLE_FMT_S16P:
+                     for (gint i = 0; i < sample_count; i++)
+                       for (gint c = 0; c < channels; c++)
+                         af->data[c][i] = ((int16_t *)frame.data[c])[i + si] / 32768.0;
+                     break;
+                   case AV_SAMPLE_FMT_S32:
+                     for (gint i = 0; i < sample_count; i++)
+                       for (gint c = 0; c < channels; c++)
+                         af->data[c][i] = ((int32_t *)frame.data[0])[(i + si) * channels + c] / 2147483648.0;
+                    break;
+                  case AV_SAMPLE_FMT_S32P:
+                     for (gint i = 0; i < sample_count; i++)
+                      for (gint c = 0; c < channels; c++)
+                        af->data[c][i] = ((int32_t *)frame.data[c])[i + si] / 2147483648.0;
+                    break;
+                  default:
+                    g_warning ("undealt with sample format\n");
+                  }
+                  gegl_audio_fragment_set_sample_count (af, sample_count);
+                  gegl_audio_fragment_set_pos (
+                    af,
+                    (long int)av_rescale_q (
+                      (pkt.pts),
+                      p->audio_stream->time_base,
+                      AV_TIME_BASE_Q
+                    ) * o->audio_sample_rate / AV_TIME_BASE
+                  );
+
+                  p->audio_pos += sample_count;
+                  p->audio_track = g_list_append (p->audio_track, af);
+
+                  samples_left -= sample_count;
+                  si += sample_count;
+                }
+              p->prevapts = pkt.pts * av_q2d (p->audio_stream->time_base);
+            }
         }
       av_packet_unref (&pkt);
     }
@@ -351,7 +358,7 @@ decode_frame (GeglOperation *operation,
       int       got_picture = 0;
       do
         {
-          int       decoded_bytes;
+          int       ret;
           AVPacket  pkt = {0,};
 
           do
@@ -365,42 +372,46 @@ decode_frame (GeglOperation *operation,
           }
           while (pkt.stream_index != p->video_index);
 
-          decoded_bytes = avcodec_send_packet (p->video_ctx, &pkt);
-          if (decoded_bytes < 0)
+          ret = avcodec_send_packet (p->video_ctx, &pkt);
+          if (ret < 0)
             {
               fprintf (stderr, "avcodec_send_packet failed for %s\n",
                        o->path);
               return -1;
             }
-          else
+          while (ret == 0)
             {
-              decoded_bytes = avcodec_receive_frame (p->video_ctx, p->lavc_frame);
-              if (decoded_bytes < 0)
+              ret = avcodec_receive_frame (p->video_ctx, p->lavc_frame);
+              if (ret == AVERROR(EAGAIN))
+                {
+                  // no more frames; should send the next packet now
+                  ret = 0;
+                  break;
+                }
+              else if (ret < 0)
                 {
                   fprintf (stderr, "avcodec_receive_frame failed for %s\n",
                                     o->path);
+                  break;
+                }
+              got_picture = 1;
+              if ((pkt.dts == pkt.pts) || (p->lavc_frame->key_frame!=0))
+                {
+                  p->lavc_frame->pts = (p->video_stream->cur_dts -
+                                        p->video_stream->first_dts);
+                  p->prevpts =  av_rescale_q (p->lavc_frame->pts,
+                                              p->video_stream->time_base,
+                                              AV_TIME_BASE_Q) * 1.0 / AV_TIME_BASE;
+                  decodeframe = roundf( p->prevpts * o->frame_rate);
                 }
               else
-                got_picture = 1;
+                {
+                  p->prevpts += 1.0 / o->frame_rate;
+                  decodeframe = roundf ( p->prevpts * o->frame_rate);
+                }
+              if (decodeframe > frame + p->codec_delay)
+                break;
             }
-
-          if(got_picture)
-          {
-             if ((pkt.dts == pkt.pts) || (p->lavc_frame->key_frame!=0))
-             {
-               p->lavc_frame->pts = (p->video_stream->cur_dts -
-                                     p->video_stream->first_dts);
-               p->prevpts =  av_rescale_q (p->lavc_frame->pts,
-                                           p->video_stream->time_base,
-                                           AV_TIME_BASE_Q) * 1.0 / AV_TIME_BASE;
-               decodeframe = roundf( p->prevpts * o->frame_rate);
-             }
-             else
-             {
-               p->prevpts += 1.0 / o->frame_rate;
-               decodeframe = roundf ( p->prevpts * o->frame_rate);
-             }
-          }
 #if 0
           if (decoded_bytes != pkt.size)
             fprintf (stderr, "bytes left!\n");
diff --git a/operations/external/ff-save.c b/operations/external/ff-save.c
index 23efb0887..cbd2957d6 100644
--- a/operations/external/ff-save.c
+++ b/operations/external/ff-save.c
@@ -413,16 +413,10 @@ static void encode_audio_fragments (Priv *p, AVFormatContext *oc, AVStream *st,
     AVCodecContext *c = p->audio_ctx;
     long i;
     int ret;
-    int got_packet = 0;
   static AVPacket  pkt = { 0 };  /* XXX: static, should be stored in instance somehow */
     AVFrame *frame = alloc_audio_frame (c->sample_fmt, c->channel_layout,
                                         c->sample_rate, frame_size);
 
-  if (pkt.size == 0)
-  {
-    av_init_packet (&pkt);
-  }
-
     av_frame_make_writable (frame);
     switch (c->sample_fmt) {
       case AV_SAMPLE_FMT_FLT:
@@ -491,25 +485,29 @@ static void encode_audio_fragments (Priv *p, AVFormatContext *oc, AVStream *st,
       {
         fprintf (stderr, "avcodec_send_frame failed: %s\n", av_err2str (ret));
       }
-    else
+    while (ret == 0)
       {
+        if (pkt.size == 0)
+          {
+            av_init_packet (&pkt);
+          }
         ret = avcodec_receive_packet (c, &pkt);
-        if (ret < 0)
+        if (ret == AVERROR(EAGAIN))
+          {
+            // no more packets; should send the next frame now
+          }
+        else if (ret < 0)
           {
             fprintf (stderr, "avcodec_receive_packet failed: %s\n", av_err2str (ret));
           }
         else
           {
-            got_packet = 1;
+            av_packet_rescale_ts (&pkt, c->time_base, st->time_base);
+            pkt.stream_index = st->index;
+            av_interleaved_write_frame (oc, &pkt);
+            av_packet_unref (&pkt);
           }
       }
-    if (got_packet)
-    {
-      av_packet_rescale_ts (&pkt, c->time_base, st->time_base);
-      pkt.stream_index = st->index;
-      av_interleaved_write_frame (oc, &pkt);
-      av_packet_unref (&pkt);
-    }
     av_frame_free (&frame);
     p->audio_read_pos += frame_size;
   }
@@ -878,23 +876,32 @@ write_video_frame (GeglProperties *o,
   else
 #endif
     {
-      /* encode the image */
-      AVPacket pkt2;
       int got_packet = 0;
-      av_init_packet(&pkt2);
-      pkt2.data = p->video_outbuf;
-      pkt2.size = p->video_outbuf_size;
-
-      out_size = avcodec_send_frame (c, picture_ptr);
-      if (!out_size)
+      int key_frame = 0;
+      ret = avcodec_send_frame (c, picture_ptr);
+      while (ret == 0)
         {
-          out_size = avcodec_receive_packet (c, &pkt2);
-          if (!out_size)
+          /* encode the image */
+          AVPacket pkt2;
+          av_init_packet(&pkt2);
+          // pkt2 will use its own buffer
+          // we may remove video_outbuf and video_outbuf_size too
+          //pkt2.data = p->video_outbuf;
+          //pkt2.size = p->video_outbuf_size;
+          ret = avcodec_receive_packet (c, &pkt2);
+          if (ret == AVERROR(EAGAIN))
             {
-              got_packet = 1;
+              // no more packets
+              ret = 0;
+              break;
             }
-        }
-
+          else if (ret < 0)
+            {
+              break;
+            }
+          // out_size = 0;
+          got_packet = 1;
+          key_frame = !!(pkt2.flags & AV_PKT_FLAG_KEY);
       if (!out_size && got_packet && c->coded_frame)
         {
           c->coded_frame->pts       = pkt2.pts;
@@ -902,38 +909,31 @@ write_video_frame (GeglProperties *o,
           if (c->codec->capabilities & AV_CODEC_CAP_INTRA_ONLY)
               c->coded_frame->pict_type = AV_PICTURE_TYPE_I;
         }
-
-      if (pkt2.side_data_elems > 0)
-        {
-          int i;
-          for (i = 0; i < pkt2.side_data_elems; i++)
-            av_free(pkt2.side_data[i].data);
-          av_freep(&pkt2.side_data);
-          pkt2.side_data_elems = 0;
-        }
-
-      if (!out_size)
-        out_size = pkt2.size;
-
-      /* if zero size, it means the image was buffered */
-      if (out_size != 0)
-        {
-          AVPacket  pkt;
-          av_init_packet (&pkt);
-          if (c->coded_frame->key_frame)
-            pkt.flags |= AV_PKT_FLAG_KEY;
-          pkt.stream_index = st->index;
-          pkt.data = p->video_outbuf;
-          pkt.size = out_size;
-          pkt.pts = picture_ptr->pts;
-          pkt.dts = picture_ptr->pts;
-          av_packet_rescale_ts (&pkt, c->time_base, st->time_base);
-          /* write the compressed frame in the media file */
-          ret = av_write_frame (oc, &pkt);
-        }
-      else
-        {
-          ret = 0;
+          if (pkt2.side_data_elems > 0)
+            {
+              int i;
+              for (i = 0; i < pkt2.side_data_elems; i++)
+                av_free(pkt2.side_data[i].data);
+              av_freep(&pkt2.side_data);
+              pkt2.side_data_elems = 0;
+            }
+          out_size = pkt2.size;
+          /* if zero size, it means the image was buffered */
+          if (out_size != 0)
+            {
+              AVPacket  pkt;
+              av_init_packet (&pkt);
+              if (key_frame)
+                pkt.flags |= AV_PKT_FLAG_KEY;
+              pkt.stream_index = st->index;
+              pkt.data = pkt2.data;
+              pkt.size = out_size;
+              pkt.pts = picture_ptr->pts;
+              pkt.dts = picture_ptr->pts;
+              av_packet_rescale_ts (&pkt, c->time_base, st->time_base);
+              /* write the compressed frame in the media file */
+              ret = av_write_frame (oc, &pkt);
+            }
         }
     }
   if (ret != 0)
@@ -1045,38 +1045,36 @@ static void flush_audio (GeglProperties *o)
 {
   Priv *p = (Priv*)o->user_data;
   AVPacket  pkt = { 0 };
-  int ret;
+  int ret = 0;
 
-  int got_packet = 0;
   if (!p->audio_st)
     return;
 
-  got_packet = 0;
-  av_init_packet (&pkt);
   ret = avcodec_send_frame (p->audio_ctx, NULL);
   if (ret < 0)
-  {
-    fprintf (stderr, "avcodec_send_frame failed\n");
-  }
-  else
+    {
+      fprintf (stderr, "avcodec_send_frame failed while entering to draining mode: %s\n", av_err2str (ret));
+    }
+  av_init_packet (&pkt);
+  while (ret == 0)
     {
       ret = avcodec_receive_packet (p->audio_ctx, &pkt);
-      if (ret < 0)
+      if (ret == AVERROR_EOF)
+        {
+          // no more packets
+        }
+      else if (ret < 0)
         {
-          fprintf (stderr, "avcodec_receive_packet failed\n");
+          fprintf (stderr, "avcodec_receive_packet failed: %s\n", av_err2str (ret));
         }
       else
         {
-          got_packet = 1;
+          pkt.stream_index = p->audio_st->index;
+          av_packet_rescale_ts (&pkt, p->audio_ctx->time_base, p->audio_st->time_base);
+          av_interleaved_write_frame (p->oc, &pkt);
+          av_packet_unref (&pkt);
         }
     }
-  if (got_packet)
-    {
-      pkt.stream_index = p->audio_st->index;
-      av_packet_rescale_ts (&pkt, p->audio_ctx->time_base, p->audio_st->time_base);
-      av_interleaved_write_frame (p->oc, &pkt);
-      av_packet_unref (&pkt);
-    }
 }
 
 static gboolean
@@ -1122,31 +1120,35 @@ process (GeglOperation       *operation,
 static void flush_video (GeglProperties *o)
 {
   Priv *p = (Priv*)o->user_data;
-  int got_packet = 0;
   long ts = p->frame_count;
-  do {
-    AVPacket  pkt = { 0 };
-    int ret;
-    got_packet = 0;
-    av_init_packet (&pkt);
-    ret = avcodec_send_frame (p->video_ctx, NULL);
-    if (ret < 0)
-      return;
-    ret = avcodec_receive_packet (p->video_ctx, &pkt);
-    if (ret < 0)
-      return;
-    got_packet = 1;
-
-     if (got_packet)
-     {
-       pkt.stream_index = p->video_st->index;
-       pkt.pts = ts;
-       pkt.dts = ts++;
-       av_packet_rescale_ts (&pkt, p->video_ctx->time_base, p->video_st->time_base);
-       av_interleaved_write_frame (p->oc, &pkt);
-       av_packet_unref (&pkt);
-     }
-  } while (got_packet);
+  AVPacket  pkt = { 0 };
+  int ret = 0;
+  ret = avcodec_send_frame (p->video_ctx, NULL);
+  if (ret < 0)
+    {
+      fprintf (stderr, "avcodec_send_frame failed while entering to draining mode: %s\n", av_err2str (ret));
+    }
+  av_init_packet (&pkt);
+  while (ret == 0)
+    {
+      ret = avcodec_receive_packet (p->video_ctx, &pkt);
+      if (ret == AVERROR_EOF)
+        {
+          // no more packets
+        }
+      else if (ret < 0)
+        {
+        }
+      else
+        {
+          pkt.stream_index = p->video_st->index;
+          pkt.pts = ts;
+          pkt.dts = ts++;
+          av_packet_rescale_ts (&pkt, p->video_ctx->time_base, p->video_st->time_base);
+          av_interleaved_write_frame (p->oc, &pkt);
+          av_packet_unref (&pkt);
+        }
+    }
 }
 
 static void


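The encoder side mirrors the decoder change: flush_audio() and flush_video() now enter draining mode by sending a NULL frame and then loop on avcodec_receive_packet() until AVERROR_EOF. A minimal sketch of that drain loop, with a hypothetical drain_encoder() helper (assumed name, not part of the commit):

  #include <libavcodec/avcodec.h>
  #include <libavformat/avformat.h>

  /* Hypothetical helper (not in this commit) mirroring
   * flush_audio()/flush_video(): a NULL frame enters draining mode,
   * then packets are pulled until AVERROR_EOF and written out. */
  static int
  drain_encoder (AVCodecContext *ctx, AVFormatContext *oc, AVStream *st)
  {
    AVPacket pkt = { 0 };
    int ret = avcodec_send_frame (ctx, NULL);     /* enter draining mode */
    if (ret < 0)
      return ret;

    av_init_packet (&pkt);
    while (ret == 0)
      {
        ret = avcodec_receive_packet (ctx, &pkt);
        if (ret == AVERROR_EOF)
          return 0;                               /* encoder fully drained */
        else if (ret < 0)
          return ret;

        av_packet_rescale_ts (&pkt, ctx->time_base, st->time_base);
        pkt.stream_index = st->index;
        ret = av_interleaved_write_frame (oc, &pkt);
        av_packet_unref (&pkt);
      }
    return ret;
  }

av_init_packet() is used here only to match the style of the patch; newer FFmpeg releases deprecate it in favour of av_packet_alloc()/av_packet_free().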