gimp-gap r764 - in trunk: . gap libgapvidapi libgapvidutil po vid_enc_avi vid_enc_ffmpeg vid_enc_rawframes
- From: wolfgangh svn gnome org
- To: svn-commits-list gnome org
- Subject: gimp-gap r764 - in trunk: . gap libgapvidapi libgapvidutil po vid_enc_avi vid_enc_ffmpeg vid_enc_rawframes
- Date: Sun, 29 Jun 2008 12:13:00 +0000 (UTC)
Author: wolfgangh
Date: Sun Jun 29 12:12:59 2008
New Revision: 764
URL: http://svn.gnome.org/viewvc/gimp-gap?rev=764&view=rev
Log:
Merged in current local development.
Added:
trunk/gap/gap_audio_extract.c
trunk/gap/gap_audio_extract.h
trunk/gap/gap_story_render_lossless.c (contents, props changed)
trunk/libgapvidutil/gap_gve_png.c
trunk/libgapvidutil/gap_gve_png.h
trunk/vid_enc_rawframes/
trunk/vid_enc_rawframes/Makefile.am
trunk/vid_enc_rawframes/gap_enc_rawframes_main.c
Modified:
trunk/ChangeLog
trunk/Makefile.am
trunk/configure.in
trunk/gap/Makefile.am
trunk/gap/gap_player_dialog.c
trunk/gap/gap_story_render_processor.c
trunk/gap/gap_story_render_processor.h
trunk/gap/gap_vex_exec.c
trunk/libgapvidapi/gap_vid_api.c
trunk/libgapvidapi/gap_vid_api.h
trunk/libgapvidapi/gap_vid_api_ffmpeg.c
trunk/libgapvidapi/gap_vid_api_util.c
trunk/libgapvidutil/Makefile.am
trunk/po/POTFILES.in
trunk/vid_enc_avi/gap_enc_avi_gui.c
trunk/vid_enc_avi/gap_enc_avi_main.c
trunk/vid_enc_avi/gap_enc_avi_main.h
trunk/vid_enc_ffmpeg/gap_enc_ffmpeg_main.c
Modified: trunk/Makefile.am
==============================================================================
--- trunk/Makefile.am (original)
+++ trunk/Makefile.am Sun Jun 29 12:12:59 2008
@@ -16,6 +16,7 @@
LIBGAPVIDUTIL=libgapvidutil
VID_COMMON=vid_common
VID_ENC_FFMPEG=vid_enc_ffmpeg
+VID_ENC_RAWFRAMES=vid_enc_rawframes
VID_ENC_SINGLE=vid_enc_single
VID_ENC_AVI=vid_enc_avi
endif
@@ -26,6 +27,7 @@
$(VID_COMMON) \
$(VID_ENC_AVI) \
$(VID_ENC_FFMPEG) \
+ $(VID_ENC_RAWFRAMES) \
$(VID_ENC_SINGLE)
EXTRA_DIST = \
Modified: trunk/configure.in
==============================================================================
--- trunk/configure.in (original)
+++ trunk/configure.in Sun Jun 29 12:12:59 2008
@@ -802,6 +802,7 @@
vid_common/Makefile
vid_enc_avi/Makefile
vid_enc_ffmpeg/Makefile
+vid_enc_rawframes/Makefile
vid_enc_single/Makefile
po/Makefile.in
docs/Makefile
Modified: trunk/gap/Makefile.am
==============================================================================
--- trunk/gap/Makefile.am (original)
+++ trunk/gap/Makefile.am Sun Jun 29 12:12:59 2008
@@ -237,6 +237,8 @@
gap_player_dialog.h \
gap_player_cache.c \
gap_player_cache.h \
+ gap_audio_extract.c \
+ gap_audio_extract.h \
gap_libgapstory.h \
gap_libgimpgap.h
@@ -266,6 +268,8 @@
gap_story_properties.h \
gap_story_att_trans_dlg.c \
gap_story_att_trans_dlg.h \
+ gap_audio_extract.c \
+ gap_audio_extract.h \
gap_player_main.h \
gap_player_dialog.c \
gap_player_dialog.h \
@@ -281,6 +285,8 @@
gap_vex_exec.h \
gap_vex_dialog.c \
gap_vex_dialog.h \
+ gap_audio_extract.c \
+ gap_audio_extract.h \
gap_player_main.h \
gap_player_dialog.c \
gap_player_dialog.h \
@@ -368,6 +374,7 @@
TESTPROT_iter_ALT \
gimplastvaldesc.c \
gimplastvaldesc.h \
+ gap_story_render_lossless.c \
iter_ALT/README_iter_subdirs \
iter_ALT/gen/plug_in_CML_explorer_iter_ALT.inc \
iter_ALT/gen/plug_in_alpha2color_iter_ALT.inc \
Added: trunk/gap/gap_audio_extract.c
==============================================================================
--- (empty file)
+++ trunk/gap/gap_audio_extract.c Sun Jun 29 12:12:59 2008
@@ -0,0 +1,436 @@
+/* gap_audio_extract.c
+ *
+ * GAP procedures to extract audio from a videofile
+ *
+ */
+
+/* The GIMP -- an image manipulation program
+ * Copyright (C) 1995 Spencer Kimball and Peter Mattis
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/* 2008.06.24 hof created (moved audio extract parts of gap_vex_exec.c to this module)
+ */
+
+/* SYSTEM (UNIX) includes */
+#include <stdio.h>
+#include <stdlib.h>
+#include <glib/gstdio.h>
+
+/* GIMP includes */
+#include "gtk/gtk.h"
+#include "libgimp/gimp.h"
+
+
+/* GAP includes */
+#include "gap_vid_api.h"
+#include "gap_audio_util.h"
+#include "gap_audio_wav.h"
+#include "gap_audio_extract.h"
+
+
+
+extern int gap_debug; /* ==0 ... don't print debug info */
+
+
+
+/* ---------------------
+ * p_init_progress
+ * ---------------------
+ */
+static void
+p_init_progress(const char *progressText
+ ,gboolean do_progress
+ ,GtkWidget *progressBar
+ )
+{
+ if (do_progress)
+ {
+ if (progressBar == NULL)
+ {
+ gimp_progress_init (progressText);
+ }
+ else
+ {
+ gtk_progress_bar_set_text(GTK_PROGRESS_BAR(progressBar), progressText);
+ gtk_progress_bar_set_fraction(GTK_PROGRESS_BAR(progressBar), 0);
+ }
+ }
+} /* end p_init_progress */
+
+
+/* ---------------------
+ * p_do_progress
+ * ---------------------
+ */
+static void
+p_do_progress(gdouble progressValue
+ ,gboolean do_progress
+ ,GtkWidget *progressBar // use NULL for gimp_progress
+ ,t_GVA_Handle *gvahand
+ ,gpointer user_data
+ )
+{
+ if (do_progress)
+ {
+ if (progressBar == NULL)
+ {
+ gimp_progress_update (progressValue);
+ }
+ else
+ {
+ gtk_progress_bar_set_fraction(GTK_PROGRESS_BAR(progressBar), progressValue);
+ }
+ }
+ if(gvahand->fptr_progress_callback)
+ {
+ gvahand->cancel_operation = (*gvahand->fptr_progress_callback)(progressValue, user_data);
+ }
+
+} /* end p_do_progress */
+
+
+/* -------------------------
+ * gap_audio_extract_as_wav
+ * -------------------------
+ * extract the specified number of samples at the current
+ * position of the specified (already opened) videohandle
+ * and optionally save the extracted audiodata as a RIFF WAVE file.
+ * (set wav_save to FALSE to skip writing to the wav file;
+ * this is typically used to perform a dummy read that just
+ * advances the current position in the videohandle)
+ */
+void
+gap_audio_extract_as_wav(const char *audiofile
+ , t_GVA_Handle *gvahand
+ , gdouble samples_to_read
+ , gboolean wav_save
+
+ , gboolean do_progress
+ , GtkWidget *progressBar // use NULL for gimp_progress
+ , t_GVA_progress_callback_fptr fptr_progress_callback
+ , gpointer user_data
+ )
+{
+#ifdef GAP_ENABLE_VIDEOAPI_SUPPORT
+ int l_audio_channels;
+ int l_sample_rate;
+ long l_audio_samples;
+ unsigned short *left_ptr;
+ unsigned short *right_ptr;
+ unsigned short *l_lptr;
+ unsigned short *l_rptr;
+ long l_to_read;
+ gint64 l_left_to_read;
+ long l_block_read;
+ gdouble l_progress;
+ FILE *fp_wav;
+
+ l_audio_channels = gvahand->audio_cannels;
+ l_sample_rate = gvahand->samplerate;
+ l_audio_samples = gvahand->total_aud_samples;
+
+ if(gap_debug)
+ {
+ printf("Channels:%d samplerate:%d samples:%d samples_to_read: %.0f\n"
+ , (int)l_audio_channels
+ , (int)l_sample_rate
+ , (int)l_audio_samples
+ , (float)samples_to_read
+ );
+ }
+
+ fp_wav = NULL;
+ if(wav_save)
+ {
+ fp_wav = g_fopen(audiofile, "wb");
+ }
+
+ if((fp_wav) || (!wav_save))
+ {
+ gint32 l_bytes_per_sample;
+ gint32 l_ii;
+
+ if(l_audio_channels == 1) { l_bytes_per_sample = 2;} /* mono */
+ else { l_bytes_per_sample = 4;} /* stereo */
+
+ if(wav_save)
+ {
+ /* write the header */
+ gap_audio_wav_write_header(fp_wav
+ , (gint32)samples_to_read
+ , l_audio_channels /* channels 1 or 2 */
+ , l_sample_rate
+ , l_bytes_per_sample
+ , 16 /* 16 bit sample resolution */
+ );
+ }
+ if(gap_debug) printf("samples_to_read:%d\n", (int)samples_to_read);
+
+ /* audio block read (blocksize covers playback time for 250 frames) */
+ l_left_to_read = samples_to_read;
+ l_block_read = (double)(250.0) / (double)gvahand->framerate * (double)l_sample_rate;
+ l_to_read = MIN(l_left_to_read, l_block_read);
+
+ /* allocate audio buffers */
+ left_ptr = g_malloc0((sizeof(short) * l_block_read) + 16);
+ right_ptr = g_malloc0((sizeof(short) * l_block_read) + 16);
+
+ while(l_to_read > 0)
+ {
+ l_lptr = left_ptr;
+ l_rptr = right_ptr;
+ /* read the audio data of channel 0 (left or mono) */
+ GVA_get_audio(gvahand
+ ,l_lptr /* Pointer to pre-allocated buffer of int16's */
+ ,1 /* Channel to decode */
+ ,(gdouble)l_to_read /* Number of samples to decode */
+ ,GVA_AMOD_CUR_AUDIO /* read from current audio position (and advance) */
+ );
+ if((l_audio_channels > 1) && (wav_save))
+ {
+ /* read the audio data of channel 2 (right)
+ * NOTE: GVA_get_audio has advanced the stream position,
+ * so we have to set GVA_AMOD_REREAD to read from
+ * the same startposition as for channel 1 (left).
+ */
+ GVA_get_audio(gvahand
+ ,l_rptr /* Pointer to pre-allocated buffer of int16's */
+ ,2 /* Channel to decode */
+ ,l_to_read /* Number of samples to decode */
+ ,GVA_AMOD_REREAD /* reread from the same start position */
+ );
+ }
+ l_left_to_read -= l_to_read;
+
+ if(wav_save)
+ {
+ /* write 16 bit wave datasamples
+ * sequence mono: (lo, hi)
+ * sequence stereo: (lo_left, hi_left, lo_right, hi_right)
+ */
+ for(l_ii=0; l_ii < l_to_read; l_ii++)
+ {
+ gap_audio_wav_write_gint16(fp_wav, *l_lptr);
+ l_lptr++;
+ if(l_audio_channels > 1)
+ {
+ gap_audio_wav_write_gint16(fp_wav, *l_rptr);
+ l_rptr++;
+ }
+ }
+ }
+
+
+ l_to_read = MIN(l_left_to_read, l_block_read);
+
+ /* calculate progress */
+ l_progress = (gdouble)(samples_to_read - l_left_to_read) / ((gdouble)samples_to_read + 1.0);
+ if(gap_debug) printf("l_progress:%f\n", (float)l_progress);
+
+ p_do_progress(l_progress, do_progress, progressBar, gvahand, user_data);
+ if(gvahand->cancel_operation)
+ {
+ printf("Audio extract was cancelled.\n");
+ break;
+ }
+ }
+
+ if(wav_save)
+ {
+ /* close wavfile */
+ fclose(fp_wav);
+ }
+
+ /* free audio buffers */
+ g_free(left_ptr);
+ g_free(right_ptr);
+ }
+#endif
+ return;
+} /* end gap_audio_extract_as_wav */
+
+
+/* ---------------------------------
+ * gap_audio_extract_from_videofile
+ * ---------------------------------
+ * extract the specified audiotrack to a WAVE file (name specified via audiofile),
+ * starting at the position specified by pos and pos_unit,
+ * for the length of extracted_frames (if the number of frames is exactly known)
+ * or of expected_frames (a guess, used if extracted_frames < 1).
+ * use do_progress flag value TRUE for progress feedback on the specified
+ * progressBar.
+ * (if progressBar is NULL gimp progress is used;
+ * this usually refers to the progress bar in the image window)
+ * Note:
+ * this feature is not present if compiled without GAP_ENABLE_VIDEOAPI_SUPPORT
+ */
+void
+gap_audio_extract_from_videofile(const char *videoname
+ , const char *audiofile
+ , gint32 audiotrack
+ , const char *preferred_decoder
+ , gint exact_seek
+ , t_GVA_PosUnit pos_unit
+ , gdouble pos
+ , gint32 extracted_frames
+ , gint32 expected_frames
+ , gboolean do_progress
+ , GtkWidget *progressBar
+ , t_GVA_progress_callback_fptr fptr_progress_callback
+ , gpointer user_data
+ )
+{
+#ifdef GAP_ENABLE_VIDEOAPI_SUPPORT
+ t_GVA_Handle *gvahand;
+
+ /* --------- OPEN the videofile --------------- */
+ gvahand = GVA_open_read_pref(videoname
+ ,1 /* videotrack (not relevant for audio) */
+ ,audiotrack
+ ,preferred_decoder
+ , FALSE /* use MMX if available (disable_mmx == FALSE) */
+ );
+ if(gvahand == NULL)
+ {
+ printf("Could not open videofile:%s\n", videoname);
+ return;
+ }
+
+
+ gvahand->image_id = -1; /* prevent the API from deleting that image at close */
+ gvahand->progress_cb_user_data = user_data;
+ gvahand->fptr_progress_callback = fptr_progress_callback;
+
+ /* ------ extract Audio ---------- */
+ if(gvahand->atracks > 0)
+ {
+ if (audiotrack > 0)
+ {
+ gdouble l_samples_to_read;
+
+ if(gap_debug)
+ {
+ printf("EXTRACTING audio, writing to file %s\n", audiofile);
+ }
+
+ if(gvahand->audio_cannels > 0)
+ {
+ /* seek is needed only if the extract does not start at pos 1 */
+ if(pos > 1)
+ {
+ p_init_progress(_("Seek Audio Position..."), do_progress, progressBar);
+
+ /* check for exact frame_seek */
+ if (exact_seek != 0)
+ {
+ gint32 l_seek_framenumber;
+
+
+ l_seek_framenumber = pos;
+ if(pos_unit == GVA_UPOS_PRECENTAGE)
+ {
+ l_seek_framenumber = gvahand->total_frames * pos;
+ }
+
+ l_samples_to_read = (gdouble)(l_seek_framenumber)
+ / (gdouble)gvahand->framerate * (gdouble)gvahand->samplerate;
+
+ /* extract just for exact positioning (without save to wav file) */
+ if(gap_debug)
+ {
+ printf("extract just for exact positioning (without save to wav file)\n");
+ }
+ gap_audio_extract_as_wav(audiofile
+ , gvahand
+ , l_samples_to_read
+ , FALSE /* wav_save */
+ , do_progress
+ , progressBar
+ , fptr_progress_callback
+ , user_data
+ );
+ }
+ else
+ {
+ /* audio pos is 1 frame before the video pos.
+ * example: extracting frames 1 up to 2
+ * results in audio range 0 up to 2;
+ * this way we can get the audio duration of frame 1
+ */
+ GVA_seek_audio(gvahand, pos -1, pos_unit);
+ }
+ }
+ if(gvahand->cancel_operation)
+ {
+ /* stop if we were cancelled (via request from fptr_progress_callback) */
+ return;
+ }
+
+ p_init_progress(_("Extracting Audio..."), do_progress, progressBar);
+
+
+ if(extracted_frames > 1)
+ {
+ l_samples_to_read = (gdouble)(extracted_frames +1.0)
+ / (gdouble)gvahand->framerate
+ * (gdouble)gvahand->samplerate;
+ if(gap_debug)
+ {
+ printf("A: l_samples_to_read %.0f extracted_frames:%d\n"
+ , (float)l_samples_to_read
+ ,(int)extracted_frames
+ );
+ }
+ }
+ else
+ {
+ l_samples_to_read = (gdouble)(expected_frames +1.0)
+ / (gdouble)gvahand->framerate
+ * (gdouble)gvahand->samplerate;
+ if(gap_debug)
+ {
+ printf("B: l_samples_to_read %.0f extracted_frames:%d expected_frames:%d\n"
+ ,(float)l_samples_to_read
+ ,(int)extracted_frames
+ ,(int)expected_frames
+ );
+ }
+ }
+
+ /* extract and save to wav file */
+ if(gap_debug)
+ {
+ printf("extract (with save to wav file)\n");
+ }
+
+ gap_audio_extract_as_wav(audiofile
+ , gvahand
+ , l_samples_to_read
+ , TRUE /* wav_save */
+ , do_progress
+ , progressBar
+ , fptr_progress_callback
+ , user_data
+ );
+ }
+
+ }
+ }
+#endif
+ return;
+} /* end gap_audio_extract_from_videofile */
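For orientation, the new module's entry point can be driven roughly like the sketch below. This is not part of the committed sources; the file paths are made up and GVA_UPOS_FRAMES is assumed to be the frame-based position unit of the GVA API.

    /* hypothetical usage sketch: extract audio track 1 for the first 250 frames
     * of an input video to a RIFF WAVE file, using gimp_progress for feedback.
     */
    #include "gap_audio_extract.h"

    static void
    example_extract_audio (void)
    {
      gap_audio_extract_from_videofile("/tmp/input.mpg"   /* videoname (made-up path) */
                , "/tmp/extracted.wav"  /* audiofile to create */
                , 1                     /* audiotrack */
                , NULL                  /* preferred_decoder: let the API choose */
                , 0                     /* exact_seek: 0 = fast, inexact positioning */
                , GVA_UPOS_FRAMES       /* pos_unit (assumed frame-based unit) */
                , 1.0                   /* pos: start at frame 1 */
                , 250                   /* extracted_frames (exactly known length) */
                , 250                   /* expected_frames (fallback guess) */
                , TRUE                  /* do_progress */
                , NULL                  /* progressBar: NULL uses gimp_progress */
                , NULL                  /* fptr_progress_callback */
                , NULL                  /* user_data */
                );
    }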
Added: trunk/gap/gap_audio_extract.h
==============================================================================
--- (empty file)
+++ trunk/gap/gap_audio_extract.h Sun Jun 29 12:12:59 2008
@@ -0,0 +1,73 @@
+/* gap_audio_extract.h
+ *
+ * GAP procedures to extract audio from a videofile
+ *
+ */
+/* 2008.06.24 hof created (moved audio extract parts of gap_vex_exec.c to this module)
+ */
+
+#ifndef GAP_AUDIO_EXTRACT_H
+#define GAP_AUDIO_EXTRACT_H
+
+#include "config.h"
+
+/* GIMP includes */
+#include "gtk/gtk.h"
+#include "libgimp/gimp.h"
+
+/* GAP includes */
+#include "gap_vid_api.h"
+#include "gap-intl.h"
+
+/* -------------------------
+ * gap_audio_extract_as_wav
+ * -------------------------
+ * extract the specified number of samples at the current
+ * position of the specified (already opened) videohandle
+ * and optionally save the extracted audiodata as a RIFF WAVE file.
+ * (set wav_save to FALSE to skip writing to the wav file;
+ * this is typically used to perform a dummy read that just
+ * advances the current position in the videohandle)
+ */
+void
+gap_audio_extract_as_wav(const char *audiofile
+ , t_GVA_Handle *gvahand
+ , gdouble samples_to_read
+ , gboolean wav_save
+ , gboolean do_progress
+ , GtkWidget *progressBar
+ , t_GVA_progress_callback_fptr fptr_progress_callback
+ , gpointer user_data
+ );
+
+/* ---------------------------------
+ * gap_audio_extract_from_videofile
+ * ---------------------------------
+ * extract the specified audiotrack to a WAVE file (name specified via audiofile),
+ * starting at the position specified by pos and pos_unit,
+ * for the length of extracted_frames (if the number of frames is exactly known)
+ * or of expected_frames (a guess, used if extracted_frames < 1).
+ * use do_progress flag value TRUE for progress feedback on the specified
+ * progressBar.
+ * (if progressBar is NULL gimp progress is used;
+ * this usually refers to the progress bar in the image window)
+ * Note:
+ * this feature is not present if compiled without GAP_ENABLE_VIDEOAPI_SUPPORT
+ */
+void
+gap_audio_extract_from_videofile(const char *videoname
+ , const char *audiofile
+ , gint32 audiotrack
+ , const char *preferred_decoder
+ , gint exact_seek
+ , t_GVA_PosUnit pos_unit
+ , gdouble pos
+ , gint32 extracted_frames
+ , gint32 expected_frames
+ , gboolean do_progress
+ , GtkWidget *progressBar
+ , t_GVA_progress_callback_fptr fptr_progress_callback
+ , gpointer user_data
+ );
+
+#endif /* end GAP_AUDIO_EXTRACT_H */
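The wav_save == FALSE mode documented above is what gap_audio_extract_from_videofile itself relies on for exact positioning: a dummy read that only advances the audio position of an already opened handle. A minimal sketch of that pattern, assuming gvahand was opened with GVA_open_read_pref as in the .c file above and an arbitrary frame count of 100:

    /* hypothetical sketch: skip the audio that corresponds to the first 100 frames
     * of an already opened video handle without writing any WAV data.
     */
    static void
    example_skip_audio (t_GVA_Handle *gvahand)
    {
      gdouble samples_to_skip;

      samples_to_skip = (gdouble)100
                      / (gdouble)gvahand->framerate
                      * (gdouble)gvahand->samplerate;  /* e.g. 100/25 * 44100 = 176400 samples */

      gap_audio_extract_as_wav("/tmp/extracted.wav"    /* not written while wav_save is FALSE */
                , gvahand
                , samples_to_skip
                , FALSE            /* wav_save: dummy read, only advances the audio position */
                , FALSE            /* do_progress */
                , NULL             /* progressBar */
                , NULL             /* fptr_progress_callback */
                , NULL             /* user_data */
                );
    }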
Modified: trunk/gap/gap_player_dialog.c
==============================================================================
--- trunk/gap/gap_player_dialog.c (original)
+++ trunk/gap/gap_player_dialog.c Sun Jun 29 12:12:59 2008
@@ -707,7 +707,10 @@
u_long samples; /* The number of samples in this file */
u_long datastart; /* The offset to the wav data */
- if (gap_debug) printf("p_audio_filename_changed to:%s:\n", gpp->audio_filename);
+ if (gap_debug)
+ {
+ printf("p_audio_filename_changed to:%s:\n", gpp->audio_filename);
+ }
p_audio_stop(gpp);
gpp->audio_status = MIN(gpp->audio_status, GAP_PLAYER_MAIN_AUSTAT_SERVER_STARTED);
@@ -741,7 +744,12 @@
gpp->audio_samples = samples;
p_audio_print_labels(gpp);
- p_audio_init(gpp); /* tell ausioserver to go standby for this audiofile */
+
+ /* p_audio_init(gpp) must not be called here.
+ * because it calls p_audio_startup_server, and this would recursively call
+ * p_audio_filename_changed in an endless loop.
+ * For now the audio_init is done later, on the 1st attempt to start playback.
+ */
#endif
return;
@@ -844,7 +852,10 @@
const char *cp;
gboolean wavplay_server_found;
- if (gap_debug) printf("p_audio_startup_server\n");
+ if (gap_debug)
+ {
+ printf("p_audio_startup_server\n");
+ }
wavplay_server_found = FALSE;
Added: trunk/gap/gap_story_render_lossless.c
==============================================================================
--- (empty file)
+++ trunk/gap/gap_story_render_lossless.c Sun Jun 29 12:12:59 2008
@@ -0,0 +1,1283 @@
+/* gap_story_render_lossless.c
+ *
+ *
+ * GAP storyboard pseudo rendering for lossless video cut.
+ * (includes checks if frames in the referenced video source
+ * can be provided 1:1 as raw data chunks to the calling encoder
+ * and performs the raw data chunk fetch where possible
+ * and requested by the calling encoder)
+ *
+ * Copyright (C) 2008 Wolfgang Hofer <hof gimp org>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+ * 2008.06.11 hof - created (moved stuff from the former gap_gve_story_render_processor to this new module)
+ */
+
+/* ----------------------------------------------------
+ * p_debug_print_vcodec_missmatch
+ * ----------------------------------------------------
+ */
+static void
+p_debug_print_vcodec_missmatch(const char *videofile
+ ,GapCodecNameElem *vcodec_list
+ ,const char *vcodec_name_chunk
+)
+{
+ printf("chunk_fetch not possible, video codec missmatch. ");
+ printf("videofile:");
+ if(videofile)
+ {
+ printf("%s", videofile);
+ }
+ else
+ {
+ printf("NULL");
+ }
+
+ printf(", vcodec_list:");
+ if(vcodec_list)
+ {
+ GapCodecNameElem *codec_elem;
+ for(codec_elem = vcodec_list; codec_elem != NULL; codec_elem = (GapCodecNameElem *) codec_elem->next)
+ {
+ if(codec_elem->codec_name)
+ {
+ printf("%s ", codec_elem->codec_name);
+ }
+ }
+ }
+ else
+ {
+ printf("NULL");
+ }
+
+
+ printf(", vcodec_name_chunk:");
+ if(vcodec_name_chunk)
+ {
+ printf("%s", vcodec_name_chunk);
+ }
+ else
+ {
+ printf("NULL");
+ }
+
+ printf("\n");
+
+} /* end p_debug_print_vcodec_missmatch */
+
+/* ----------------------------------------------------
+ * p_check_vcodec_name
+ * ----------------------------------------------------
+ * return TRUE if vcodec_name_chunk is found in the
+ * specified list of compatible codec names.
+ * (The vcodec_list is provided by the calling encoder
+ * and includes a list of compatible codec names for lossless video cut purposes)
+ * return FALSE if the vcodec check condition fails.
+ *
+ * Note: vcodec_name_chunk NULL (unknown) is never compatible,
+ * an empty list (vcodec_list == NULL) matches to
+ * all codec names (but not to unknown codec)
+ */
+static gboolean
+p_check_vcodec_name(gint32 check_flags
+ , GapCodecNameElem *vcodec_list, const char *vcodec_name_chunk)
+{
+ if (check_flags & GAP_VID_CHCHK_FLAG_VCODEC_NAME)
+ {
+ if(vcodec_name_chunk == NULL)
+ {
+ return (FALSE);
+ }
+
+ if(vcodec_list == NULL)
+ {
+ return (TRUE);
+ }
+ else
+ {
+ GapCodecNameElem *codec_elem;
+ for(codec_elem = vcodec_list; codec_elem != NULL; codec_elem = (GapCodecNameElem *) codec_elem->next)
+ {
+ if(codec_elem->codec_name)
+ {
+ if(strcmp(codec_elem->codec_name, vcodec_name_chunk) == 0)
+ {
+ return (TRUE);
+ }
+ }
+ }
+ }
+ return (FALSE);
+ }
+
+ return (TRUE);
+} /* end p_check_vcodec_name */
+
+
+/* ----------------------------------------------------
+ * p_check_flags_for_matching_vcodec
+ * ----------------------------------------------------
+ * set the GAP_VID_CHCHK_FLAG_VCODEC_NAME bit in check_flags_result if the
+ * vcodec names fit the specified check_flags conditions; leave it unset otherwise.
+ *
+ * Note: a vcodec_name_encoder value of * (or a NULL pointer) always matches.
+ */
+static void
+p_check_flags_for_matching_vcodec(gint32 check_flags, gint32 *check_flags_result
+ , const char *videofile, GapCodecNameElem *vcodec_list, const char *vcodec_name_chunk)
+{
+ if (TRUE == p_check_vcodec_name(check_flags
+ ,vcodec_list
+ ,vcodec_name_chunk))
+ {
+ *check_flags_result |= (check_flags & GAP_VID_CHCHK_FLAG_VCODEC_NAME);
+ }
+ else
+ {
+ if(gap_debug)
+ {
+ p_debug_print_vcodec_missmatch(videofile, vcodec_list, vcodec_name_chunk);
+
+ }
+ }
+
+} /* end p_check_flags_for_matching_vcodec */
+
+
+
+/* ----------------------------------------------------
+ * p_chunk_fetch_from_single_image
+ * ----------------------------------------------------
+ * This procedure fetches a frame image from disc,
+ * if possible and if the check conditions according to the specified check_flags
+ * are fulfilled.
+ *
+ * TODO: GAP_VID_CHCHK_FLAG_SIZE
+ * (would need to parse the image, or load it as an image and let gimp do the parsing;
+ * such a check can significantly reduce performance)
+ *
+ * return FALSE if fetch not possible or failed.
+ */
+static gboolean
+p_chunk_fetch_from_single_image(const char *videofile
+ , unsigned char *video_frame_chunk_data // IN/OUT
+ , gint32 video_frame_chunk_maxsize // IN
+ , gint32 *video_frame_chunk_hdr_size // OUT
+ , gint32 *video_frame_chunk_size // OUT
+ , GapCodecNameElem *vcodec_list // IN
+ , gint32 check_flags // IN
+ )
+{
+ const char *vcodec_name_chunk;
+ gint32 fileSize;
+ gint32 bytesRead;
+
+ *video_frame_chunk_size = 0;
+ *video_frame_chunk_hdr_size = 0;
+ vcodec_name_chunk = NULL;
+
+ /* no need to check GAP_VID_CHCHK_FLAG_FULL_FRAME
+ * (single image fetch is always a full frame)
+ */
+
+ if (check_flags & GAP_VID_CHCHK_FLAG_MPEG_INTEGRITY)
+ {
+ if(gap_debug)
+ {
+ printf("p_chunk_fetch_from_single_image: single image never fits MPEG_INTEGRITY (file:%s)\n"
+ ,videofile
+ );
+ }
+ return (FALSE);
+ }
+
+ fileSize = gap_file_get_filesize(videofile);
+ if (fileSize > video_frame_chunk_maxsize)
+ {
+ if(gap_debug)
+ {
+ printf("p_chunk_fetch_from_single_image: fileSize:%d biiger than max chunk buffer:%d (file:%s)\n"
+ ,(int)fileSize
+ ,(int)video_frame_chunk_maxsize
+ ,videofile
+ );
+ }
+ return (FALSE);
+ }
+
+ if (check_flags & GAP_VID_CHCHK_FLAG_JPG)
+ {
+
+ bytesRead = gap_file_load_file_segment(videofile
+ ,video_frame_chunk_data
+ ,0 /* seek_index, start byte of datasegment in file */
+ ,16 /* segment size in bytes (must be a multiple of 4) */
+ );
+ if(TRUE != GVA_util_check_jpg_picture( video_frame_chunk_data
+ , 32 /* video_frame_chunk_size */
+ , 0 /* max_check_size */
+ , video_frame_chunk_hdr_size))
+ {
+ if(gap_debug)
+ {
+ printf("p_chunk_fetch_from_single_image: not a JPEG file (file:%s)\n"
+ ,videofile
+ );
+ }
+ return (FALSE);
+ }
+ vcodec_name_chunk = "JPEG";
+ }
+
+ if (check_flags & GAP_VID_CHCHK_FLAG_PNG)
+ {
+
+ bytesRead = gap_file_load_file_segment(videofile
+ ,video_frame_chunk_data
+ ,0 /* seek_index, start byte of datasegment in file */
+ ,16 /* segment size in bytes (must be a multiple of 4) */
+ );
+ if(TRUE != GVA_util_check_png_picture( video_frame_chunk_data
+ , 32 /* video_frame_chunk_size */
+ , 0 /* max_check_size */
+ , video_frame_chunk_hdr_size))
+ {
+ if(gap_debug)
+ {
+ printf("p_chunk_fetch_from_single_image: not a PNG file (file:%s)\n"
+ ,videofile
+ );
+ }
+ return (FALSE);
+ }
+ vcodec_name_chunk = "PNG ";
+ }
+
+ if (vcodec_name_chunk == NULL)
+ {
+ if (check_flags & GAP_VID_CHCHK_FLAG_VCODEC_NAME)
+ {
+ vcodec_name_chunk = NULL; // TODO p_get_vcodec_name_by_magic_number ....
+ }
+ }
+
+ if (TRUE != p_check_vcodec_name(check_flags
+ ,vcodec_list
+ ,vcodec_name_chunk))
+ {
+ if(gap_debug)
+ {
+ p_debug_print_vcodec_missmatch(videofile, vcodec_list, vcodec_name_chunk);
+ }
+ return (FALSE);
+ }
+
+ bytesRead = gap_file_load_file_segment(videofile
+ ,video_frame_chunk_data
+ ,0 /* seek_index, start byte of datasegment in file */
+ ,fileSize /* segment size in bytes (must be a multiple of 4) */
+ );
+ if (bytesRead != fileSize)
+ {
+ printf("p_chunk_fetch_from_single_image: ERROR failed reading %d bytes from file. (bytesRead:%d) (file:%s)\n"
+ ,(int)fileSize
+ ,(int)bytesRead
+ ,videofile
+ );
+ return (FALSE);
+ }
+
+ *video_frame_chunk_size = bytesRead;
+ return (TRUE);
+
+} /* end p_chunk_fetch_from_single_image */
+
+
+
+
+
+/* ----------------------------------------------------
+ * p_check_chunk_fetch_possible
+ * ----------------------------------------------------
+ * This procedure checks the preconditions for a possible
+ * fetch of an already compressed frame chunk.
+ * (a frame chunk can be one raw frame chunk fetched from a videofile
+ * or a single image frame file that shall be loaded 1:1 into memory)
+ * - there is only 1 videoinput track at this master_frame_nr
+ * - the videoframe must match 1:1 in size
+ * - there are no transformations (opacity, offsets ....)
+ *
+ * return the name of the input videofile if preconditions are OK,
+ * or NULL if not.
+ */
+static char *
+p_check_chunk_fetch_possible(GapStoryRenderVidHandle *vidhand
+ , gint32 master_frame_nr /* starts at 1 */
+ , gint32 vid_width /* desired Video Width in pixels */
+ , gint32 vid_height /* desired Video Height in pixels */
+ , gint32 *video_frame_nr /* OUT: corresponding frame number in the input video */
+ , GapStoryRenderFrameRangeElem **frn_elem /* OUT: pointer to relevant frame range element */
+ )
+{
+ gint l_track;
+ gint32 l_track_min;
+ gint32 l_track_max;
+ gchar *l_framename;
+ gchar *l_videofile;
+ gdouble l_opacity;
+ gdouble l_scale_x;
+ gdouble l_scale_y;
+ gdouble l_move_x;
+ gdouble l_move_y;
+ GapStoryRenderFrameRangeElem *l_frn_elem_2;
+
+ gint32 l_localframe_index;
+ gint32 l_local_stepcount;
+ gboolean l_keep_proportions;
+ gboolean l_fit_width;
+ gboolean l_fit_height;
+ GapStoryRenderFrameType l_frn_type;
+ char *l_trak_filtermacro_file;
+ gdouble l_red_f;
+ gdouble l_green_f;
+ gdouble l_blue_f;
+ gdouble l_alpha_f;
+ gint l_cnt_active_tracks;
+
+
+ *video_frame_nr = -1;
+ *frn_elem = NULL;
+
+ l_videofile = NULL;
+ l_cnt_active_tracks = 0;
+
+ p_find_min_max_vid_tracknumbers(vidhand->frn_list, &l_track_min, &l_track_max);
+
+ /* find out if there is just one input track of type videofile
+ * (that possibly could be fetched as a compressed videoframe_chunk
+ * and passed 1:1 to the calling encoder)
+ */
+ for(l_track = MIN(GAP_STB_MAX_VID_INTERNAL_TRACKS, l_track_max); l_track >= MAX(0, l_track_min); l_track--)
+ {
+ l_framename = p_fetch_framename(vidhand->frn_list
+ , master_frame_nr /* starts at 1 */
+ , l_track
+ , &l_frn_type
+ , &l_trak_filtermacro_file
+ , &l_localframe_index /* used for ANIMIMAGE and Videoframe Number, -1 for all other types */
+ , &l_local_stepcount
+ , &l_keep_proportions
+ , &l_fit_width
+ , &l_fit_height
+ , &l_red_f
+ , &l_green_f
+ , &l_blue_f
+ , &l_alpha_f
+ , &l_opacity /* output opacity 0.0 upto 1.0 */
+ , &l_scale_x /* output 0.0 upto 10.0 where 1.0 is 1:1 */
+ , &l_scale_y /* output 0.0 upto 10.0 where 1.0 is 1:1 */
+ , &l_move_x /* output -1.0 upto 1.0 where 0.0 is centered */
+ , &l_move_y /* output -1.0 upto 1.0 where 0.0 is centered */
+ , &l_frn_elem_2 /* output selected to the relevant framerange element */
+ );
+
+ if(gap_debug)
+ {
+ printf("l_track:%d l_frn_type:%d\n", (int)l_track, (int)l_frn_type);
+ }
+
+ if(l_frn_type != GAP_FRN_SILENCE)
+ {
+ l_cnt_active_tracks++;
+ }
+
+ if((l_framename) || (l_frn_type == GAP_FRN_COLOR))
+ {
+ if(l_framename)
+ {
+ if((l_frn_type == GAP_FRN_MOVIE)
+ || (l_frn_type == GAP_FRN_IMAGE)
+ || (l_frn_type == GAP_FRN_FRAMES))
+ {
+ if(l_cnt_active_tracks == 1)
+ {
+ /* check for transformations */
+ if((l_opacity == 1.0)
+ && (l_scale_x == 1.0)
+ && (l_scale_y == 1.0)
+ && (l_move_x == 0.0)
+ && (l_move_y == 0.0)
+ && (l_fit_width)
+ && (l_fit_height)
+ && (!l_keep_proportions)
+ && (l_frn_elem_2->flip_request == GAP_STB_FLIP_NONE)
+ && (l_frn_elem_2->mask_name == NULL)
+ && (l_trak_filtermacro_file == NULL))
+ {
+ if(gap_debug)
+ {
+ printf("gap_story_render_fetch_composite_image_or_chunk: video:%s\n", l_framename);
+ }
+ l_videofile = g_strdup(l_framename);
+ *video_frame_nr = l_localframe_index;
+ *frn_elem = l_frn_elem_2;
+ }
+ else
+ {
+ if(gap_debug)
+ {
+ printf("gap_story_render_fetch_composite_image_or_chunk: there are transformations\n");
+ }
+ /* there are transformations, can't use the compressed frame */
+ l_videofile = NULL;
+ break;
+ }
+ }
+ else
+ {
+ if(gap_debug)
+ {
+ printf("gap_story_render_fetch_composite_image_or_chunk: 2 or more videotracks found\n");
+ }
+ l_videofile = NULL;
+ break;
+ }
+ }
+ else
+ {
+ l_videofile = NULL;
+ break;
+ }
+
+ g_free(l_framename);
+ }
+ else
+ {
+ l_videofile = NULL;
+ break;
+ }
+ }
+ /* else: (vid track not used) continue */
+
+ } /* end for loop over all video tracks */
+
+ return(l_videofile);
+} /* end p_check_chunk_fetch_possible */
+
+
+
+/* ----------------------------------------------------
+ * p_check_basic_chunk_fetch_conditions
+ * ----------------------------------------------------
+ * check some basic conditions for raw frame chunk fetching.
+ * return FALSE if fetch not possible.
+ */
+static gboolean
+p_check_basic_chunk_fetch_conditions(gint32 check_flags
+ , gint32 vid_width
+ , gint32 vid_height
+ , GapStoryRenderFrameRangeElem *frn_elem)
+{
+ if(GVA_has_video_chunk_proc(frn_elem->gvahand) != TRUE)
+ {
+ if(gap_debug)
+ {
+ printf("p_check_basic_chunk_fetch_conditions: Decoder does not support raw chunk fetching\n");
+ }
+ /* chunk fetch not possible because the used decoder implementation
+ * does not provide the required support.
+ */
+ return (FALSE);
+ }
+
+ if((check_flags & GAP_VID_CHCHK_FLAG_SIZE) != 0)
+ {
+ if((frn_elem->gvahand->width != vid_width)
+ || (frn_elem->gvahand->height != vid_height) )
+ {
+ if(gap_debug)
+ {
+ printf("p_check_basic_chunk_fetch_conditions: size (%d x %d) does not match required size (%d x %d)\n"
+ , (int)frn_elem->gvahand->width
+ , (int)frn_elem->gvahand->height
+ , (int)vid_width
+ , (int)vid_height
+ );
+ }
+ return (FALSE);
+ }
+ }
+
+ return (TRUE);
+} /* end p_check_basic_chunk_fetch_conditions */
+
+
+/* ----------------------------------------------------
+ * p_check_and_open_video_handle
+ * ----------------------------------------------------
+ *
+ */
+static void
+p_check_and_open_video_handle(GapStoryRenderFrameRangeElem *frn_elem
+ , GapStoryRenderVidHandle *vidhand
+ , gint32 master_frame_nr
+ , const gchar *videofile
+ )
+{
+ if(frn_elem->gvahand == NULL)
+ {
+ /* before we open a new GVA videohandle, lets check
+ * if another element has already opened this videofile,
+ * and reuse the already open gvahand handle if possible
+ */
+ frn_elem->gvahand = p_try_to_steal_gvahand(vidhand
+ , master_frame_nr
+ , frn_elem->basename
+ , frn_elem->exact_seek
+ );
+ if(frn_elem->gvahand == NULL)
+ {
+ if(vidhand->preferred_decoder)
+ {
+ frn_elem->gvahand = GVA_open_read_pref(videofile
+ , frn_elem->seltrack
+ , 1 /* aud_track */
+ , vidhand->preferred_decoder
+ , FALSE /* use MMX if available (disable_mmx == FALSE) */
+ );
+ }
+ else
+ {
+ frn_elem->gvahand = GVA_open_read(videofile
+ ,frn_elem->seltrack
+ ,1 /* aud_track */
+ );
+ }
+ if(frn_elem->gvahand)
+ {
+ GVA_set_fcache_size(frn_elem->gvahand, GAP_STB_RENDER_GVA_FRAMES_TO_KEEP_CACHED);
+
+ frn_elem->gvahand->do_gimp_progress = vidhand->do_gimp_progress;
+ if(frn_elem->exact_seek == 1)
+ {
+ /* configure the GVA Procedures for exact (but slow) seek emulation */
+ frn_elem->gvahand->emulate_seek = TRUE;
+ }
+ }
+ }
+ }
+} /* end p_check_and_open_video_handle */
+
+
+/* ----------------------------------------------------
+ * p_debug_dump_chunk_to_file
+ * ----------------------------------------------------
+ *
+ */
+ static void
+ p_debug_dump_chunk_to_file(const unsigned char *video_frame_chunk_data
+ , gint32 video_frame_chunk_size
+ , gint32 video_frame_nr
+ , gint32 master_frame_nr
+ )
+ {
+ FILE *fp;
+ char *fname;
+ char *l_env;
+ gint32 l_dump_chunk_frames;
+
+ l_dump_chunk_frames = 0;
+ l_env = g_getenv("GAP_DUMP_FRAME_CHUNKS");
+ if(l_env)
+ {
+ l_dump_chunk_frames = atol(l_env);
+ }
+
+ if (master_frame_nr > l_dump_chunk_frames)
+ {
+ return;
+ }
+
+ fname = g_strdup_printf("zz_chunk_data_%06d.dmp", (int)video_frame_nr);
+
+ printf("DEBUG: SAVING fetched raw chunk to file:%s\n", fname);
+
+ fp = g_fopen(fname, "wb");
+ if(fp)
+ {
+ fwrite(video_frame_chunk_data, video_frame_chunk_size, 1, fp);
+ fclose(fp);
+ }
+
+ g_free(fname);
+ } /* end p_debug_dump_chunk_to_file */
+
+
+
+/* ----------------------------------------------------
+ * p_story_attempt_fetch_chunk
+ * ----------------------------------------------------
+ * fetch the frame as an (already compressed) raw data chunk into the buffer
+ * that is provided by the caller
+ * (via parameter video_frame_chunk_data at size video_frame_chunk_maxsize).
+ * return NULL if this is not possible,
+ * else return the name of the referenced videofile.
+ */
+static gchar*
+p_story_attempt_fetch_chunk(GapStoryRenderVidHandle *vidhand
+ , gint32 master_frame_nr /* starts at 1 */
+ , gint32 vid_width /* desired Video Width in pixels */
+ , gint32 vid_height /* desired Video Height in pixels */
+ , GapCodecNameElem *vcodec_list /* IN: list of video_codec names that are compatible to the calling encoder program */
+ , unsigned char *video_frame_chunk_data /* OUT: */
+ , gint32 *video_frame_chunk_size /* OUT: total size of frame (may include a videoformat specific frameheader)*/
+ , gint32 video_frame_chunk_maxsize /* IN: sizelimit (larger chunks are not fetched) */
+ , gdouble master_framerate
+ , gint32 max_master_frame_nr /* the number of frames that will be encode in total */
+ , gint32 *video_frame_chunk_hdr_size /* OUT: size of videoformat specific frameheader (0 if has no hdr) */
+ , gint32 check_flags /* IN: combination of GAP_VID_CHCHK_FLAG_* flag values */
+
+ , gboolean *last_fetch_was_compressed_chunk
+ , const char *last_videofile
+
+ )
+{
+#define GAP_MPEG_ASSUMED_REFERENCE_DISTANCE 3
+ static gint32 last_video_frame_nr = -1;
+
+ gchar *l_framename;
+ gchar *l_videofile;
+ GapStoryRenderFrameRangeElem *l_frn_elem;
+ GapStoryRenderFrameRangeElem *l_frn_elem_2;
+ GapStoryRenderFrameType l_curr_frn_type;
+
+ gint32 l_video_frame_nr;
+
+
+ l_frn_elem = NULL;
+ *video_frame_chunk_size = 0;
+ *video_frame_chunk_hdr_size = 0; /* assume chunk contains no frame header */
+
+
+ l_videofile = NULL; /* NULL: also used as flag for "MUST fetch regular uncompressed frame" */
+ l_framename = NULL;
+ l_video_frame_nr = 1;
+
+
+ l_curr_frn_type = GAP_FRN_SILENCE;
+
+ l_videofile = p_check_chunk_fetch_possible(vidhand
+ , master_frame_nr
+ , vid_width
+ , vid_height
+ , &l_video_frame_nr
+ , &l_frn_elem
+ );
+
+ if((l_videofile) && (l_frn_elem) )
+ {
+ l_curr_frn_type = l_frn_elem->frn_type;
+ if (l_curr_frn_type != GAP_FRN_MOVIE)
+ {
+ gboolean l_singleFetchOk;
+ if(gap_debug)
+ {
+ printf("p_story_attempt_fetch_chunk: MASTER_FRAME_NR: %d refers to imagefile :%s \n"
+ , (int)master_frame_nr
+ , l_videofile
+ );
+ }
+ last_video_frame_nr = -1;
+ l_singleFetchOk = p_chunk_fetch_from_single_image(l_videofile
+ , video_frame_chunk_data
+ , video_frame_chunk_maxsize
+ , video_frame_chunk_hdr_size
+ , video_frame_chunk_size
+ , vcodec_list
+ , check_flags
+ );
+ if (l_singleFetchOk == TRUE)
+ {
+ /* passed all requested checks */
+ return(l_videofile);
+ }
+ g_free(l_videofile);
+ l_videofile = NULL;
+ return (NULL);
+ }
+ }
+
+
+ if(l_curr_frn_type == GAP_FRN_MOVIE)
+ {
+ if(gap_debug)
+ {
+ printf("p_story_attempt_fetch_chunk: MASTER_FRAME_NR: %d refers to videofile :%s \n"
+ , (int)master_frame_nr
+ , l_videofile
+ );
+ }
+
+ p_check_and_open_video_handle(l_frn_elem, vidhand, master_frame_nr, l_videofile);
+
+ if(l_frn_elem->gvahand)
+ {
+ /* check if framesize matches 1:1 to output video size
+ * and if the videodecoder does support a read procedure for compressed video chunks
+ * TODO: should also check for compatible vcodec_name
+ * (cannot check that, because libmpeg3 does not deliver vcodec_name information
+ * and there is no implementation to fetch compressed chunks in other decoders)
+ */
+
+ if (p_check_basic_chunk_fetch_conditions(check_flags, vid_width, vid_height, l_frn_elem) != TRUE)
+ {
+ if(gap_debug)
+ {
+ printf("p_story_attempt_fetch_chunk: MASTER_FRAME_NR: %d basic conditions NOT OK (no chunk fetch possible)\n"
+ ,(int)master_frame_nr
+ );
+ }
+ }
+ else
+ {
+ t_GVA_RetCode l_fcr;
+
+ if(gap_debug)
+ {
+ printf("p_story_attempt_fetch_chunk: MASTER_FRAME_NR: %d video_frame_nr:%d performing CHUNK fetch\n"
+ ,(int)master_frame_nr
+ ,(int)l_video_frame_nr
+ );
+ }
+
+ /* FETCH compressed video chunk
+ * (on successful fetch the chunk contains (at least) one frame, and may start with
+ * an MPEG typical sequence header and/or GOP header, Picture Header)
+ */
+ l_fcr = GVA_get_video_chunk(l_frn_elem->gvahand
+ , l_video_frame_nr
+ , video_frame_chunk_data
+ , video_frame_chunk_size
+ , video_frame_chunk_maxsize
+ );
+
+ if(gap_debug)
+ {
+ printf("p_story_attempt_fetch_chunk: AFTER CHUNK fetch max:%d chunk_data:%d chunk_size:%d\n"
+ ,(int)video_frame_chunk_maxsize
+ ,(int)video_frame_chunk_data
+ ,(int)*video_frame_chunk_size
+ );
+ }
+
+ if(l_fcr == GVA_RET_OK)
+ {
+ gint l_frame_type;
+ gint32 check_flags_result;
+ gint32 check_flags_mask;
+ gboolean is_mpeg_integrity_check_done;
+ const char *vcodec_name_chunk;
+
+ vcodec_name_chunk = GVA_get_codec_name(l_frn_elem->gvahand
+ ,GVA_VIDEO_CODEC
+ ,l_frn_elem->seltrack
+ );
+
+ is_mpeg_integrity_check_done = FALSE;
+ check_flags_result = check_flags & GAP_VID_CHCHK_FLAG_SIZE;
+
+ p_check_flags_for_matching_vcodec(check_flags, &check_flags_result
+ , l_videofile
+ , vcodec_list
+ , vcodec_name_chunk
+ );
+
+ if(vcodec_name_chunk)
+ {
+ g_free(vcodec_name_chunk);
+ vcodec_name_chunk = NULL;
+ }
+
+ l_frame_type = GVA_util_check_mpg_frame_type(video_frame_chunk_data
+ ,*video_frame_chunk_size
+ );
+ if(gap_debug)
+ {
+ printf("\nfetched CHUNK with l_frame_type %d(1=I,2=P,3=B)\n", (int)l_frame_type);
+ }
+
+ /* debug code: dump first video chunks to file(s) */
+ p_debug_dump_chunk_to_file(video_frame_chunk_data
+ , *video_frame_chunk_size
+ , l_video_frame_nr
+ , master_frame_nr
+ );
+
+ if (l_frame_type != GVA_MPGFRAME_UNKNOWN)
+ {
+ /* known MPEG specific frameheader information is present
+ * (typical when chunk was read via libmpeg3)
+ * in this case try to fix timecode information in the header.
+ */
+ GVA_util_fix_mpg_timecode(video_frame_chunk_data
+ ,*video_frame_chunk_size
+ ,master_framerate
+ ,master_frame_nr
+ );
+ *video_frame_chunk_hdr_size = GVA_util_calculate_mpeg_frameheader_size(video_frame_chunk_data
+ , *video_frame_chunk_size
+ );
+ }
+
+ check_flags_mask = check_flags & (GAP_VID_CHCHK_FLAG_MPEG_INTEGRITY | GAP_VID_CHCHK_FLAG_FULL_FRAME);
+ if ((l_frame_type == GVA_MPGFRAME_I_TYPE)
+ && (check_flags_mask))
+ {
+ is_mpeg_integrity_check_done = TRUE;
+ /* intra frame has no dependencies to other frames
+ * can use that frame type at any place in an MPEG stream
+ * (or save it as JPEG)
+ */
+ *last_fetch_was_compressed_chunk = TRUE;
+
+ if(gap_debug)
+ {
+ printf("\nReuse I-FRAME at %06d,", (int)master_frame_nr);
+ }
+
+ check_flags_result |= check_flags_mask;
+ }
+
+ check_flags_mask = check_flags & (GAP_VID_CHCHK_FLAG_JPG | GAP_VID_CHCHK_FLAG_FULL_FRAME | GAP_VID_CHCHK_FLAG_MPEG_INTEGRITY);
+ if (check_flags_mask)
+ {
+ if(TRUE == GVA_util_check_jpg_picture( video_frame_chunk_data
+ , *video_frame_chunk_size
+ , 32 /* max_check_size */
+ , video_frame_chunk_hdr_size))
+ {
+ check_flags_result |= check_flags_mask;
+ }
+ }
+
+ check_flags_mask = check_flags & (GAP_VID_CHCHK_FLAG_PNG | GAP_VID_CHCHK_FLAG_FULL_FRAME);
+ if (check_flags_mask)
+ {
+ if(TRUE == GVA_util_check_png_picture( video_frame_chunk_data
+ , *video_frame_chunk_size
+ , 32 /* max_check_size */
+ , video_frame_chunk_hdr_size))
+ {
+ check_flags_result |= check_flags_mask;
+ }
+ }
+
+ check_flags_mask = check_flags & GAP_VID_CHCHK_FLAG_MPEG_INTEGRITY;
+ if ((l_frame_type == GVA_MPGFRAME_P_TYPE)
+ && (check_flags_mask))
+ {
+ is_mpeg_integrity_check_done = TRUE;
+ /* predicted frame has dependencies to the previous intra frame
+ * can use that frame if fetch sequence contains previous i frame
+ */
+ if(last_videofile)
+ {
+ /* check if frame is the next in sequence in the same videofile */
+ if((strcmp(l_videofile, last_videofile) == 0)
+ && (l_video_frame_nr == last_video_frame_nr +1))
+ {
+ *last_fetch_was_compressed_chunk = TRUE;
+
+ if(gap_debug)
+ {
+ printf("P,");
+ // printf(" Reuse P-FRAME Chunk at %06d\n", (int)master_frame_nr);
+ }
+ check_flags_result |= check_flags_mask;
+ }
+ }
+ }
+
+ check_flags_mask = check_flags & GAP_VID_CHCHK_FLAG_MPEG_INTEGRITY;
+ if (((l_frame_type == GVA_MPGFRAME_B_TYPE)
+ || (l_frame_type == GVA_MPGFRAME_P_TYPE))
+ && (is_mpeg_integrity_check_done != TRUE)
+ && (check_flags_mask))
+ {
+ is_mpeg_integrity_check_done = TRUE;
+ /* bi-directional predicted frame has dependencies both to
+ * the previous intra frame or p-frame and to the following i or p-frame.
+ *
+ * can use that frame if fetch sequence contains previous i frame
+ * and fetch will continue until the next i or p frame.
+ *
+ * we do a simplified check if the next few (say 3) frames in storyboard sequence
+ * will fetch the next (3) frames in videofile sequence from the same videofile.
+ * this is just a guess, but maybe sufficient in most cases.
+ */
+ if(last_videofile)
+ {
+ gboolean l_bframe_ok;
+
+ l_bframe_ok = FALSE;
+
+ /* check if frame is the next in sequence in the same videofile */
+ if((strcmp(l_videofile, last_videofile) == 0)
+ && (l_video_frame_nr == last_video_frame_nr +1))
+ {
+ /* B-frames are not reused for the last few frames in the output video.
+ * (unresolved references to following p or i frames of the
+ * input video could be the result)
+ */
+ if(master_frame_nr + GAP_MPEG_ASSUMED_REFERENCE_DISTANCE <= max_master_frame_nr)
+ {
+ gint ii;
+ gint32 l_next_video_frame_nr;
+ char *l_next_videofile;
+
+ l_bframe_ok = TRUE; /* now assume that B-frame may be used */
+
+ /* look ahead if the next few fetches in storyboard sequence
+ * will deliver the next frames from the same inputvideo
+ * in ascending input_video sequence at stepsize 1
+ * (it is assumed that the referenced P or I frame
+ * will be fetched in later calls then)
+ */
+ for(ii=1; ii <= GAP_MPEG_ASSUMED_REFERENCE_DISTANCE; ii++)
+ {
+ gboolean is_next_video_the_same;
+
+ is_next_video_the_same = FALSE;
+ l_next_videofile = p_check_chunk_fetch_possible(vidhand
+ , (master_frame_nr + ii)
+ , vid_width
+ , vid_height
+ , &l_next_video_frame_nr
+ , &l_frn_elem_2
+ );
+ if(l_next_videofile)
+ {
+ if (strcmp(l_next_videofile, l_videofile) == 0)
+ {
+ is_next_video_the_same = TRUE;
+ }
+ g_free(l_next_videofile);
+ }
+ if((is_next_video_the_same == TRUE) && (l_frn_elem_2))
+ {
+ if((l_next_video_frame_nr != l_video_frame_nr +ii)
+ || (l_frn_elem_2->frn_type != GAP_FRN_MOVIE))
+ {
+ l_bframe_ok = FALSE;
+ }
+ }
+ else
+ {
+ l_bframe_ok = FALSE;
+ }
+ if(!l_bframe_ok)
+ {
+ break;
+ }
+
+ } /* end for loop (look ahead next few frames in storyboard sequence) */
+ }
+
+ if(gap_debug)
+ {
+ if(l_bframe_ok) printf("Look Ahead B-FRAME OK to copy\n");
+ else printf("Look Ahead B-FRAME dont USE\n");
+ }
+
+ if(l_bframe_ok)
+ {
+ *last_fetch_was_compressed_chunk = TRUE;
+
+ if(gap_debug)
+ {
+ if (l_frame_type == GVA_MPGFRAME_B_TYPE)
+ {
+ printf("B,");
+ }
+ else
+ {
+ printf("p,");
+ }
+ printf(" Reuse B-FRAME Chunk at %06d\n", (int)master_frame_nr);
+ }
+ check_flags_result |= check_flags_mask;
+ }
+ }
+ }
+ }
+
+ last_video_frame_nr = l_video_frame_nr;
+
+ if(check_flags_result == check_flags)
+ {
+ if(gap_debug)
+ {
+ printf("oo OK, Reuse of fetched CHUNK type: %d (1=I/2=P/3=B) masterFrameNr:%d frame_nr:%d (last_frame_nr:%d) \n"
+ " check_flags_result:%d (requested check_flags:%d)\n"
+ ,(int)l_frame_type
+ ,(int)master_frame_nr
+ ,(int)l_video_frame_nr
+ ,(int)last_video_frame_nr
+ ,(int)check_flags_result
+ ,(int)check_flags
+ );
+ }
+ /* passed all requested checks */
+ return(l_videofile);
+ }
+
+ if(gap_debug)
+ {
+ printf("** sorry, no reuse of fetched CHUNK type: %d (1=I/2=P/3=B) masterFrameNr:%d frame_nr:%d (last_frame_nr:%d) \n"
+ " check_flags_result:%d (requeste) check_flags:%d\n"
+ ,(int)l_frame_type
+ ,(int)master_frame_nr
+ ,(int)l_video_frame_nr
+ ,(int)last_video_frame_nr
+ ,(int)check_flags_result
+ ,(int)check_flags
+ );
+ }
+
+ }
+ else
+ {
+ last_video_frame_nr = -1;
+ if(gap_debug)
+ {
+ printf("**# sorry, no reuse fetch failed frame_nr:%d (last_frame_nr:%d)\n"
+ ,(int)l_video_frame_nr
+ ,(int)last_video_frame_nr
+ );
+ }
+ }
+ }
+ }
+ }
+
+ *last_fetch_was_compressed_chunk = FALSE;
+ *video_frame_chunk_size = 0;
+
+ if (l_videofile != NULL)
+ {
+ g_free(l_videofile);
+ l_videofile = NULL;
+ }
+ return (NULL); /* Chunk fetch Not possible */
+
+} /* end p_story_attempt_fetch_chunk */
+
+
+
+
+/* ----------------------------------------------------
+ * gap_story_render_fetch_composite_image_or_chunk
+ * ----------------------------------------------------
+ *
+ * fetch composite VIDEO Image at a given master_frame_nr
+ * within a storyboard framerange list
+ *
+ * if desired (and possible) try to fetch the (already compressed) frame chunk directly from
+ * an input videofile for the master_frame_nr.
+ *
+ * This procedure is typically used in encoders that support lossless video cut.
+ *
+ * the compressed fetch depends on following conditions:
+ * - dont_recode_flag == TRUE
+ * - there is only 1 videoinput track at this master_frame_nr
+ * - the videodecoder must support a read_video_chunk procedure
+ * (libmpeg3 has this support; for libavformat the support is available via the gap video api)
+ * TODO: for future releases should also check for the same vcodec_name)
+ * - the videoframe must match 1:1 in size
+ * - there are no transformations (opacity, offsets ....)
+ * - there are no filtermacros to perform on the fetched frame
+ *
+ * check_flags:
+ * force checks if the corresponding bit value is set. Supported bit values are:
+ * GAP_VID_CHCHK_FLAG_SIZE check if width and height are equal
+ * GAP_VID_CHCHK_FLAG_MPEG_INTEGRITY checks for MPEG P and B frames if the sequence of fetched frames
+ * also includes the referred I frame (before or after the current
+ * handled frame)
+ * GAP_VID_CHCHK_FLAG_JPG check if the fetched chunk is a jpeg encoded frame.
+ * (typical for MPEG I frames)
+ * GAP_VID_CHCHK_FLAG_VCODEC_NAME check for a compatible vcodec_name
+ *
+ *
+ * RETURN TRUE on success, FALSE on ERRORS
+ * if an already compressed video_frame_chunk was fetched then return the size of the chunk
+ * in the *video_frame_chunk_size OUT Parameter.
+ * (both *image_id and *layer_id will deliver -1 in that case)
+ * if a composite image was fetched deliver its id in the *image_id OUT parameter
+ * and the id of the only layer in the *layer_id OUT Parameter
+ * the *force_keyframe OUT parameter tells the calling encoder to write an I-Frame
+ * (*video_frame_chunk_size will deliver 0 in that case)
+ */
+gboolean
+gap_story_render_fetch_composite_image_or_chunk(GapStoryRenderVidHandle *vidhand
+ , gint32 master_frame_nr /* starts at 1 */
+ , gint32 vid_width /* desired Video Width in pixels */
+ , gint32 vid_height /* desired Video Height in pixels */
+ , char *filtermacro_file /* NULL if no filtermacro is used */
+ , gint32 *layer_id /* output: Id of the only layer in the composite image */
+
+ , gint32 *image_id /* output: Id of the composite image */
+ , gboolean dont_recode_flag /* IN: TRUE try to fetch compressed chunk if possible */
+ , GapCodecNameElem *vcodec_list /* IN: list of video_codec names that are compatible to the calling encoder program */
+ , gboolean *force_keyframe /* OUT: the calling encoder should encode an I-Frame */
+ , unsigned char *video_frame_chunk_data /* OUT: */
+ , gint32 *video_frame_chunk_size /* OUT: total size of frame (may include a videoformat specific frameheader)*/
+ , gint32 video_frame_chunk_maxsize /* IN: sizelimit (larger chunks are not fetched) */
+ , gdouble master_framerate
+ , gint32 max_master_frame_nr /* the number of frames that will be encode in total */
+ , gint32 *video_frame_chunk_hdr_size /* OUT: size of videoformat specific frameheader (0 if has no hdr) */
+ , gint32 check_flags /* IN: combination of GAP_VID_CHCHK_FLAG_* flag values */
+ )
+{
+#define GAP_MPEG_ASSUMED_REFERENCE_DISTANCE 3
+ static char *last_videofile = NULL;
+ static gboolean last_fetch_was_compressed_chunk = FALSE;
+
+ gchar *l_videofile;
+ GapStoryRenderFrameRangeElem *l_frn_elem;
+
+ gboolean l_enable_chunk_fetch;
+
+
+ *image_id = -1;
+ *layer_id = -1;
+ *force_keyframe = FALSE;
+ l_frn_elem = NULL;
+ *video_frame_chunk_size = 0;
+ *video_frame_chunk_hdr_size = 0; /* assume chunk contains no frame header */
+ l_enable_chunk_fetch = dont_recode_flag;
+
+ if(gap_debug)
+ {
+ printf("gap_story_render_fetch_composite_image_or_chunk START master_frame_nr:%d %dx%d dont_recode:%d\n"
+ , (int)master_frame_nr
+ , (int)vid_width
+ , (int)vid_height
+ , (int)dont_recode_flag
+ );
+ }
+
+ l_videofile = NULL; /* NULL: also used as flag for "MUST fetch regular uncompressed frame" */
+
+ if(filtermacro_file)
+ {
+ if(*filtermacro_file != '\0')
+ {
+ if(gap_debug)
+ {
+ printf("chunk fetch disabled due to filtermacro procesing\n");
+ }
+ /* if a filtermacro_file is specified, force disable chunk fetching */
+ l_enable_chunk_fetch = FALSE;
+ }
+ }
+
+ if (l_enable_chunk_fetch)
+ {
+ if(gap_debug)
+ {
+ printf("start check if chunk fetch is possible\n");
+ }
+
+ l_videofile = p_story_attempt_fetch_chunk(vidhand
+ , master_frame_nr
+ , vid_width
+ , vid_height
+ , vcodec_list
+ , video_frame_chunk_data
+ , video_frame_chunk_size
+ , video_frame_chunk_maxsize
+ , master_framerate
+ , max_master_frame_nr
+ , video_frame_chunk_hdr_size
+ , check_flags
+
+ , &last_fetch_was_compressed_chunk
+ , last_videofile
+ );
+ }
+
+ if(last_fetch_was_compressed_chunk)
+ {
+ *force_keyframe = TRUE;
+ }
+
+ /* keep the videofile name for the next call
+ * (for MPEG INTEGRITY checks that require a continuous sequence
+ * in the same referenced source video)
+ */
+ if(last_videofile)
+ {
+ g_free(last_videofile);
+ }
+ last_videofile = l_videofile;
+
+
+ if(l_videofile != NULL)
+ {
+ /* chunk fetch was successful */
+ if(gap_debug)
+ {
+ printf("gap_story_render_fetch_composite_image_or_chunk: CHUNK fetch succsessful\n");
+ }
+ return(TRUE);
+ }
+ else
+ {
+ last_fetch_was_compressed_chunk = FALSE;
+ if(last_videofile)
+ {
+ g_free(last_videofile);
+ }
+ last_videofile = l_videofile;
+
+
+ if(gap_debug)
+ {
+ printf("gap_story_render_fetch_composite_image_or_chunk: CHUNK fetch not possible (doing frame fetch instead)\n");
+ }
+
+ *video_frame_chunk_size = 0;
+ *image_id = gap_story_render_fetch_composite_image(vidhand
+ , master_frame_nr /* starts at 1 */
+ , vid_width /* desired Video Width in pixels */
+ , vid_height /* desired Video Height in pixels */
+ , filtermacro_file /* NULL if no filtermacro is used */
+ , layer_id /* output: Id of the only layer in the composite image */
+ );
+ if(*image_id >= 0)
+ {
+ return(TRUE);
+ }
+ }
+
+ return(FALSE);
+
+} /* end gap_story_render_fetch_composite_image_or_chunk */
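To make the calling convention described in the comment block above concrete, an encoder loop built on this function might look roughly like the following sketch. It is not part of this commit; vcodec_list, chunk_maxsize and the write_chunk/encode_frame callbacks are placeholders assumed to be provided by the specific encoder plug-in.

    /* hypothetical encoder-side loop illustrating the OUT parameters */

    /* placeholder callbacks assumed to be provided by the specific encoder */
    extern void write_chunk (unsigned char *data, gint32 size);
    extern void encode_frame (gint32 image_id, gint32 layer_id, gboolean force_keyframe);

    static void
    example_encode_loop (GapStoryRenderVidHandle *vidhand
        , gint32 vid_width, gint32 vid_height
        , gdouble master_framerate, gint32 total_frames
        , GapCodecNameElem *vcodec_list, gint32 chunk_maxsize)
    {
      unsigned char *chunk_buf = g_malloc(chunk_maxsize);
      gint32 master_frame_nr;

      for(master_frame_nr = 1; master_frame_nr <= total_frames; master_frame_nr++)
      {
        gint32 image_id, layer_id, chunk_size, chunk_hdr_size;
        gboolean force_keyframe;
        gboolean l_rc;

        l_rc = gap_story_render_fetch_composite_image_or_chunk(vidhand
                  , master_frame_nr
                  , vid_width, vid_height
                  , NULL                  /* filtermacro_file: none */
                  , &layer_id
                  , &image_id
                  , TRUE                  /* dont_recode_flag: attempt lossless cut */
                  , vcodec_list
                  , &force_keyframe
                  , chunk_buf
                  , &chunk_size
                  , chunk_maxsize
                  , master_framerate
                  , total_frames
                  , &chunk_hdr_size
                  , GAP_VID_CHCHK_FLAG_SIZE | GAP_VID_CHCHK_FLAG_MPEG_INTEGRITY
                  );
        if(!l_rc)
        {
          break;                                    /* fetch failed */
        }
        if(chunk_size > 0)
        {
          write_chunk(chunk_buf, chunk_size);       /* lossless path: copy chunk 1:1 */
        }
        else
        {
          encode_frame(image_id, layer_id, force_keyframe);  /* regular path: recode */
          gimp_image_delete(image_id);
        }
      }
      g_free(chunk_buf);
    }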
Modified: trunk/gap/gap_story_render_processor.c
==============================================================================
--- trunk/gap/gap_story_render_processor.c (original)
+++ trunk/gap/gap_story_render_processor.c Sun Jun 29 12:12:59 2008
@@ -484,7 +484,7 @@
p_debug_print_render_section_names(GapStoryRenderVidHandle *vidhand)
{
GapStoryRenderSection *section;
-
+
printf("\nDEBUG p_debug_print_render_section_names START\n");
for(section = vidhand->section_list; section != NULL; section = section->next)
{
@@ -1117,7 +1117,7 @@
);
fflush(stdout);
}
-
+
} /* end p_select_section_by_name */
@@ -1570,7 +1570,7 @@
printf("** INTERNAL ERROR parsing_section is NULL\n");
return;
}
-
+
frn_listend = vidhand->parsing_section->frn_list;
if (vidhand->parsing_section->frn_list == NULL)
{
@@ -2177,7 +2177,7 @@
/* ----------------------------------------------------
* p_storyboard_analyze
* ----------------------------------------------------
- * this procedure checks the specified storyboard (GapStoryBoard *stb) in memory
+ * this procedure checks the specified storyboard (GapStoryBoard *stb) in memory
* and converts all elements to the
* corresponding rangelist structures (audio or frames or attr tables)
* OUT: mainsection_frame_count is set to the number of frames in the main section
@@ -2242,7 +2242,7 @@
vidhand->section_list = NULL;
-
+
for(stb_section = stb->stb_section; stb_section != NULL; stb_section = stb_section->next)
{
if (stb_section == stb->mask_section)
@@ -2256,7 +2256,7 @@
continue;
}
vidhand->parsing_section = p_new_render_section(stb_section->section_name);
- p_append_render_section_to_vidhand(vidhand, vidhand->parsing_section);
+ p_append_render_section_to_vidhand(vidhand, vidhand->parsing_section);
/* clear video track attribute settings for all tracks.
* video attribute array holds current transition values (opacity, move and scale)
@@ -2270,7 +2270,7 @@
* The video attribute array must be cleared at start of each section.
*/
p_clear_vattr_array(vtarr);
-
+
/* count Elements in the current STB section and check highest video track number */
highest_vtrack = 0;
l_max_elems = 0;
@@ -2307,7 +2307,7 @@
*/
if (stb->master_vtrack1_is_toplayer)
{
- /* default order track 1 is the top layer
+ /* default order track 1 is the top layer
* 1 ==> 1
* 2 ==> 3
* 3 ==> 5
@@ -2337,7 +2337,7 @@
stb_elem->record_type = GAP_STBREC_VID_BLACKSECTION;
}
}
-
+
/* reset mask frame progress to 1 before processing each parsed clip.
* the mask_framecount handles clip internal start value for fetching layer mask frames
* in case were the scanned clip is splitted internally
@@ -2731,7 +2731,7 @@
} /* END Loop foreach Element in the current STB section */
-
+
if (stb_section->section_name == NULL)
{
gint vii;
@@ -2739,7 +2739,7 @@
*/
vidhand->frn_list = vidhand->parsing_section->frn_list;
vidhand->aud_list = vidhand->parsing_section->aud_list;
-
+
/* findout total frame_count of the main section
* (is the max frame_count of all tracks
* within the MAIN section)
@@ -2757,7 +2757,7 @@
} /* END loop for all sections of the storyboard */
-
+
} /* end p_storyboard_analyze */
@@ -2916,7 +2916,7 @@
p_new_render_section(const char *section_name)
{
GapStoryRenderSection *new_render_section;
-
+
new_render_section = g_new(GapStoryRenderSection, 1);
new_render_section->frn_list = NULL;
new_render_section->aud_list = NULL;
@@ -2941,7 +2941,7 @@
, GapStoryRenderSection *new_render_section)
{
GapStoryRenderSection *render_section;
-
+
for(render_section = vidhand->section_list; render_section != NULL; render_section = render_section->next)
{
if(render_section->next == NULL)
@@ -3038,7 +3038,7 @@
{
GapStoryElem *stb_elem;
vidhand->maskdef_elem = NULL; /* start with empty mask definition list */
-
+
if (stb_ptr->mask_section == NULL)
{
return;
@@ -3144,15 +3144,15 @@
gap_story_render_close_vid_handle(GapStoryRenderVidHandle *vidhand)
{
GapStoryRenderSection *render_section;
-
+
render_section = vidhand->section_list;
-
+
while(render_section != NULL)
{
GapStoryRenderSection *next_render_section;
-
+
next_render_section = render_section->next;
-
+
p_free_framerange_list(render_section->frn_list);
if (render_section->section_name != NULL)
@@ -3160,7 +3160,7 @@
g_free(render_section->section_name);
}
g_free(render_section);
-
+
render_section = next_render_section;
}
@@ -3544,7 +3544,7 @@
if (vidhand->section_list == NULL)
{
- /* make sure that the video handle always has a main section
+ /* make sure that the video handle always has a main section
* with section Name NULL.
* (this can occure if not directly created from a storyboard
* or when creating sub video handle for mask processing)
@@ -3552,7 +3552,7 @@
vidhand->section_list = p_new_render_section(NULL);
vidhand->parsing_section = vidhand->section_list;
render_section = vidhand->section_list;
-
+
if(gap_debug)
{
printf("p_open_video_handle_private: added default MAIN render_section\n");
@@ -4553,7 +4553,7 @@
gint32 th_bpp;
gint32 th_width;
gint32 th_height;
-
+
th_data = gap_story_convert_layer_to_RGB_thdata(l_layer_id
, &RAW_size
, &th_bpp
@@ -4580,625 +4580,6 @@
-/* ----------------------------------------------------
- * p_check_chunk_fetch_possible
- * ----------------------------------------------------
- * This procedure checks the preconditions for a possible
- * fetch of already compresses MPEG chunks.
- * - there is only 1 videoinput track at this master_frame_nr
- * - the videoframe must match 1:1 in size
- * - there are no transformations (opacity, offsets ....)
- *
- * return the name of the input videofile if preconditions are OK,
- * or NULL if not.
- */
-static char *
-p_check_chunk_fetch_possible(GapStoryRenderVidHandle *vidhand
- , gint32 master_frame_nr /* starts at 1 */
- , gint32 vid_width /* desired Video Width in pixels */
- , gint32 vid_height /* desired Video Height in pixels */
- , gint32 *video_frame_nr /* OUT: corresponding frame number in the input video */
- , GapStoryRenderFrameRangeElem **frn_elem /* OUT: pointer to relevant frame range element */
- )
-{
- gint l_track;
- gint32 l_track_min;
- gint32 l_track_max;
- gchar *l_framename;
- gchar *l_videofile;
- gdouble l_opacity;
- gdouble l_scale_x;
- gdouble l_scale_y;
- gdouble l_move_x;
- gdouble l_move_y;
- GapStoryRenderFrameRangeElem *l_frn_elem_2;
-
- gint32 l_localframe_index;
- gint32 l_local_stepcount;
- gboolean l_keep_proportions;
- gboolean l_fit_width;
- gboolean l_fit_height;
- GapStoryRenderFrameType l_frn_type;
- char *l_trak_filtermacro_file;
- gdouble l_red_f;
- gdouble l_green_f;
- gdouble l_blue_f;
- gdouble l_alpha_f;
-
-
- *video_frame_nr = -1;
- *frn_elem = NULL;
-
- l_videofile = NULL;
-
- p_find_min_max_vid_tracknumbers(vidhand->frn_list, &l_track_min, &l_track_max);
-
- /* findout if there is just one input track from type videofile
- * (that possibly could be fetched as comressed videoframe_chunk
- * and passed 1:1 to the calling encoder)
- */
- for(l_track = MIN(GAP_STB_MAX_VID_INTERNAL_TRACKS, l_track_max); l_track >= MAX(0, l_track_min); l_track--)
- {
- l_framename = p_fetch_framename(vidhand->frn_list
- , master_frame_nr /* starts at 1 */
- , l_track
- , &l_frn_type
- , &l_trak_filtermacro_file
- , &l_localframe_index /* used for ANIMIMAGE and Videoframe Number, -1 for all other types */
- , &l_local_stepcount
- , &l_keep_proportions
- , &l_fit_width
- , &l_fit_height
- , &l_red_f
- , &l_green_f
- , &l_blue_f
- , &l_alpha_f
- , &l_opacity /* output opacity 0.0 upto 1.0 */
- , &l_scale_x /* output 0.0 upto 10.0 where 1.0 is 1:1 */
- , &l_scale_y /* output 0.0 upto 10.0 where 1.0 is 1:1 */
- , &l_move_x /* output -1.0 upto 1.0 where 0.0 is centered */
- , &l_move_y /* output -1.0 upto 1.0 where 0.0 is centered */
- , &l_frn_elem_2 /* output selected to the relevant framerange element */
- );
-
- if(gap_debug) printf("l_track:%d l_frn_type:%d\n", (int)l_track, (int)l_frn_type);
-
-
- if((l_framename) || (l_frn_type == GAP_FRN_COLOR))
- {
- if(l_framename)
- {
- if(l_frn_type == GAP_FRN_MOVIE)
- {
- if(l_videofile == NULL)
- {
- /* check for transformations */
- if((l_opacity == 1.0)
- && (l_scale_x == 1.0)
- && (l_scale_y == 1.0)
- && (l_move_x == 0.0)
- && (l_move_y == 0.0)
- && (l_fit_width)
- && (l_fit_height)
- && (!l_keep_proportions)
- && (l_frn_elem_2->flip_request == GAP_STB_FLIP_NONE)
- && (l_frn_elem_2->mask_name == NULL)
-
-
- && (l_trak_filtermacro_file == NULL))
- {
- if(gap_debug) printf("gap_story_render_fetch_composite_image_or_chunk: video:%s\n", l_framename);
- l_videofile = g_strdup(l_framename);
- *video_frame_nr = l_localframe_index;
- *frn_elem = l_frn_elem_2;
- }
- else
- {
- if(gap_debug) printf("gap_story_render_fetch_composite_image_or_chunk: there are transformations\n");
- /* there are transformations, cant use compressed frame */
- l_videofile = NULL;
- break;
- }
- }
- else
- {
- if(gap_debug) printf("gap_story_render_fetch_composite_image_or_chunk: 2 or more videotracks found\n");
- l_videofile = NULL;
- break;
- }
- }
- else
- {
- l_videofile = NULL;
- break;
- }
-
- g_free(l_framename);
- }
- else
- {
- l_videofile = NULL;
- break;
- }
- }
- /* else: (vid track not used) continue */
-
- } /* end for loop over all video tracks */
-
- return(l_videofile);
-} /* end p_check_chunk_fetch_possible */
-
-
-/* ----------------------------------------------------
- * gap_story_render_fetch_composite_image_or_chunk
- * ----------------------------------------------------
- *
- * fetch composite VIDEO Image at a given master_frame_nr
- * within a storyboard framerange list
- *
- * if desired (and possible) try directly fetch the (already compressed) Frame chunk from
- * an input videofile for the master_frame_nr.
- *
- * the compressed fetch depends on following conditions:
- * - dont_recode_flag == TRUE
- * - there is only 1 videoinput track at this master_frame_nr
- * - the videodecoder must support a read_video_chunk procedure
- * (currently only libmpeg3 has this support
- * TODO: for future releases should also check for the same vcodec_name)
- * - the videoframe must match 1:1 in size
- * - there are no transformations (opacity, offsets ....)
- * - there are no filtermacros to perform on the fetched frame
- *
- * RETURN TRUE on success, FALSE on ERRORS
- * if an already compressed video_frame_chunk was fetched then return the size of the chunk
- * in the *video_frame_chunk_size OUT Parameter.
- * (both *image_id an *layer_id will deliver -1 in that case)
- * if a composite image was fetched deliver its id in the *image_id OUT parameter
- * and the id of the only layer in the *layer_id OUT Parameter
- * the *force_keyframe OUT parameter tells the calling encoder to write an I-Frame
- * (*video_frame_chunk_size will deliver 0 in that case)
- */
-gboolean
-gap_story_render_fetch_composite_image_or_chunk(GapStoryRenderVidHandle *vidhand
- , gint32 master_frame_nr /* starts at 1 */
- , gint32 vid_width /* desired Video Width in pixels */
- , gint32 vid_height /* desired Video Height in pixels */
- , char *filtermacro_file /* NULL if no filtermacro is used */
- , gint32 *layer_id /* output: Id of the only layer in the composite image */
-
- , gint32 *image_id /* output: Id of the only layer in the composite image */
- , gboolean dont_recode_flag /* IN: TRUE try to fetch comressed chunk if possible */
- , char *vcodec_name /* IN: video_codec used in the calling encoder program */
- , gboolean *force_keyframe /* OUT: the calling encoder should encode an I-Frame */
- , unsigned char *video_frame_chunk_data /* OUT: */
- , gint32 *video_frame_chunk_size /* OUT: */
- , gint32 video_frame_chunk_maxsize /* IN: */
- , gdouble master_framerate
- , gint32 max_master_frame_nr /* the number of frames that will be encode in total */
- )
-{
-#define GAP_MPEG_ASSUMED_REFERENCE_DISTANCE 3
- static gint32 last_video_frame_nr = -1;
- static char *last_videofile = NULL;
- static gboolean last_fetch_was_compressed_chunk = FALSE;
- static gboolean last_intra_frame_fetched = FALSE;
-
- gchar *l_framename;
- gchar *l_videofile_name;
- gchar *l_videofile;
- GapStoryRenderFrameRangeElem *l_frn_elem;
- GapStoryRenderFrameRangeElem *l_frn_elem_2;
-
- gint32 l_video_frame_nr;
-
-
- *image_id = -1;
- *layer_id = -1;
- *force_keyframe = FALSE;
- l_frn_elem = NULL;
- *video_frame_chunk_size = 0;
-
- if(gap_debug)printf("gap_story_render_fetch_composite_image_or_chunk START master_frame_nr:%d %dx%d dont_recode:%d\n"
- , (int)master_frame_nr
- , (int)vid_width
- , (int)vid_height
- , (int)dont_recode_flag
- );
-
- l_videofile = NULL; /* NULL: also used as flag for "MUST fetch regular uncompressed frame" */
- l_videofile_name = NULL;
- l_framename = NULL;
- l_video_frame_nr = 1;
-
- if(filtermacro_file)
- {
- if(*filtermacro_file != '\0')
- {
- dont_recode_flag = FALSE; /* if a filtermacro_file used we must force recode */
- }
- }
-
- /* first check if recode is forced by the calling program */
- if (dont_recode_flag)
- {
- l_videofile = p_check_chunk_fetch_possible(vidhand
- , master_frame_nr
- , vid_width
- , vid_height
- , &l_video_frame_nr
- , &l_frn_elem
- );
- if(l_videofile)
- {
- l_videofile_name = g_strdup(l_videofile);
- }
-
-
- if((l_videofile) && (l_frn_elem))
- {
- if(gap_debug) printf("gap_story_render_fetch_composite_image_or_chunk: ATTEMPT access l_videofile :%s \n", l_videofile);
-
- /* check if we can FETCH compressed video chunk */
- if(l_frn_elem->gvahand == NULL)
- {
- /* before we open a new GVA videohandle, lets check
- * if another element has already opened this videofile,
- * and reuse the already open gvahand handle if possible
- */
- l_frn_elem->gvahand = p_try_to_steal_gvahand(vidhand
- , master_frame_nr
- , l_frn_elem->basename
- , l_frn_elem->exact_seek
- );
- if(l_frn_elem->gvahand == NULL)
- {
- if(vidhand->preferred_decoder)
- {
- l_frn_elem->gvahand = GVA_open_read_pref(l_videofile
- , l_frn_elem->seltrack
- , 1 /* aud_track */
- , vidhand->preferred_decoder
- , FALSE /* use MMX if available (disable_mmx == FALSE) */
- );
- }
- else
- {
- l_frn_elem->gvahand = GVA_open_read(l_videofile
- ,l_frn_elem->seltrack
- ,1 /* aud_track */
- );
- }
- if(l_frn_elem->gvahand)
- {
- GVA_set_fcache_size(l_frn_elem->gvahand, GAP_STB_RENDER_GVA_FRAMES_TO_KEEP_CACHED);
-
- l_frn_elem->gvahand->do_gimp_progress = vidhand->do_gimp_progress;
- if(l_frn_elem->exact_seek == 1)
- {
- /* configure the GVA Procedures for exact (but slow) seek emulaion */
- l_frn_elem->gvahand->emulate_seek = TRUE;
- }
- }
- }
- }
-
-
- if(l_frn_elem->gvahand)
- {
- /* check if framesize matches 1:1 to output video size
- * and if the videodecoder does support a read procedure for compressed vodeo chunks
- * TODO: should also check for compatible vcodec_name
- * (cannot check that, because libmpeg3 does not deliver vcodec_name information
- * and there is no implementation to fetch uncompressed chunks in other decoders)
- */
- if((l_frn_elem->gvahand->width != vid_width)
- || (l_frn_elem->gvahand->height != vid_height)
- || (GVA_has_video_chunk_proc(l_frn_elem->gvahand) != TRUE) )
- {
- l_videofile = NULL;
- }
- else
- {
- t_GVA_RetCode l_fcr;
-
- if(gap_debug) printf("gap_story_render_fetch_composite_image_or_chunk: performing CHUNK fetch\n");
-
- /* FETCH compressed video chunk */
- l_fcr = GVA_get_video_chunk(l_frn_elem->gvahand
- , l_video_frame_nr
- , video_frame_chunk_data
- , video_frame_chunk_size
- , video_frame_chunk_maxsize
- );
-
- if(gap_debug) printf("gap_story_render_fetch_composite_image_or_chunk: AFTER CHUNK fetch max:%d chunk_data:%d chunk_size:%d\n"
- ,(int)video_frame_chunk_maxsize
- ,(int)video_frame_chunk_data
- ,(int)*video_frame_chunk_size
- );
- if(l_videofile_name)
- {
- g_free(l_videofile_name);
- l_videofile_name = NULL;
- }
-
- if(l_fcr == GVA_RET_OK)
- {
- gint l_frame_type;
-
- l_frame_type = GVA_util_check_mpg_frame_type(video_frame_chunk_data
- ,*video_frame_chunk_size
- );
- GVA_util_fix_mpg_timecode(video_frame_chunk_data
- ,*video_frame_chunk_size
- ,master_framerate
- ,master_frame_nr
- );
- if ((1==0)
- && (master_frame_nr < 10)) /* debug code: dump first 9 video chunks to file(s) */
- {
- FILE *fp;
- char *fname;
-
- fname = g_strdup_printf("zz_chunk_data_%06d.dmp", (int)l_video_frame_nr);
- fp = g_fopen(fname, "wb");
- if(fp)
- {
- fwrite(video_frame_chunk_data, *video_frame_chunk_size, 1, fp);
- fclose(fp);
- }
-
- g_free(fname);
- }
-
- if (l_frame_type == GVA_MPGFRAME_I_TYPE)
- {
- /* intra frame has no dependencies to other frames
- * can use that frame type at any place in the stream
- */
- last_video_frame_nr = l_video_frame_nr;
- last_intra_frame_fetched = TRUE;
- if(last_videofile)
- {
- g_free(last_videofile);
- }
- last_videofile = g_strdup(l_videofile);
- last_fetch_was_compressed_chunk = TRUE;
-
- if(gap_debug)
- {
- printf("\nReuse I-FRAME at %06d,", (int)master_frame_nr);
- }
- return(TRUE);
- }
-
- if ((l_frame_type == GVA_MPGFRAME_P_TYPE)
- && (1==1))
- {
- /* predicted frame has dependencies to the previous intra frame
- * can use that frame if fetch sequence contains previous i frame
- */
- if(last_videofile)
- {
- if((strcmp(l_videofile, last_videofile) == 0)
- && (l_video_frame_nr == last_video_frame_nr +1))
- {
- last_video_frame_nr = l_video_frame_nr;
- last_fetch_was_compressed_chunk = TRUE;
-
- if(gap_debug)
- {
- printf("P,");
- // printf(" Reuse P-FRAME Chunk at %06d\n", (int)master_frame_nr);
- }
- return(TRUE);
- }
- }
- }
-
- if ((l_frame_type == GVA_MPGFRAME_B_TYPE)
- || (l_frame_type == GVA_MPGFRAME_P_TYPE))
- {
- /* bi-directional predicted frame has dependencies both to
- * the previous intra frame or p-frame and to the following i or p-frame.
- *
- * can use that frame if fetch sequence contains previous i frame
- * and fetch will continue until the next i or p frame.
- *
- * we do a simplified check if the next few (say 3) frames in storyboard sequence
- * will fetch the next (3) frames in videofile sequence from the same videofile.
- * this is just a guess, but maybe sufficient in most cases.
- */
- if(last_videofile)
- {
- gboolean l_bframe_ok;
-
- l_bframe_ok = TRUE; /* assume that B-frame can be used */
-
- if((strcmp(l_videofile, last_videofile) == 0)
- && (l_video_frame_nr == last_video_frame_nr +1))
- {
- if(master_frame_nr + GAP_MPEG_ASSUMED_REFERENCE_DISTANCE > max_master_frame_nr)
- {
- /* never deliver B-frame at the last few frames in the output video.
- * (unresolved references to following p or i frames of the
- * input video could be the result)
- */
- l_bframe_ok = FALSE;
- }
- else
- {
- gint ii;
- gint32 l_next_video_frame_nr;
- char *l_next_videofile;
-
- /* look ahead if the next few fetches in storyboard sequence
- * will deliver the next frames from the same inputvideo
- * in ascending input_video sequence at stepsize 1
- * (it is assumed that the referenced P or I frame
- * will be fetched in later calls then)
- */
- for(ii=1; ii <= GAP_MPEG_ASSUMED_REFERENCE_DISTANCE; ii++)
- {
- l_next_videofile = p_check_chunk_fetch_possible(vidhand
- , (master_frame_nr + ii)
- , vid_width
- , vid_height
- , &l_next_video_frame_nr
- , &l_frn_elem_2
- );
- if(l_next_videofile)
- {
- if((strcmp(l_next_videofile, l_videofile) != 0)
- || (l_next_video_frame_nr != l_video_frame_nr +ii))
- {
- l_bframe_ok = FALSE;
- }
- g_free(l_next_videofile);
- }
- else
- {
- l_bframe_ok = FALSE;
- }
- if(!l_bframe_ok)
- {
- break;
- }
-
- } /* end for loop (look ahed next few frames in storyboard sequence) */
- }
-
- if(gap_debug)
- {
- if(l_bframe_ok) printf("Look Ahead B-FRAME OK to copy\n");
- else printf("Look Ahead B-FRAME dont USE\n");
- }
-
- if(l_bframe_ok)
- {
- last_video_frame_nr = l_video_frame_nr;
- last_fetch_was_compressed_chunk = TRUE;
-
- if(gap_debug)
- {
- if (l_frame_type == GVA_MPGFRAME_B_TYPE)
- {
- printf("B,");
- }
- else
- {
- printf("p,");
- }
- //printf(" Reuse B-FRAME Chunk at %06d\n", (int)master_frame_nr);
- }
- return(TRUE);
- }
- }
- }
- }
-
- if(gap_debug)
- {
- printf("** sorry, no reuse of fetched CHUNK type: %d (1=I/2=P/3=B) frame_nr:%d (last_frame_nr:%d)\n"
- ,(int)l_frame_type
- ,(int)l_video_frame_nr
- ,(int)last_video_frame_nr
- );
- }
-
-
- if((l_frame_type != GVA_MPGFRAME_I_TYPE)
- && (l_frame_type != GVA_MPGFRAME_P_TYPE)
- && (l_frame_type != GVA_MPGFRAME_B_TYPE))
- {
- printf("** WARNING, unsupported frame_type: %d (supported are: 1=I/2=P/3=B) file:%s frame_nr:%d (last_frame_nr in the video:%d)\n"
- ,(int)l_frame_type
- ,l_videofile
- ,(int)l_video_frame_nr
- ,(int)last_video_frame_nr
- );
- }
-
- last_fetch_was_compressed_chunk = FALSE;
- last_intra_frame_fetched = FALSE;
- l_videofile = NULL;
-
- }
- else
- {
- if(gap_debug)
- {
- printf("**# sorry, no reuse fetch failed frame_nr:%d (last_frame_nr:%d)\n"
- ,(int)l_video_frame_nr
- ,(int)last_video_frame_nr
- );
- }
- last_fetch_was_compressed_chunk = FALSE;
- last_intra_frame_fetched = FALSE;
- *video_frame_chunk_size = 0;
- return(FALSE);
- }
- }
- }
- }
-
- } /* end IF dont_recode_flag */
-
- if(last_fetch_was_compressed_chunk)
- {
- *force_keyframe = TRUE;
- }
-
- if(l_videofile_name)
- {
- g_free(l_videofile_name);
- }
-
- if(l_videofile == NULL)
- {
- last_video_frame_nr = -1;
- last_intra_frame_fetched = FALSE;
- last_fetch_was_compressed_chunk = FALSE;
- if(last_videofile)
- {
- g_free(last_videofile);
- }
- last_videofile = l_videofile;
-
-
- if(gap_debug)
- {
- printf("gap_story_render_fetch_composite_image_or_chunk: CHUNK fetch not possible (doing frame fetch instead)\n");
- }
-
- *video_frame_chunk_size = 0;
- *image_id = gap_story_render_fetch_composite_image(vidhand
- , master_frame_nr /* starts at 1 */
- , vid_width /* desired Video Width in pixels */
- , vid_height /* desired Video Height in pixels */
- , filtermacro_file /* NULL if no filtermacro is used */
- , layer_id /* output: Id of the only layer in the composite image */
- );
- if(*image_id >= 0)
- {
- return(TRUE);
- }
- }
- else
- {
- /*if(gap_debug)*/
- {
- printf("### INTERNAL ERROR at gap_story_render_fetch_composite_image_or_chunk frame_nr:%d (last_frame_nr:%d)\n"
- ,(int)l_video_frame_nr
- ,(int)last_video_frame_nr
- );
- }
- }
-
- return(FALSE);
-
-} /* end gap_story_render_fetch_composite_image_or_chunk */
-
/* ----------------------------------------------------
@@ -5578,7 +4959,7 @@
const char *sub_section_name;
gboolean orig_do_progress;
gdouble orig_progress;
-
+
if(gap_debug)
{
printf("SUB-SECTION: before RECURSIVE call\n");
@@ -5597,7 +4978,7 @@
, &sub_layer_id
, sub_section_name
);
-
+
if(gap_debug)
{
printf("SUB-SECTION: after RECURSIVE call\n");
@@ -5839,3 +5220,12 @@
} /* end p_story_render_fetch_composite_image_private */
+
+
+/* -------------------------------------------------------------------
+ * gap_story_render_fetch_composite_image_or_chunk (see included file)
+ * -------------------------------------------------------------------
+ *
+ */
+
+#include "gap_story_render_lossless.c"
Modified: trunk/gap/gap_story_render_processor.h
==============================================================================
--- trunk/gap/gap_story_render_processor.h (original)
+++ trunk/gap/gap_story_render_processor.h Sun Jun 29 12:12:59 2008
@@ -65,8 +65,21 @@
#define GAP_VID_ENC_SAVE_FLAT "GAP_VID_ENC_SAVE_FLAT"
#define GAP_VID_ENC_MONITOR "GAP_VID_ENC_MONITOR"
-
-
+#define GAP_VID_CHCHK_FLAG_SIZE 1
+#define GAP_VID_CHCHK_FLAG_MPEG_INTEGRITY 2
+#define GAP_VID_CHCHK_FLAG_JPG 4
+#define GAP_VID_CHCHK_FLAG_VCODEC_NAME 8
+#define GAP_VID_CHCHK_FLAG_FULL_FRAME 16
+#define GAP_VID_CHCHK_FLAG_PNG 32
+
+/* codec name list element
+ */
+typedef struct GapCodecNameElem {
+ guchar *codec_name;
+ gint32 video_id;
+
+ void *next;
+} GapCodecNameElem;
/* --------------------------*/
/* PROCEDURE DECLARATIONS */
@@ -104,13 +117,17 @@
, gint32 *image_id /* output: Id of the only layer in the composite image */
, gboolean dont_recode_flag /* IN: TRUE try to fetch comressed chunk if possible */
- , char *vcodec_name /* IN: video_codec used in the calling encoder program */
+ , GapCodecNameElem *vcodec_list /* IN: list of video_codec names that are compatible to the calling encoder program */
, gboolean *force_keyframe /* OUT: the calling encoder should encode an I-Frame */
- , unsigned char *video_frame_chunk_data /* OUT: */
- , gint32 *video_frame_chunk_size /* OUT: */
- , gint32 video_frame_chunk_maxsize /* IN: */
+ , unsigned char *video_frame_chunk_data /* OUT: copy of the already compressed video frame from source video */
+ , gint32 *video_frame_chunk_size /* OUT: total size of frame (may include a videoformat specific frameheader) */
+ , gint32 video_frame_chunk_maxsize /* IN: sizelimit (larger chunks are not fetched) */
, gdouble master_framerate
- , gint32 max_master_frame_nr /* the number of frames that will be encode in total */
+ , gint32 max_master_frame_nr /* the number of frames that will be encoded in total */
+
+
+ , gint32 *video_frame_chunk_hdr_size /* OUT: size of videoformat specific frameheader (0 if has no hdr) */
+ , gint32 check_flags /* IN: combination of GAP_VID_CHCHK_FLAG_* flag values */
);
GapStoryRenderVidHandle * gap_story_render_open_vid_handle_from_stb(
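The changed prototype above moves the codec-compatibility decision to the caller: instead of a single vcodec_name string the encoder now passes a list of GapCodecNameElem entries plus a combination of GAP_VID_CHCHK_FLAG_* bits, and gets the size of any container specific frame header back in video_frame_chunk_hdr_size. The following caller-side sketch is illustrative only (not part of this change set); the codec name "mpeg2video", the buffer sizing and all local variable names are assumptions, while vidhand, master_frame_nr, vid_width, vid_height, master_framerate and max_master_frame_nr are supplied by the encoder main loop.

  /* sketch: one compatible codec name, size + MPEG integrity + codec name checks */
  GapCodecNameElem codec_elem;
  unsigned char   *chunk_buf;
  gint32           chunk_size;
  gint32           chunk_hdr_size;
  gint32           chunk_maxsize;
  gint32           image_id;
  gint32           layer_id;
  gboolean         force_keyframe;
  gboolean         l_ok;

  chunk_maxsize = vid_width * vid_height * 4;      /* generous upper limit for one frame */
  chunk_buf     = g_malloc(chunk_maxsize);

  codec_elem.codec_name = (guchar *)"mpeg2video";  /* illustrative; as reported by GVA_get_codec_name */
  codec_elem.video_id   = 0;
  codec_elem.next       = NULL;

  l_ok = gap_story_render_fetch_composite_image_or_chunk(vidhand
        , master_frame_nr, vid_width, vid_height
        , NULL                      /* no filtermacro_file */
        , &layer_id
        , &image_id
        , TRUE                      /* dont_recode_flag: try lossless chunk copy */
        , &codec_elem               /* codec names compatible with this encoder */
        , &force_keyframe
        , chunk_buf, &chunk_size, chunk_maxsize
        , master_framerate
        , max_master_frame_nr
        , &chunk_hdr_size
        , GAP_VID_CHCHK_FLAG_SIZE | GAP_VID_CHCHK_FLAG_MPEG_INTEGRITY | GAP_VID_CHCHK_FLAG_VCODEC_NAME
        );

  if (l_ok && (chunk_size > 0))
  {
    /* chunk_buf holds an already compressed frame; the first chunk_hdr_size bytes
     * are container specific header data that the target format may have to skip.
     */
  }
  else if (l_ok)
  {
    /* image_id / layer_id refer to a freshly rendered composite frame
     * that must be encoded the normal way.
     */
  }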
Modified: trunk/gap/gap_vex_exec.c
==============================================================================
--- trunk/gap/gap_vex_exec.c (original)
+++ trunk/gap/gap_vex_exec.c Sun Jun 29 12:12:59 2008
@@ -59,151 +59,6 @@
-
-/* ---------------------
- * p_extract_audio
- * ---------------------
- * - extract audio and optional save to wav file
- */
-void
-p_extract_audio(GapVexMainGlobalParams *gpp, t_GVA_Handle *gvahand, gdouble samples_to_read, gboolean wav_save)
-{
-#ifdef GAP_ENABLE_VIDEOAPI_SUPPORT
- int l_audio_channels;
- int l_sample_rate;
- long l_audio_samples;
- unsigned short *left_ptr;
- unsigned short *right_ptr;
- unsigned short *l_lptr;
- unsigned short *l_rptr;
- long l_to_read;
- gint64 l_left_to_read;
- long l_block_read;
- gdouble l_progress;
- FILE *fp_wav;
-
- l_audio_channels = gvahand->audio_cannels;
- l_sample_rate = gvahand->samplerate;
- l_audio_samples = gvahand->total_aud_samples;
-
- if(gap_debug)
- {
- printf("Channels:%d samplerate:%d samples:%d samples_to_read: %.0f\n"
- , (int)l_audio_channels
- , (int)l_sample_rate
- , (int)l_audio_samples
- , (float)samples_to_read
- );
- }
-
- fp_wav = NULL;
- if(wav_save)
- {
- fp_wav = g_fopen(gpp->val.audiofile, "wb");
- }
-
- if((fp_wav) || (!wav_save))
- {
- gint32 l_bytes_per_sample;
- gint32 l_ii;
-
- if(l_audio_channels == 1) { l_bytes_per_sample = 2;} /* mono */
- else { l_bytes_per_sample = 4;} /* stereo */
-
- if(wav_save)
- {
- /* write the header */
- gap_audio_wav_write_header(fp_wav
- , (gint32)samples_to_read
- , l_audio_channels /* cannels 1 or 2 */
- , l_sample_rate
- , l_bytes_per_sample
- , 16 /* 16 bit sample resolution */
- );
- }
- if(gap_debug) printf("samples_to_read:%d\n", (int)samples_to_read);
-
- /* audio block read (blocksize covers playbacktime for 250 frames */
- l_left_to_read = samples_to_read;
- l_block_read = (double)(250.0) / (double)gvahand->framerate * (double)l_sample_rate;
- l_to_read = MIN(l_left_to_read, l_block_read);
-
- /* allocate audio buffers */
- left_ptr = g_malloc0((sizeof(short) * l_block_read) + 16);
- right_ptr = g_malloc0((sizeof(short) * l_block_read) + 16);
-
- while(l_to_read > 0)
- {
- l_lptr = left_ptr;
- l_rptr = right_ptr;
- /* read the audio data of channel 0 (left or mono) */
- GVA_get_audio(gvahand
- ,l_lptr /* Pointer to pre-allocated buffer if int16's */
- ,1 /* Channel to decode */
- ,(gdouble)l_to_read /* Number of samples to decode */
- ,GVA_AMOD_CUR_AUDIO /* read from current audio position (and advance) */
- );
- if((l_audio_channels > 1) && (wav_save))
- {
- /* read the audio data of channel 2 (right)
- * NOTE: GVA_get_audio has advanced the stream position,
- * so we have to set GVA_AMOD_REREAD to read from
- * the same startposition as for channel 1 (left).
- */
- GVA_get_audio(gvahand
- ,l_rptr /* Pointer to pre-allocated buffer if int16's */
- ,2 /* Channel to decode */
- ,l_to_read /* Number of samples to decode */
- ,GVA_AMOD_REREAD /* read from */
- );
- }
- l_left_to_read -= l_to_read;
-
- if(wav_save)
- {
- /* write 16 bit wave datasamples
- * sequence mono: (lo, hi)
- * sequence stereo: (lo_left, hi_left, lo_right, hi_right)
- */
- for(l_ii=0; l_ii < l_to_read; l_ii++)
- {
- gap_audio_wav_write_gint16(fp_wav, *l_lptr);
- l_lptr++;
- if(l_audio_channels > 1)
- {
- gap_audio_wav_write_gint16(fp_wav, *l_rptr);
- l_rptr++;
- }
- }
- }
-
-
- l_to_read = MIN(l_left_to_read, l_block_read);
-
- /* calculate progress */
- l_progress = (gdouble)(samples_to_read - l_left_to_read) / ((gdouble)samples_to_read + 1.0);
- if(gap_debug) printf("l_progress:%f\n", (float)l_progress);
- if (gpp->val.run_mode != GIMP_RUN_NONINTERACTIVE)
- {
- gimp_progress_update (l_progress);
- }
- }
- if(wav_save)
- {
- /* close wavfile */
- fclose(fp_wav);
- }
-
- /* free audio buffers */
- g_free(left_ptr);
- g_free(right_ptr);
- }
-#endif
- return;
-} /* end p_extract_audio */
-
-
-
/* ------------------------------
* gap_vex_exe_extract_videorange
* ------------------------------
@@ -582,113 +437,41 @@
}
}
+
/* ------ extract Audio ---------- */
if((gvahand->atracks > 0)
&& (gpp->val.audiotrack > 0))
{
if (l_overwrite_mode_audio >= 0)
{
- gdouble l_samples_to_read;
- gdouble l_extracted_frames;
-
- if(gap_debug) printf("EXTRACTING audio, writing to file %s\n", gpp->val.audiofile);
-
- if (gpp->val.exact_seek != 0)
- {
- /* close and reopen to make sure that we start at begin */
-
- gvahand->image_id = -1; /* prenvent API from deleting that image at close */
- GVA_close(gvahand);
-
- /* --------- (RE)OPEN the videofile --------------- */
- gvahand = GVA_open_read_pref(gpp->val.videoname
- ,gpp->val.videotrack
- ,gpp->val.audiotrack
- ,gpp->val.preferred_decoder
- , FALSE /* use MMX if available (disable_mmx == FALSE) */
- );
- if(gvahand == NULL)
- {
- return;
- }
- }
-
- if(gvahand->audio_cannels > 0)
- {
- /* seek needed only if extract starts not at pos 1 */
- if(l_pos > 1)
- {
- if (gpp->val.run_mode != GIMP_RUN_NONINTERACTIVE)
- {
- gimp_progress_init (_("Seek Audio Position..."));
- }
-
- /* check for exact frame_seek */
- if (gpp->val.exact_seek != 0)
- {
- gint32 l_seek_framenumber;
-
-
- l_seek_framenumber = l_pos;
- if(l_pos_unit == GVA_UPOS_PRECENTAGE)
- {
- l_seek_framenumber = gvahand->total_frames * l_pos;
- }
-
- l_samples_to_read = (gdouble)(l_seek_framenumber)
- / (gdouble)gvahand->framerate * (gdouble)gvahand->samplerate;
-
- /* extract just for exact positioning (without save to wav file) */
- if(gap_debug) printf("extract just for exact positioning (without save to wav file)\n");
- p_extract_audio(gpp, gvahand, l_samples_to_read, FALSE);
- }
- else
- {
- /* audio pos 1 frame before video pos
- * example: extract frame 1 upto 2
- * results in audio range 0 upto 2
- * this way we can get the audioduration of frame 1
- */
- GVA_seek_audio(gvahand, l_pos -1, l_pos_unit);
- }
- }
-
- if (gpp->val.run_mode != GIMP_RUN_NONINTERACTIVE)
- {
- gimp_progress_init (_("Extracting Audio..."));
- }
-
- l_extracted_frames = framenumber - framenumber1;
- if(l_extracted_frames > 1)
- {
- l_samples_to_read = (gdouble)(l_extracted_frames +1.0)
- / (gdouble)gvahand->framerate
- * (gdouble)gvahand->samplerate;
- if(gap_debug) printf("A: l_samples_to_read %.0f l_extracted_frames:%d\n"
- , (float)l_samples_to_read
- ,(int)l_extracted_frames
- );
- }
- else
- {
- l_samples_to_read = (gdouble)(l_expected_frames +1.0)
- / (gdouble)gvahand->framerate
- * (gdouble)gvahand->samplerate;
- if(gap_debug) printf("B: l_samples_to_read %.0f l_extracted_frames:%d l_expected_frames:%d\n"
- , (float)l_samples_to_read
- ,(int)l_extracted_frames
- ,(int)l_expected_frames
- );
- }
+ gdouble l_extracted_frames;
+ gboolean do_progress;
+
+ l_extracted_frames = framenumber - framenumber1;
+
+ do_progress = TRUE;
+        if(gpp->val.run_mode == GIMP_RUN_NONINTERACTIVE)
+ {
+ do_progress = FALSE;
+ }
- /* extract and save to wav file */
- if(gap_debug) printf("extract (with save to wav file)\n");
- p_extract_audio(gpp, gvahand, l_samples_to_read, TRUE);
- }
-
+ gap_audio_extract_from_videofile(gpp->val.videoname
+ , gpp->val.audiotrack
+ , gpp->val.preferred_decoder
+ , gpp->val.exact_seek
+ , l_pos_unit
+ , l_pos
+ , l_extracted_frames
+ , l_expected_frames
+ , do_progress
+ , NULL /* GtkWidget *progressBar using NULL for gimp_progress */
+ , NULL /* fptr_progress_callback */
+ , NULL /* user_data */
+ );
}
}
+
if((gpp->val.image_ID >= 0)
&& (gpp->val.multilayer == 0)
&& (gpp->val.videotrack > 0))
Modified: trunk/libgapvidapi/gap_vid_api.c
==============================================================================
--- trunk/libgapvidapi/gap_vid_api.c (original)
+++ trunk/libgapvidapi/gap_vid_api.c Sun Jun 29 12:12:59 2008
@@ -1154,6 +1154,41 @@
} /* end p_gva_worker_get_video_chunk */
+/* ----------------------------
+ * p_gva_worker_get_codec_name
+ * ----------------------------
+ */
+static char *
+p_gva_worker_get_codec_name(t_GVA_Handle *gvahand
+ ,t_GVA_CodecType codec_type
+ ,gint32 track_nr
+ )
+{
+ char *codec_name;
+
+ codec_name = NULL;
+ if(gvahand)
+ {
+ t_GVA_DecoderElem *dec_elem;
+
+ dec_elem = (t_GVA_DecoderElem *)gvahand->dec_elem;
+
+ if(dec_elem)
+ {
+ if(dec_elem->fptr_get_codec_name == NULL)
+ {
+ printf("p_gva_worker_get_codec_name: Method not implemented in decoder %s\n", dec_elem->decoder_name);
+ return(NULL);
+ }
+ codec_name = (*dec_elem->fptr_get_codec_name)(gvahand
+ , codec_type
+ , track_nr
+ );
+ }
+ }
+ return(codec_name);
+} /* end p_gva_worker_get_codec_name */
+
/* --------------------------
* p_gva_worker_open_read
* --------------------------
@@ -1524,6 +1559,42 @@
return(l_rc);
}
+
+char *
+GVA_get_codec_name(t_GVA_Handle *gvahand
+ ,t_GVA_CodecType codec_type
+ ,gint32 track_nr
+ )
+{
+ char *codec_name;
+
+ if(gap_debug)
+ {
+    printf("GVA_get_codec_name: START handle:%d, codec_type:%d track_nr:%d\n"
+ , (int)gvahand
+ , (int)codec_type
+ , (int)track_nr
+ );
+ }
+
+ codec_name = p_gva_worker_get_codec_name(gvahand, codec_type, track_nr);
+ //if(gap_debug)
+ {
+ printf("GVA_get_codec_name: END codec_name:");
+ if (codec_name)
+ {
+ printf("%s", codec_name);
+ }
+ else
+ {
+ printf("NULL");
+ }
+ printf("\n");
+ }
+
+ return(codec_name);
+}
+
/* -------------------------------------------------------------------------
* Converter Procedures (Framebuffer <--> GIMP
* -------------------------------------------------------------------------
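GVA_get_codec_name is the public wrapper around the new fptr_get_codec_name decoder method; an encoder can use it to find out whether the source video was compressed with a codec it is able to copy 1:1. A minimal usage sketch (illustrative only; the filename and track numbers are assumptions):

  /* sketch: query codec names right after opening a video handle */
  t_GVA_Handle *gvahand;
  char         *vcodec;
  char         *acodec;

  gvahand = GVA_open_read("/tmp/example.mpg", 1 /* vid_track */, 1 /* aud_track */);
  if (gvahand)
  {
    vcodec = GVA_get_codec_name(gvahand, GVA_VIDEO_CODEC, 1);
    acodec = GVA_get_codec_name(gvahand, GVA_AUDIO_CODEC, 1);

    /* the returned strings are copies (the ffmpeg wrapper uses g_strdup) and must be
     * freed by the caller; NULL is returned when the decoder does not implement
     * fptr_get_codec_name or no codec information is available.
     */
    if (vcodec) { g_free(vcodec); }
    if (acodec) { g_free(acodec); }

    GVA_close(gvahand);
  }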
Modified: trunk/libgapvidapi/gap_vid_api.h
==============================================================================
--- trunk/libgapvidapi/gap_vid_api.h (original)
+++ trunk/libgapvidapi/gap_vid_api.h Sun Jun 29 12:12:59 2008
@@ -280,6 +280,11 @@
gboolean gva_thread_save;
} t_GVA_Handle;
+typedef enum
+{
+ GVA_VIDEO_CODEC
+ ,GVA_AUDIO_CODEC
+} t_GVA_CodecType;
/* Function Typedefs */
typedef gboolean (*t_check_sig_fptr)(char *filename);
@@ -304,6 +309,10 @@
,gint32 max_size
);
+typedef char * (*t_get_codec_name_fptr)(t_GVA_Handle *gvahand
+ ,t_GVA_CodecType codec_type
+ ,gint32 track_nr
+ );
/* List Element Description for a Decoder */
@@ -325,6 +334,7 @@
t_count_frames_fptr fptr_count_frames;
t_seek_support_fptr fptr_seek_support;
t_get_video_chunk_fptr fptr_get_video_chunk;
+ t_get_codec_name_fptr fptr_get_codec_name;
} t_GVA_DecoderElem;
@@ -430,11 +440,30 @@
, unsigned char *chunk
, gint32 *size
, gint32 max_size);
+
+char * GVA_get_codec_name(t_GVA_Handle *gvahand
+ ,t_GVA_CodecType codec_type
+ ,gint32 track_nr
+ );
+
gint GVA_util_check_mpg_frame_type(unsigned char *buffer, gint32 buf_size);
void GVA_util_fix_mpg_timecode(unsigned char *buffer
,gint32 buf_size
,gdouble master_framerate
,gint32 master_frame_nr
);
+gint32 GVA_util_calculate_mpeg_frameheader_size(unsigned char *buffer
+ ,gint32 buf_size
+ );
+gboolean GVA_util_check_jpg_picture(unsigned char *buffer
+ ,gint32 buf_size
+ ,gint32 max_check_size
+ ,gint32 *hdr_size
+ );
+gboolean GVA_util_check_png_picture(unsigned char *buffer
+ ,gint32 buf_size
+ ,gint32 max_check_size
+ ,gint32 *hdr_size
+ );
#endif
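Together with the existing GVA_util_check_mpg_frame_type, the new GVA_util_calculate_mpeg_frameheader_size lets a caller determine both the frame type and the number of leading header bytes in a fetched chunk. A small sketch (illustrative only; chunk_buf and chunk_size are assumed to come from a previous GVA_get_video_chunk call):

  /* sketch: inspect an already fetched MPEG chunk */
  gint   frame_type;
  gint32 hdr_size;

  frame_type = GVA_util_check_mpg_frame_type(chunk_buf, chunk_size);
  hdr_size   = GVA_util_calculate_mpeg_frameheader_size(chunk_buf, chunk_size);

  if (frame_type == GVA_MPGFRAME_I_TYPE)
  {
    /* intra frame: has no dependencies on other frames and can be copied at any
     * position in the target stream; hdr_size bytes of sequence/GOP/picture
     * header precede the picture data.
     */
  }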
Modified: trunk/libgapvidapi/gap_vid_api_ffmpeg.c
==============================================================================
--- trunk/libgapvidapi/gap_vid_api_ffmpeg.c (original)
+++ trunk/libgapvidapi/gap_vid_api_ffmpeg.c Sun Jun 29 12:12:59 2008
@@ -145,6 +145,10 @@
gboolean prefere_native_seek; /* prefere native seek if both vindex and native seek available */
gboolean all_timecodes_verified;
gboolean critical_timecodesteps_found;
+
+ unsigned char * chunk_ptr;
+ gint32 chunk_len;
+
} t_GVA_ffmpeg;
@@ -214,6 +218,9 @@
static void p_analyze_stepsize_pattern(gint max_idx, t_GVA_Handle *gvahand);
static void p_probe_timecode_offset(t_GVA_Handle *gvahand);
+static t_GVA_RetCode p_wrapper_ffmpeg_seek_frame(t_GVA_Handle *gvahand, gdouble pos, t_GVA_PosUnit pos_unit);
+static t_GVA_RetCode p_wrapper_ffmpeg_get_next_frame(t_GVA_Handle *gvahand);
+static t_GVA_RetCode p_private_ffmpeg_get_next_frame(t_GVA_Handle *gvahand, gboolean do_copy_raw_chunk_data);
/* -----------------------------
@@ -323,6 +330,9 @@
handle->samples_buffer_size = 0;
handle->samples_read = 0;
handle->key_frame_detection_works = FALSE; /* assume a Codec with non working detection */
+ handle->chunk_len = 0;
+ handle->chunk_ptr = NULL;
+
p_reset_proberead_results(handle);
@@ -599,6 +609,151 @@
/* ----------------------------------
+ * p_wrapper_ffmpeg_get_codec_name
+ * ----------------------------------
+ * return the name of the video or audio codec that is used in the
+ * specified track of the open video handle.
+ * the returned string is a g_strdup copy that has to be freed by the caller,
+ * or NULL if no codec information is available.
+ */
+char *
+p_wrapper_ffmpeg_get_codec_name(t_GVA_Handle *gvahand
+ ,t_GVA_CodecType codec_type
+ ,gint32 track_nr
+ )
+{
+ t_GVA_ffmpeg *handle;
+
+ handle = (t_GVA_ffmpeg *)gvahand->decoder_handle;
+ if(handle == NULL)
+ {
+ if(gap_debug)
+ {
+ printf("p_wrapper_ffmpeg_get_codec_name handle == NULL\n");
+ }
+ return(NULL);
+ }
+
+ if (codec_type == GVA_VIDEO_CODEC)
+ {
+ if(handle->vcodec)
+ {
+ if(gap_debug)
+ {
+ printf("p_wrapper_ffmpeg_get_codec_name handle->vcodec == %s\n", handle->vcodec->name);
+ }
+ return(g_strdup(handle->vcodec->name));
+ }
+ }
+
+ if (codec_type == GVA_AUDIO_CODEC)
+ {
+ if(handle->acodec)
+ {
+ if(gap_debug)
+ {
+ printf("p_wrapper_ffmpeg_get_codec_name handle->acodec == %s\n", handle->acodec->name);
+ }
+ return(g_strdup(handle->acodec->name));
+ }
+ }
+
+ if(gap_debug)
+ {
+ printf("p_wrapper_ffmpeg_get_codec_name codec is NULL\n");
+ }
+ return(NULL);
+
+} /* end p_wrapper_ffmpeg_get_codec_name */
+
+
+/* ----------------------------------
+ * p_wrapper_ffmpeg_get_video_chunk
+ * ----------------------------------
+ * read one frame as raw data from the video_track (stream)
+ * note that raw data is not yet decoded and contains
+ * videofileformat specific frame header information.
+ * this procedure is intended for 1:1 (lossless) copying of frames
+ */
+t_GVA_RetCode
+p_wrapper_ffmpeg_get_video_chunk(t_GVA_Handle *gvahand
+ , gint32 frame_nr
+ , unsigned char *chunk
+ , gint32 *size
+ , gint32 max_size)
+{
+ t_GVA_RetCode l_rc;
+ t_GVA_ffmpeg *handle;
+
+ if(frame_nr < 1)
+ {
+    /* illegal frame_nr (first frame starts at Nr 1) */
+ return(GVA_RET_ERROR);
+ }
+
+ handle = (t_GVA_ffmpeg *)gvahand->decoder_handle;
+ if(handle == NULL)
+ {
+ return(GVA_RET_ERROR);
+ }
+
+ /* check if current position is before the wanted frame */
+ if (frame_nr != gvahand->current_seek_nr)
+ {
+ gdouble pos;
+
+ pos = frame_nr;
+ l_rc = p_wrapper_ffmpeg_seek_frame(gvahand, pos, GVA_UPOS_FRAMES);
+ if (l_rc != GVA_RET_OK)
+ {
+ return (l_rc);
+ }
+ }
+
+
+ l_rc = p_private_ffmpeg_get_next_frame(gvahand, TRUE);
+ if(handle->chunk_len > max_size)
+ {
+    printf("CALLING ERROR p_wrapper_ffmpeg_get_video_chunk chunk_len:%d is greater than specified max_size:%d\n"
+ ,(int)handle->chunk_len
+ ,(int)max_size
+ );
+ return(GVA_RET_ERROR);
+ }
+ if(handle->chunk_ptr == NULL)
+ {
+ printf("CALLING ERROR p_wrapper_ffmpeg_get_video_chunk fetch raw frame failed, chunk_ptr is NULL\n");
+ return(GVA_RET_ERROR);
+ }
+
+ if(gap_debug)
+ {
+ char *vcodec_name;
+
+ vcodec_name = "NULL";
+ if(handle->vcodec)
+ {
+ if(handle->vcodec->name)
+ {
+ vcodec_name = handle->vcodec->name;
+ }
+ }
+
+ printf("p_wrapper_ffmpeg_get_video_chunk: chunk:%d chunk_ptr:%d, chunk_len:%d vcodec_name:%s\n"
+ ,(int)chunk
+ ,(int)handle->chunk_ptr
+ ,(int)handle->chunk_len
+ ,vcodec_name
+ );
+ }
+
+ *size = handle->chunk_len;
+ memcpy(chunk, handle->chunk_ptr, handle->chunk_len);
+ return (l_rc);
+} /* end p_wrapper_ffmpeg_get_video_chunk */
+
+
+/* ----------------------------------
* p_wrapper_ffmpeg_get_next_frame
* ----------------------------------
* read one frame from the video_track (stream)
@@ -608,6 +763,20 @@
static t_GVA_RetCode
p_wrapper_ffmpeg_get_next_frame(t_GVA_Handle *gvahand)
{
+  return(p_private_ffmpeg_get_next_frame(gvahand, FALSE));
+} /* end p_wrapper_ffmpeg_get_next_frame */
+
+
+/* ----------------------------------
+ * p_private_ffmpeg_get_next_frame
+ * ----------------------------------
+ * read one frame from the video_track (stream)
+ * and decode the frame
+ * when EOF reached: update total_frames and close the stream)
+ */
+static t_GVA_RetCode
+p_private_ffmpeg_get_next_frame(t_GVA_Handle *gvahand, gboolean do_copy_raw_chunk_data)
+{
t_GVA_ffmpeg *handle;
int l_got_picture;
int l_rc;
@@ -736,7 +905,42 @@
}
- /* if (gap_debug) printf("before avcodec_decode_video: inbuf_ptr:%d inbuf_len:%d\n", (int)handle->inbuf_ptr, (int)handle->inbuf_len); */
+ if (gap_debug)
+ {
+ printf("before avcodec_decode_video: inbuf_ptr:%d inbuf_len:%d\n",
+ (int)handle->inbuf_ptr,
+ (int)handle->inbuf_len);
+ }
+
+
+ /* --------- START potential CHUNK ----- */
+ /* make a copy of the raw data packages for one video frame.
+   * (we do not yet know if the raw data chunk is complete until
+   * avcodec_decode_video has successfully decoded the frame)
+ */
+ if (do_copy_raw_chunk_data == TRUE)
+ {
+ if (handle->chunk_ptr != NULL)
+ {
+ g_free(handle->chunk_ptr);
+ handle->chunk_ptr = NULL;
+ handle->chunk_len = 0;
+ }
+ handle->chunk_len = handle->inbuf_len;
+ if (handle->chunk_len > 0)
+ {
+ handle->chunk_ptr = g_malloc(handle->chunk_len);
+ memcpy(handle->chunk_ptr, handle->inbuf_ptr, handle->chunk_len);
+ if (gap_debug)
+ {
+ printf("copy potential raw chunk: chunk_ptr:%d chunk_len:%d\n",
+ (int)handle->chunk_ptr,
+ (int)handle->chunk_len);
+ }
+ }
+
+ }
+ /* --------- END potential CHUNK ----- */
avcodec_get_frame_defaults(&handle->big_picture_yuv);
@@ -962,7 +1166,7 @@
if(l_rc == 1) { return(GVA_RET_EOF); }
return(GVA_RET_ERROR);
-} /* end p_wrapper_ffmpeg_get_next_frame */
+} /* end p_private_ffmpeg_get_next_frame */
/* ------------------------------
@@ -1474,7 +1678,7 @@
gvahand->percentage_done = 0.0;
- //if(gap_debug)
+ if(gap_debug)
{
printf("p_wrapper_ffmpeg_seek_frame: start: l_frame_pos: %d cur_seek:%d cur_frame:%d (prefere_native:%d gopsize:%d)\n"
, (int)l_frame_pos
@@ -1502,7 +1706,7 @@
l_rc_rd = p_seek_native_timcode_based(gvahand, l_frame_pos);
if(l_rc_rd == GVA_RET_OK)
{
- //if(gap_debug)
+ if(gap_debug)
{
printf("NATIVE SEEK performed for videofile:%s\n"
, gvahand->filename
@@ -1518,7 +1722,7 @@
gint64 seek_pos;
gint32 l_idx;
- //if(gap_debug)
+ if(gap_debug)
{
printf("VIDEO INDEX is available for videofile:%s\n"
, gvahand->filename
@@ -2712,7 +2916,7 @@
master_handle->prefere_native_seek = TRUE;
}
- //if(gap_debug)
+ if(gap_debug)
{
printf("VINDEX done, critical_timecodesteps_found:%d\n"
" master_handle->all_timecodes_verified %d\n"
@@ -2805,7 +3009,8 @@
dec_elem->fptr_get_audio = &p_wrapper_ffmpeg_get_audio;
dec_elem->fptr_count_frames = &p_wrapper_ffmpeg_count_frames;
dec_elem->fptr_seek_support = &p_wrapper_ffmpeg_seek_support;
- dec_elem->fptr_get_video_chunk = NULL; /* &p_wrapper_ffmpeg_get_video_chunk */
+ dec_elem->fptr_get_video_chunk = &p_wrapper_ffmpeg_get_video_chunk;
+ dec_elem->fptr_get_codec_name = &p_wrapper_ffmpeg_get_codec_name;
dec_elem->next = NULL;
}
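With fptr_get_video_chunk now registered for the ffmpeg decoder, callers can fetch the undecoded packet data of a frame through the generic GVA layer. A caller-side sketch (illustrative only; the frame number and the buffer sizing are assumptions, the pattern follows the chunk fetch code that was moved to gap_story_render_lossless.c):

  /* sketch: fetch one raw (still compressed) frame if the decoder supports it */
  if (GVA_has_video_chunk_proc(gvahand) == TRUE)
  {
    unsigned char *chunk_buf;
    gint32         chunk_size;
    gint32         chunk_maxsize;
    t_GVA_RetCode  l_rc;

    chunk_maxsize = gvahand->width * gvahand->height * 4;   /* generous upper bound */
    chunk_buf     = g_malloc(chunk_maxsize);

    l_rc = GVA_get_video_chunk(gvahand
             , 42                 /* frame_nr, starts at 1 (illustrative) */
             , chunk_buf
             , &chunk_size
             , chunk_maxsize
             );
    if (l_rc == GVA_RET_OK)
    {
      /* chunk_buf now holds chunk_size bytes of undecoded frame data,
       * copied by p_wrapper_ffmpeg_get_video_chunk from the packet buffer.
       */
    }
    g_free(chunk_buf);
  }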
Modified: trunk/libgapvidapi/gap_vid_api_util.c
==============================================================================
--- trunk/libgapvidapi/gap_vid_api_util.c (original)
+++ trunk/libgapvidapi/gap_vid_api_util.c Sun Jun 29 12:12:59 2008
@@ -8,6 +8,7 @@
#include <glib/gstdio.h>
+
/* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX END fcache procedures */
@@ -310,12 +311,15 @@
gint l_idx;
gint l_frame_type;
unsigned l_picture_number;
+ gint32 l_max_check_size;
+
+ l_max_check_size = buf_size;
l_frame_type = GVA_MPGFRAME_UNKNOWN;
l_picture_number = 0;
code = 0;
l_idx = 0;
- while(l_idx < buf_size)
+ while(l_idx < l_max_check_size)
{
code <<= 8;
code |= buffer[l_idx++];
@@ -394,14 +398,20 @@
second = code >> 13 & 0x3f;
frame = code >> 7 & 0x3f;
- if(gap_debug) printf("Timecode old: %02d:%02d:%02d:%02d ", hour, minute, second, frame);
+ if(gap_debug)
+ {
+ printf("Timecode old: %02d:%02d:%02d:%02d ", hour, minute, second, frame);
+ }
if((hour == 0)
&& (minute == 0)
&& (second == 0)
&& (frame == 0))
{
- if(gap_debug) printf("\n");
+ if(gap_debug)
+ {
+ printf("\n");
+ }
}
else
{
@@ -419,10 +429,183 @@
buffer[l_idx + 2] = ((second & 0x7) << 5) | (frame >> 1);
buffer[l_idx + 3] = (code & 0x7f) | ((frame & 0x1) << 7);
- if(gap_debug) printf("new: %02d:%02d:%02d:%02d\n", hour, minute, second, frame);
+ if(gap_debug)
+ {
+ printf("new: %02d:%02d:%02d:%02d\n", hour, minute, second, frame);
+ }
}
break;
}
}
} /* end GVA_util_fix_mpg_timecode */
+
+
+/* ----------------------------------------
+ * GVA_util_calculate_mpeg_frameheader_size
+ * ----------------------------------------
+ * scan the buffer for the 1st Mpeg picture start code.
+ * everything from the start of the buffer up to and including the picture header
+ * is considered MPG header information.
+ * (TODO: )
+ *
+ * returns the size of frame/gop header or 0 if no header is present.
+ */
+gint32
+GVA_util_calculate_mpeg_frameheader_size(unsigned char *buffer
+ ,gint32 buf_size
+ )
+{
+ unsigned long code;
+ gint l_idx;
+ gint l_frame_type;
+ unsigned l_picture_number;
+ gint32 l_max_check_size;
+ gint32 l_hdr_size;
+
+ l_max_check_size = buf_size;
+
+ l_frame_type = GVA_MPGFRAME_UNKNOWN;
+ l_picture_number = 0;
+ l_hdr_size = 0;
+ code = 0;
+ l_idx = 0;
+ while(l_idx < l_max_check_size)
+ {
+ code <<= 8;
+ code |= buffer[l_idx++];
+
+ if(code == GVA_MPGHDR_PICTURE_START_CODE)
+ {
+ /* found a picture start code
+ * get next 10 bits for picture_number
+ */
+ l_picture_number =(unsigned long)buffer[l_idx] << 2;
+ l_picture_number |= (unsigned long)((buffer[l_idx +1] >> 6) & 0x3);
+
+ /* get next 3 bits for frame_type information */
+ l_frame_type = ((buffer[l_idx +1] >> 3) & 0x7);
+
+ l_hdr_size = l_idx + 2; // TODO dont know if there are more bytes in the picture header ???
+ break;
+ }
+ }
+
+
+ if(gap_debug)
+ {
+ printf("GVA_util_calculate_mpeg_frameheader_size: %d l_picture_number:%d frame_type:%d (1=I,2=P,3=B)\n"
+ ,(int)l_hdr_size
+ ,(int)l_picture_number
+ ,(int)l_frame_type
+ );
+ }
+
+ return(l_hdr_size);
+} /* end GVA_util_calculate_mpeg_frameheader_size */
+
+
+/* ----------------------------------------
+ * GVA_util_check_jpg_picture
+ * ----------------------------------------
+ * scan the buffer for the 1st JPEG picture.
+ * This implementation checks for the jpeg typical "magic number"
+ * ff d8 ff
+ * TODO: if libjpeg is available (#ifdef HAVE_??LIBJPG)
+ *       we could try a more sophisticated check
+ * via jpeg_read_header from memory...
+ *
+ * returns TRUE if the specified buffer contains a JPEG image.
+ */
+gboolean
+GVA_util_check_jpg_picture(unsigned char *buffer
+ ,gint32 buf_size
+ ,gint32 max_check_size
+ ,gint32 *hdr_size
+ )
+{
+ gint l_idx;
+ gint32 l_max_check_size;
+ gboolean l_jpeg_magic_number_found;
+
+ l_max_check_size = MAX(max_check_size, 1);
+ l_jpeg_magic_number_found = FALSE;
+ l_idx = 0;
+ while(l_idx < l_max_check_size)
+ {
+ /* check magic number */
+ if ((buffer[l_idx] == 0xff)
+ && (buffer[l_idx +1] == 0xd8)
+ && (buffer[l_idx +2] == 0xff))
+ {
+ *hdr_size = l_idx;
+ l_jpeg_magic_number_found = TRUE;
+ break;
+ }
+ l_idx++;
+ }
+
+
+ if(gap_debug)
+ {
+    printf("GVA_util_check_jpg_picture: l_jpeg_magic_number_found:%d at idx:%d hdr_size:%d\n"
+ ,(int)l_jpeg_magic_number_found
+ ,(int)l_idx
+ ,(int)*hdr_size
+ );
+ }
+
+ return(l_jpeg_magic_number_found);
+} /* end GVA_util_check_jpg_picture */
+
+
+/* ----------------------------------------
+ * GVA_util_check_png_picture
+ * ----------------------------------------
+ * scan the buffer for the 1st PNG picture.
+ * This implementation checks for the PNG typical "magic number"
+ * 89 50 4e 47 (.PNG)
+ *
+ * returns TRUE if the specified buffer contains a PNG image.
+ */
+gboolean
+GVA_util_check_png_picture(unsigned char *buffer
+ ,gint32 buf_size
+ ,gint32 max_check_size
+ ,gint32 *hdr_size
+ )
+{
+ gint l_idx;
+ gint32 l_max_check_size;
+ gboolean l_png_magic_number_found;
+
+ l_max_check_size = MAX(max_check_size, 1);
+ l_png_magic_number_found = FALSE;
+ l_idx = 0;
+ while(l_idx < l_max_check_size)
+ {
+ /* check magic number */
+ if ((buffer[l_idx] == 0x89)
+ && (buffer[l_idx +1] == 0x50) // 'P'
+ && (buffer[l_idx +2] == 0x4e) // 'N'
+ && (buffer[l_idx +3] == 0x47)) // 'G'
+ {
+ *hdr_size = l_idx;
+ l_png_magic_number_found = TRUE;
+ break;
+ }
+ l_idx++;
+ }
+
+
+ if(gap_debug)
+ {
+ printf("GVA_util_check_png_picture: l_png_magic_number_found:%d at idx:%d hdr_size:%d\n"
+ ,(int)l_png_magic_number_found
+ ,(int)l_idx
+ ,(int)*hdr_size
+ );
+ }
+
+ return(l_png_magic_number_found);
+} /* end GVA_util_check_png_picture */
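The two magic-number checks are meant for validating fetched chunks before copying them 1:1 into an MJPEG or PNG coded AVI stream (see the GAP_VID_CHCHK_FLAG_JPG / GAP_VID_CHCHK_FLAG_PNG flags in gap_story_render_processor.h). A small sketch (illustrative only; chunk_buf, chunk_size and the 128 byte scan limit are assumptions):

  /* sketch: classify a fetched chunk by its magic number */
  gint32   hdr_size;
  gboolean is_jpeg;
  gboolean is_png;

  hdr_size = 0;
  is_jpeg = GVA_util_check_jpg_picture(chunk_buf, chunk_size, 128, &hdr_size);
  is_png  = FALSE;
  if (!is_jpeg)
  {
    is_png = GVA_util_check_png_picture(chunk_buf, chunk_size, 128, &hdr_size);
  }

  /* on success, hdr_size is the offset of the JPEG or PNG data within the chunk,
   * i.e. the number of leading container bytes that a 1:1 copy may skip.
   */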
Modified: trunk/libgapvidutil/Makefile.am
==============================================================================
--- trunk/libgapvidutil/Makefile.am (original)
+++ trunk/libgapvidutil/Makefile.am Sun Jun 29 12:12:59 2008
@@ -26,6 +26,8 @@
libgapvidutil_a_SOURCES = \
gap_gve_jpeg.c \
gap_gve_jpeg.h \
+ gap_gve_png.c \
+ gap_gve_png.h \
gap_gve_misc_util.c \
gap_gve_misc_util.h \
gap_gve_raw.c \
Added: trunk/libgapvidutil/gap_gve_png.c
==============================================================================
--- (empty file)
+++ trunk/libgapvidutil/gap_gve_png.c Sun Jun 29 12:12:59 2008
@@ -0,0 +1,194 @@
+/* gap_gve_png.c by Wolfgang Hofer
+ *
+ * (GAP ... GIMP Animation Plugins, now also known as GIMP Video)
+ *
+ *
+ * In short, this module contains
+ * .) a software PNG encoder (based on the GIMP PNG file save plug-in)
+ *
+ */
+/* The GIMP -- an image manipulation program
+ * Copyright (C) 1995 Spencer Kimball and Peter Mattis
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+
+/* revision history (see svn)
+ * 2008.06.21 hof: created
+ */
+
+
+/* SYSTEM (UNIX) includes */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <errno.h>
+#include <unistd.h>
+
+/* GIMP includes */
+#include "gtk/gtk.h"
+#include "libgimp/gimp.h"
+
+/* GAP includes */
+#include "gap_file_util.h"
+
+
+/* --------------------------------
+ * p_save_as_tmp_png_file
+ * --------------------------------
+ * call the GIMP PNG file save plug-in as "PNG" encoding engine.
+ */
+gboolean
+p_save_as_tmp_png_file(const char *filename, gint32 image_id, gint32 drawable_id
+ , gint32 png_interlaced, gint32 png_compression)
+{
+ static char *l_called_proc = "file-png-save2";
+ GimpParam *return_vals;
+ int nreturn_vals;
+
+ //if(gap_debug)
+ {
+    printf("GAP: PNG encode via call of %s on filename: %s, image_id:%d, drawable_id:%d\n"
+ , l_called_proc
+ , filename
+ , image_id
+ , drawable_id
+ );
+ }
+
+ return_vals = gimp_run_procedure (l_called_proc,
+ &nreturn_vals,
+ GIMP_PDB_INT32, GIMP_RUN_NONINTERACTIVE /* runmode */,
+ GIMP_PDB_IMAGE, image_id,
+ GIMP_PDB_DRAWABLE, drawable_id,
+ GIMP_PDB_STRING, filename,
+ GIMP_PDB_STRING, filename,
+ GIMP_PDB_INT32, png_interlaced,
+ GIMP_PDB_INT32, png_compression,
+ GIMP_PDB_INT32, 0, /* Write bKGD chunk? */
+ GIMP_PDB_INT32, 0, /* Write gAMA chunk? */
+ GIMP_PDB_INT32, 0, /* Write oFFs chunk? */
+ GIMP_PDB_INT32, 0, /* Write pHYs chunk? */
+ GIMP_PDB_INT32, 0, /* Write tIME chunk? */
+ GIMP_PDB_INT32, 0, /* Write comment? */
+ GIMP_PDB_INT32, 0, /* Preserve color of transparent pixels? */
+ GIMP_PDB_END);
+
+ if (return_vals[0].data.d_status == GIMP_PDB_SUCCESS)
+ {
+ gimp_destroy_params(return_vals, nreturn_vals);
+ return (TRUE);
+ }
+
+  printf("GAP: Error: PDB call of %s failed on filename: %s, image_id:%d, drawable_id:%d, d_status:%d %s\n"
+      , l_called_proc
+      , filename
+      , image_id
+      , drawable_id
+      , (int)return_vals[0].data.d_status
+      , p_status_to_string(return_vals[0].data.d_status)
+      );
+  gimp_destroy_params(return_vals, nreturn_vals);
+  return(FALSE);
+
+} /* end p_save_as_tmp_png_file */
+
+
+
+/* --------------------------------
+ * gap_gve_png_drawable_encode_png
+ * --------------------------------
+ * in: drawable: Describes the picture to be compressed in GIMP terms.
+ * png_interlaced: TRUE: Generate interlaced png.
+ * png_compression: The compression of the generated PNG (0-9, where 9 is best, 0 fastest).
+ * app0_buffer: if != NULL, the content of the APP0-marker to write.
+ * app0_length: the length of the APP0-marker.
+ * out:PNG_size: The size of the buffer that is returned.
+ * returns: guchar *: A buffer, allocated by this routine, which contains
+ * the compressed PNG, NULL on error.
+ */
+guchar *
+gap_gve_png_drawable_encode_png(GimpDrawable *drawable, gint32 png_interlaced, gint32 *PNG_size,
+ gint32 png_compression,
+ void *app0_buffer, gint32 app0_length)
+{
+  guchar *buffer = NULL;
+ guchar *PNG_data;
+ size_t totalsize = 0;
+ gint32 image_id;
+ gboolean l_pngSaveOk;
+ char *l_tmpname;
+
+ l_tmpname = gimp_temp_name("tmp.png");
+ image_id = gimp_drawable_get_image(drawable->drawable_id);
+
+ l_pngSaveOk = p_save_as_tmp_png_file(l_tmpname
+ , image_id
+ , drawable->drawable_id
+ , png_interlaced
+ , png_compression
+ );
+ if (l_pngSaveOk)
+ {
+ gint32 fileSize;
+ gint32 bytesRead;
+
+ fileSize = gap_file_get_filesize(l_tmpname);
+
+ totalsize = app0_length + fileSize;
+ buffer = g_malloc(totalsize);
+ PNG_data = buffer;
+    if(app0_length > 0)
+    {
+      memcpy(buffer, app0_buffer, app0_length);
+      PNG_data = buffer + app0_length;
+    }
+
+    /* load the PNG data from the temporary file
+     * (this must happen whether or not an app0 prefix is present)
+     */
+    bytesRead = gap_file_load_file_segment(l_tmpname
+                  ,PNG_data
+                  ,0          /* seek_index, start byte of datasegment in file */
+                  ,fileSize   /* segment size in bytes */
+                  );
+    if (bytesRead != fileSize)
+    {
+      g_free(buffer);
+      buffer = NULL;
+      totalsize = 0;
+      printf("gap_gve_png_drawable_encode_png: read error: bytesRead:%d (expected: %d) file:%s\n"
+        ,(int)bytesRead
+        ,(int)fileSize
+        ,l_tmpname
+        );
+    }
+
+ }
+
+ if(g_file_test(l_tmpname, G_FILE_TEST_EXISTS))
+ {
+ g_remove(l_tmpname);
+ }
+
+  /* free the temporary filename string */
+ g_free (l_tmpname);
+
+ *PNG_size = totalsize;
+ return buffer;
+
+} /* end gap_gve_png_drawable_encode_png */
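gap_gve_png_drawable_encode_png delegates the actual PNG encoding to the GIMP file-png-save2 plug-in via a temporary file and returns the result as a single memory buffer. A usage sketch (illustrative only; image_id is assumed to be a valid image and no APP0 prefix is used):

  /* sketch: encode the active drawable of an image to an in-memory PNG */
  GimpDrawable *drawable;
  guchar       *png_buffer;
  gint32        png_size;

  drawable   = gimp_drawable_get(gimp_image_get_active_drawable(image_id));
  png_buffer = gap_gve_png_drawable_encode_png(drawable
                 , FALSE           /* png_interlaced */
                 , &png_size
                 , 9               /* png_compression: best */
                 , NULL, 0         /* no app0_buffer */
                 );
  if (png_buffer)
  {
    /* png_size bytes are ready to be written to the AVI stream (or a file) */
    g_free(png_buffer);
  }
  gimp_drawable_detach(drawable);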
Added: trunk/libgapvidutil/gap_gve_png.h
==============================================================================
--- (empty file)
+++ trunk/libgapvidutil/gap_gve_png.h Sun Jun 29 12:12:59 2008
@@ -0,0 +1,35 @@
+/* gap_gve_png.h by Wolfgang Hofer (hof gimp org)
+ *
+ * (GAP ... GIMP Animation Plugins, now also known as GIMP Video)
+ *
+ */
+
+
+/* revision history (see svn)
+ * 2008.06.22 hof: created
+ */
+
+#ifndef GAP_GVE_PNG_H
+#define GAP_GVE_PNG_H
+
+
+/* ------------------------------------
+ * gap_gve_png_drawable_encode_png
+ * ------------------------------------
+ * in: drawable: Describes the picture to be compressed in GIMP terms.
+        png_interlaced: TRUE: Generate an interlaced PNG.
+        png_compression: The compression of the generated PNG (0-9, where 9 is best, 0 fastest).
+        app0_buffer: if != NULL, the content of the APP0-marker to write.
+        app0_length: the length of the APP0-marker.
+        out:PNG_size: The size of the buffer that is returned.
+        returns: guchar *: A buffer, allocated by this routine, which contains
+                 the compressed PNG, NULL on error.
+ */
+
+guchar *gap_gve_png_drawable_encode_png(GimpDrawable *drawable, gint32 png_interlaced, gint32 *PNG_size,
+ gint32 png_compression,
+ void *app0_buffer, gint32 app0_length);
+
+
+
+#endif
Modified: trunk/po/POTFILES.in
==============================================================================
--- trunk/po/POTFILES.in (original)
+++ trunk/po/POTFILES.in Sun Jun 29 12:12:59 2008
@@ -44,6 +44,7 @@
gap/gap_story_properties.c
gap/gap_story_render_audio.c
gap/gap_story_render_processor.c
+gap/gap_story_render_lossless.c
gap/gap_story_section_properties.c
gap/gap_story_vthumb.c
gap/gap_vex_dialog.c
Modified: trunk/vid_enc_avi/gap_enc_avi_gui.c
==============================================================================
--- trunk/vid_enc_avi/gap_enc_avi_gui.c (original)
+++ trunk/vid_enc_avi/gap_enc_avi_gui.c Sun Jun 29 12:12:59 2008
@@ -54,12 +54,12 @@
#define GAP_ENC_AVI_RESPONSE_RESET 1
-
-
static const char *gtab_avi_codecname[GAP_AVI_VIDCODEC_MAX_ELEMENTS]
- = { "JPEG"
- , "RAW "
- , "XVID"
+ = { GAP_AVI_CODEC_JPEG
+ , GAP_AVI_CODEC_MJPG
+ , GAP_AVI_CODEC_PNG
+ , GAP_AVI_CODEC_RAW
+ , GAP_AVI_CODEC_XVID
};
@@ -184,7 +184,7 @@
/* init checkbuttons */
gtk_toggle_button_set_active (GTK_TOGGLE_BUTTON (gpp->jpg_dont_recode_checkbutton)
- , epp->dont_recode_frames);
+ , epp->jpeg_dont_recode_frames);
gtk_toggle_button_set_active (GTK_TOGGLE_BUTTON (gpp->jpg_interlace_checkbutton)
, epp->jpeg_interlaced);
gtk_toggle_button_set_active (GTK_TOGGLE_BUTTON (gpp->jpg_odd_first_checkbutton)
@@ -214,35 +214,58 @@
static void
p_set_codec_dependent_wgt_senistive(GapGveAviGlobalParams *gpp, gint32 idx)
{
+ gboolean png_sensitive;
gboolean jpeg_sensitive;
gboolean xvid_sensitive;
+ gboolean raw_sensitive;
+ gint notebook_page_idx;
+ png_sensitive = TRUE;
jpeg_sensitive = TRUE;
xvid_sensitive = TRUE;
switch(idx)
{
case GAP_AVI_VIDCODEC_00_JPEG:
+ case GAP_AVI_VIDCODEC_01_MJPG:
+ notebook_page_idx = GAP_AVI_VIDCODEC_00_JPEG;
+ png_sensitive = FALSE;
jpeg_sensitive = TRUE;
xvid_sensitive = FALSE;
+ raw_sensitive = FALSE;
+ break;
+ case GAP_AVI_VIDCODEC_02_PNG:
+ notebook_page_idx = idx -1;
+ png_sensitive = TRUE;
+ jpeg_sensitive = FALSE;
+ xvid_sensitive = FALSE;
+ raw_sensitive = FALSE;
break;
- case GAP_AVI_VIDCODEC_01_RAW:
+ case GAP_AVI_VIDCODEC_03_RAW:
+ notebook_page_idx = idx -1;
+ png_sensitive = FALSE;
jpeg_sensitive = FALSE;
xvid_sensitive = FALSE;
+ raw_sensitive = TRUE;
break;
- case GAP_AVI_VIDCODEC_02_XVID:
+ case GAP_AVI_VIDCODEC_04_XVID:
+ notebook_page_idx = idx -1;
+ png_sensitive = FALSE;
jpeg_sensitive = FALSE;
xvid_sensitive = TRUE;
+ raw_sensitive = FALSE;
break;
default:
- idx = GAP_AVI_VIDCODEC_00_JPEG;
+ notebook_page_idx = GAP_AVI_VIDCODEC_00_JPEG;
+ png_sensitive = FALSE;
jpeg_sensitive = TRUE;
xvid_sensitive = FALSE;
+ raw_sensitive = FALSE;
break;
}
if(gpp->notebook_main)
- gtk_notebook_set_current_page(GTK_NOTEBOOK(gpp->notebook_main), idx);
+ gtk_notebook_set_current_page(GTK_NOTEBOOK(gpp->notebook_main), notebook_page_idx);
if(gpp->jpg_dont_recode_checkbutton)
gtk_widget_set_sensitive(gpp->jpg_dont_recode_checkbutton, jpeg_sensitive);
@@ -271,6 +294,16 @@
if(gpp->xvid_quality_spinbutton)
gtk_widget_set_sensitive(gpp->xvid_quality_spinbutton, xvid_sensitive);
+ if(gpp->png_dont_recode_checkbutton)
+ gtk_widget_set_sensitive(gpp->png_dont_recode_checkbutton, png_sensitive);
+ if(gpp->png_interlace_checkbutton)
+ gtk_widget_set_sensitive(gpp->png_interlace_checkbutton, png_sensitive);
+ if(gpp->png_compression_spinbutton)
+ gtk_widget_set_sensitive(gpp->png_compression_spinbutton, png_sensitive);
+
+ if(gpp->raw_vflip_checkbutton)
+ gtk_widget_set_sensitive(gpp->raw_vflip_checkbutton, raw_sensitive);
+
} /* end p_set_codec_dependent_wgt_senistive */
@@ -390,6 +423,7 @@
GapGveAviValues *epp;
gint master_row;
gint jpg_row;
+ gint png_row;
gint xvid_row;
gint raw_row;
GtkWidget *shell_window;
@@ -397,18 +431,20 @@
GtkWidget *frame_main;
GtkWidget *notebook_main;
GtkWidget *frame_jpg;
+ GtkWidget *frame_png;
GtkWidget *frame_xvid;
GtkWidget *frame_raw;
GtkWidget *label;
GtkWidget *table_master;
GtkWidget *table_jpg;
+ GtkWidget *table_png;
GtkWidget *table_xvid;
GtkWidget *table_raw;
GtkWidget *checkbutton;
GtkWidget *spinbutton;
GtkObject *adj;
GtkWidget *combo_codec;
-
+ gint notebook_page_idx;
epp = &gpp->evl;
@@ -455,9 +491,11 @@
/* the Video CODEC combo */
combo_codec = gimp_int_combo_box_new ("JPEG", GAP_AVI_VIDCODEC_00_JPEG,
- "RAW", GAP_AVI_VIDCODEC_01_RAW,
+ "MJPG", GAP_AVI_VIDCODEC_01_MJPG,
+ "PNG", GAP_AVI_VIDCODEC_02_PNG,
+ "RAW", GAP_AVI_VIDCODEC_03_RAW,
#ifdef ENABLE_LIBXVIDCORE
- "XVID", GAP_AVI_VIDCODEC_02_XVID,
+ "XVID", GAP_AVI_VIDCODEC_04_XVID,
#endif
NULL);
@@ -533,14 +571,16 @@
/* the notebook page for JPEG Codec options */
/* ----------------------------------------- */
- frame_jpg = gimp_frame_new (_("JPEG Codec Options"));
+ notebook_page_idx = 0;
+ frame_jpg = gimp_frame_new (_("JPEG / MJPG Codec Options"));
gtk_widget_show (frame_jpg);
gtk_container_add (GTK_CONTAINER (notebook_main), frame_jpg);
gtk_container_set_border_width (GTK_CONTAINER (frame_jpg), 4);
label = gtk_label_new (_("JPEG Options"));
gtk_widget_show (label);
- gtk_notebook_set_tab_label (GTK_NOTEBOOK (notebook_main), gtk_notebook_get_nth_page (GTK_NOTEBOOK (notebook_main), 0), label);
+ gtk_notebook_set_tab_label (GTK_NOTEBOOK (notebook_main)
+ , gtk_notebook_get_nth_page (GTK_NOTEBOOK (notebook_main), notebook_page_idx), label);
/* the table for the JPEG option widgets */
@@ -572,14 +612,11 @@
, (gpointer)gpp);
g_signal_connect (G_OBJECT (checkbutton), "toggled"
, G_CALLBACK (on_checkbutton_toggled)
- , &epp->dont_recode_frames);
+ , &epp->jpeg_dont_recode_frames);
gimp_help_set_help_data (checkbutton
, _("Don't recode the input JPEG frames."
- " This option is ignored when input is read from storyboard."
- " WARNING: works only if all input frames are JPEG pictures"
- " with matching size and YUV 4:2:2 encoding."
- " This option may produce an unusable video"
- " when other frames are provided as input.")
+ " WARNING: This option may produce an unusable video"
+ " when refered JPEG frames are not YUV 4:2:2 encoded.")
, NULL);
@@ -669,10 +706,123 @@
, _("The quality setting of the encoded JPEG frames (100=best quality)")
, NULL);
+ /* the notebook page for PNG Codec options */
+ /* ----------------------------------------- */
+ notebook_page_idx++;
+
+ frame_png = gimp_frame_new (_("PNG Codec Options"));
+ gtk_widget_show (frame_png);
+ gtk_container_add (GTK_CONTAINER (notebook_main), frame_png);
+ gtk_container_set_border_width (GTK_CONTAINER (frame_png), 4);
+
+ label = gtk_label_new (_("PNG Options"));
+ gtk_widget_show (label);
+ gtk_notebook_set_tab_label (GTK_NOTEBOOK (notebook_main)
+ , gtk_notebook_get_nth_page (GTK_NOTEBOOK (notebook_main), notebook_page_idx), label);
+
+
+ /* the table for the PNG option widgets */
+ table_png = gtk_table_new (4, 3, FALSE);
+ gtk_widget_show (table_png);
+ gtk_container_add (GTK_CONTAINER (frame_png), table_png);
+ gtk_container_set_border_width (GTK_CONTAINER (table_png), 5);
+ gtk_table_set_row_spacings (GTK_TABLE (table_png), 2);
+ gtk_table_set_col_spacings (GTK_TABLE (table_png), 2);
+
+ png_row = 0;
+
+ /* the dont recode label */
+ label = gtk_label_new (_("Dont Recode:"));
+ gtk_widget_show (label);
+ gtk_misc_set_alignment (GTK_MISC (label), 0.0, 0.5);
+ gtk_table_attach (GTK_TABLE (table_png), label, 0, 1, png_row, png_row+1,
+ (GtkAttachOptions) (GTK_FILL),
+ (GtkAttachOptions) (0), 0, 0);
+
+ /* the dont recode checkbutton */
+ checkbutton = gtk_check_button_new_with_label (" ");
+ gpp->png_dont_recode_checkbutton = checkbutton;
+ gtk_widget_show (checkbutton);
+ gtk_table_attach (GTK_TABLE (table_png), checkbutton, 1, 2, png_row, png_row+1,
+ (GtkAttachOptions) (GTK_FILL),
+ (GtkAttachOptions) (0), 0, 0);
+ g_object_set_data (G_OBJECT (checkbutton), "gpp"
+ , (gpointer)gpp);
+ g_signal_connect (G_OBJECT (checkbutton), "toggled"
+ , G_CALLBACK (on_checkbutton_toggled)
+ , &epp->png_dont_recode_frames);
+ gimp_help_set_help_data (checkbutton
+ , _("Don't recode the input PNG frames when possible."
+ " WARNING: This option may produce an unusable video")
+ , NULL);
+
+
+ png_row++;
+
+ /* the interlace label */
+ label = gtk_label_new (_("Interlace:"));
+ gtk_widget_show (label);
+ gtk_misc_set_alignment (GTK_MISC (label), 0.0, 0.5);
+ gtk_table_attach (GTK_TABLE (table_png), label, 0, 1, png_row, png_row+1,
+ (GtkAttachOptions) (GTK_FILL),
+ (GtkAttachOptions) (0), 0, 0);
+
+
+ /* the interlace checkbutton */
+ checkbutton = gtk_check_button_new_with_label (" ");
+ gpp->png_interlace_checkbutton = checkbutton;
+ gtk_widget_show (checkbutton);
+ gtk_table_attach (GTK_TABLE (table_png), checkbutton, 1, 2, png_row, png_row+1,
+ (GtkAttachOptions) (GTK_FILL),
+ (GtkAttachOptions) (0), 0, 0);
+ g_object_set_data (G_OBJECT (checkbutton), "gpp"
+ , (gpointer)gpp);
+ g_signal_connect (G_OBJECT (checkbutton), "toggled"
+ , G_CALLBACK (on_checkbutton_toggled)
+ , &epp->png_interlaced);
+ gimp_help_set_help_data (checkbutton
+ , _("Generate interlaced PNGs")
+ , NULL);
+
+
+ png_row++;
+
+
+ /* the png compression label */
+ label = gtk_label_new (_("Compression:"));
+ gtk_widget_show (label);
+ gtk_misc_set_alignment (GTK_MISC (label), 0.0, 0.5);
+ gtk_table_attach (GTK_TABLE (table_png), label, 0, 1, png_row, png_row+1,
+ (GtkAttachOptions) (GTK_FILL),
+ (GtkAttachOptions) (0), 0, 0);
+
+ /* the png compression spinbutton */
+ adj = gtk_adjustment_new (epp->png_compression
+ , 0, 9
+ , 1, 10, 10);
+ gpp->png_compression_spinbutton_adj = adj;
+ spinbutton = gtk_spin_button_new (GTK_ADJUSTMENT (adj), 1, 0);
+ gpp->png_compression_spinbutton = spinbutton;
+ gtk_widget_show (spinbutton);
+ gtk_table_attach (GTK_TABLE (table_png), spinbutton, 1, 2, png_row, png_row+1,
+ (GtkAttachOptions) (GTK_EXPAND | GTK_FILL),
+ (GtkAttachOptions) (0), 0, 0);
+ g_object_set_data (G_OBJECT (checkbutton), "gpp"
+ , (gpointer)gpp);
+ g_signal_connect (G_OBJECT (adj), "value_changed"
+ , G_CALLBACK (on_gint32_spinbutton_changed)
+ , &epp->jpeg_quality);
+ gimp_help_set_help_data (spinbutton
+ , _("The compression setting of the encoded PNG frames (9=best compression"
+ "0=fastest)")
+ , NULL);
+
/* the notebook page for RAW Codec options */
/* ----------------------------------------- */
+ notebook_page_idx++;
+
frame_raw = gimp_frame_new (_("RAW Codec Options"));
gtk_widget_show (frame_raw);
gtk_container_add (GTK_CONTAINER (notebook_main), frame_raw);
@@ -680,7 +830,8 @@
label = gtk_label_new (_("RAW Options"));
gtk_widget_show (label);
- gtk_notebook_set_tab_label (GTK_NOTEBOOK (notebook_main), gtk_notebook_get_nth_page (GTK_NOTEBOOK (notebook_main), 1), label);
+ gtk_notebook_set_tab_label (GTK_NOTEBOOK (notebook_main)
+ , gtk_notebook_get_nth_page (GTK_NOTEBOOK (notebook_main), notebook_page_idx), label);
/* the table for the RAW option widgets */
table_raw = gtk_table_new (1, 3, FALSE);
@@ -737,6 +888,7 @@
/* the notebook page for XVID Codec options */
/* ----------------------------------------- */
+ notebook_page_idx++;
frame_xvid = gimp_frame_new (_("XVID Codec Options"));
gtk_widget_show (frame_xvid);
gtk_container_add (GTK_CONTAINER (notebook_main), frame_xvid);
@@ -744,7 +896,8 @@
label = gtk_label_new (_("XVID Options"));
gtk_widget_show (label);
- gtk_notebook_set_tab_label (GTK_NOTEBOOK (notebook_main), gtk_notebook_get_nth_page (GTK_NOTEBOOK (notebook_main), 2), label);
+ gtk_notebook_set_tab_label (GTK_NOTEBOOK (notebook_main)
+ , gtk_notebook_get_nth_page (GTK_NOTEBOOK (notebook_main), notebook_page_idx), label);
/* the table for the XVID option widgets */
Modified: trunk/vid_enc_avi/gap_enc_avi_main.c
==============================================================================
--- trunk/vid_enc_avi/gap_enc_avi_main.c (original)
+++ trunk/vid_enc_avi/gap_enc_avi_main.c Sun Jun 29 12:12:59 2008
@@ -43,13 +43,6 @@
#include <libgimp/gimpui.h>
-/* names of the supported AVI Codecs */
-#define GAP_AVI_CODEC_RAW "RAW " /* refers to 4 byte code "RGB " */
-#define GAP_AVI_CODEC_RGB "RGB "
-#define GAP_AVI_CODEC_JPEG "JPEG"
-/* ??? not sure what to use for the correct 4cc codec names for xvid divx MPEG 4 */
-#define GAP_AVI_CODEC_XVID "XVID"
-#define GAP_AVI_CODEC_DIVX "div5"
@@ -157,8 +150,8 @@
{GIMP_PDB_INT32, "run_mode", "interactive, non-interactive"},
{GIMP_PDB_STRING, "key_stdpar", "key to get standard video encoder params via gimp_get_data"},
{GIMP_PDB_STRING, "codec_name", "identifier of the codec. one of the strings \"JPEG\", \"RGB\", \"DIVX\" "},
- {GIMP_PDB_INT32, "dont_recode_frames", "=1: store the frames _directly_ into the AVI. "
- "(works only for codec_name JPEG and input frames must be 4:2:2 JPEG !)"},
+ {GIMP_PDB_INT32, "jpeg_dont_recode_frames", "=1: store the frames _directly_ into the AVI where possible. "
+ "(works only for codec_name MJPG or JPEG and input frames must be 4:2:2 JPEG !)"},
{GIMP_PDB_INT32, "jpeg_interlaced", "=1: store two JPEG frames, for the odd/even lines"},
{GIMP_PDB_INT32, "jpeg_quality", "the quality of the coded jpegs (0 - 100%)"},
{GIMP_PDB_INT32, "jpeg_odd_even", "if jpeg_interlaced: odd frames first ?"},
@@ -178,7 +171,12 @@
{GIMP_PDB_INT32, "APP0_marker", "=1: write APP0 marker for each frame into the AVI. "
"( The APP0 marker is evaluated by some Windows programs for AVIs)"},
- {GIMP_PDB_INT32, "raw_vflip", "=1: flip vertically (only for codec_name RAW and RGB )"}
+ {GIMP_PDB_INT32, "raw_vflip", "=1: flip vertically (only for codec_name RAW and RGB )"},
+
+ {GIMP_PDB_INT32, "png_dont_recode_frames", "=1: store the frames _directly_ into the AVI where possible. "
+ "(works only for codec_name PNG )"},
+ {GIMP_PDB_INT32, "png_interlaced", "=1: interlaced png frames, 0= no interlace"},
+ {GIMP_PDB_INT32, "png_compression", "the compression of the coded pngs (0 - 9) where 9 is best and 0 is fast "}
};
static int nargs_avi_enc_par = sizeof(args_avi_enc_par) / sizeof(args_avi_enc_par[0]);
@@ -205,7 +203,7 @@
gimp_install_procedure(GAP_PLUGIN_NAME_AVI_ENCODE,
_("avi video encoding for anim frames. Menu: @AVI@"),
- _("This plugin does fake video encoding of animframes."
+ _("This plugin handles video encoding for the AVI videoformat."
" the (optional) audiodata must be a raw datafile(s) or .wav (RIFF WAVEfmt ) file(s)"
" .wav files can be mono (1) or stereo (2channels) audiodata must be 16bit uncompressed."
" IMPORTANT: you should first call "
@@ -367,7 +365,7 @@
g_snprintf(epp->codec_name, sizeof(epp->codec_name), "%s", param[2+1].data.d_string);
}
l_ii = 2+2;
- epp->dont_recode_frames = param[l_ii++].data.d_int32;
+ epp->jpeg_dont_recode_frames = param[l_ii++].data.d_int32;
epp->jpeg_interlaced = param[l_ii++].data.d_int32;
epp->jpeg_quality = param[l_ii++].data.d_int32;
epp->jpeg_odd_even = param[l_ii++].data.d_int32;
@@ -387,6 +385,10 @@
epp->raw_vflip = param[l_ii++].data.d_int32;
+ epp->png_dont_recode_frames = param[l_ii++].data.d_int32;
+ epp->png_interlaced = param[l_ii++].data.d_int32;
+ epp->png_compression = param[l_ii++].data.d_int32;
+
}
}
else
@@ -541,8 +543,8 @@
{
if(gap_debug) printf("gap_enc_avi_main_init_default_params\n");
- g_snprintf(epp->codec_name, sizeof(epp->codec_name), GAP_AVI_CODEC_JPEG);
- epp->dont_recode_frames = 0;
+ g_snprintf(epp->codec_name, sizeof(epp->codec_name), GAP_AVI_CODEC_MJPG);
+ epp->jpeg_dont_recode_frames = 0;
epp->jpeg_interlaced = 0;
epp->jpeg_quality = 84;
epp->jpeg_odd_even = 0;
@@ -566,6 +568,20 @@
} /* end gap_enc_avi_main_init_default_params */
+gint32
+p_dimSizeOfRawFrame(GapGveAviGlobalParams *gpp)
+{
+ gint32 sizeOfRawFrame;
+
+ /* size of uncompressed RGBA frame + safety of 1000 bytes should be
+ * more than enough
+ */
+ sizeOfRawFrame = 1000 + (gpp->val.vid_width * gpp->val.vid_height * 4);
+
+ return (sizeOfRawFrame);
+} /* end p_dimSizeOfRawFrame */
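
As a worked example of the sizing above (a sketch only, assuming PAL-sized 720x576 output frames):
the buffer dimensioned here is 720 * 576 * 4 + 1000 = 1659880 bytes, which is always large enough
to hold one uncompressed RGBA frame plus a small frame chunk header.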
+
+
/* ============================================================================
* p_avi_encode
* The main "productive" routine
@@ -603,6 +619,13 @@
long l_samples = 0;
guchar *buffer; /* Holding misc. file contents */
+ unsigned char *l_video_chunk_ptr;
+ gint32 l_maxSizeOfRawFrame;
+ gint32 l_max_master_frame_nr;
+ gint32 l_cnt_encoded_frames;
+ gint32 l_cnt_reused_frames;
+ gint32 l_check_flags;
+ gint32 l_out_frame_nr;
gint32 wavsize = 0; /* Data size of the wav file */
long audio_margin = 8192; /* The audio chunk size */
@@ -611,6 +634,9 @@
long frames_per_second_x100 = 2500;
gdouble frames_per_second_x100f = 2500;
char databuffer[300000]; /* For transferring audio data */
+ gint32 l_video_frame_chunk_size;
+ gint32 l_video_frame_chunk_hdr_size;
+ gboolean l_dont_recode_frames;
#ifdef ENABLE_LIBXVIDCORE
GapGveXvidControl *xvid_control = NULL;
@@ -638,8 +664,19 @@
printf(" codec_name:%s:\n", epp->codec_name);
}
+ l_maxSizeOfRawFrame = p_dimSizeOfRawFrame(gpp);
+ l_video_chunk_ptr = g_malloc0(l_maxSizeOfRawFrame);
+
+ l_out_frame_nr = 0;
l_rc = 0;
l_layer_id = -1;
+ l_cnt_encoded_frames = 0;
+ l_cnt_reused_frames = 0;
+ l_tmp_image_id = -1;
+ l_check_flags = GAP_VID_CHCHK_FLAG_SIZE;
+ l_video_frame_chunk_size = 0;
+ l_video_frame_chunk_hdr_size = 0;
+ l_dont_recode_frames = FALSE;
/* make list of frameranges */
{ gint32 l_total_framecount;
@@ -747,6 +784,8 @@
#ifdef ENABLE_LIBXVIDCORE
if ((strcmp(epp->codec_name, GAP_AVI_CODEC_RGB) != 0)
&& (strcmp(epp->codec_name, GAP_AVI_CODEC_RAW) != 0)
+ && (strcmp(epp->codec_name, GAP_AVI_CODEC_PNG) != 0)
+ && (strcmp(epp->codec_name, GAP_AVI_CODEC_MJPG) != 0)
&& (strcmp(epp->codec_name, GAP_AVI_CODEC_JPEG) != 0))
{
if(gap_debug) printf("INIT Encoder Instance (HANDLE) for XVID (OpenDivX)\n");
@@ -780,125 +819,110 @@
}
l_begin = gpp->val.range_from;
l_end = gpp->val.range_to;
+ l_max_master_frame_nr = abs(l_end - l_begin) + 1;
l_cur_frame_nr = l_begin;
while(l_rc >= 0)
{
if(l_cur_frame_nr == l_begin) /* setup things first if this is the first frame */
{
- /* (light) check if dont_recode_frames is possible */
- if (epp->dont_recode_frames)
+ /* jpeg/mjpeg codec specific check flags setup */
+ if (epp->jpeg_dont_recode_frames)
{
- /* DONT_RECODE works only if:
- * - input is a series of JPEG frames all encoded with YUV 4:2:2
- * - framesize is same as videosize
- * - codec_name is "JPEG"
- */
- if(gpp->val.storyboard_file)
+ if ((strcmp(epp->codec_name, GAP_AVI_CODEC_JPEG) == 0)
+ || (strcmp(epp->codec_name, GAP_AVI_CODEC_MJPG) == 0))
{
- if(*gpp->val.storyboard_file != '\0')
- {
- /* storyboard returns image_id of a composite image. We MUST recode here !
- */
- epp->dont_recode_frames = FALSE;
- }
+ l_check_flags |= (GAP_VID_CHCHK_FLAG_JPG | GAP_VID_CHCHK_FLAG_FULL_FRAME);
+ l_dont_recode_frames = TRUE;
}
-
- if (strcmp(epp->codec_name, GAP_AVI_CODEC_JPEG) != 0)
- {
- /* must recode the frame other codecs than JPEG */
- epp->dont_recode_frames = FALSE;
- }
-
- if ((strcmp(gpp->ainfo.extension, ".jpg") != 0)
- && (strcmp(gpp->ainfo.extension, ".jpeg") != 0)
- && (strcmp(gpp->ainfo.extension, ".JPG") != 0)
- && (strcmp(gpp->ainfo.extension, ".JPEG") != 0))
+ }
+ /* png codec specific check flags setup */
+ if (epp->png_dont_recode_frames)
+ {
+ if (strcmp(epp->codec_name, GAP_AVI_CODEC_PNG) == 0)
{
- /* we MUST recode if frame is no JPG,
- * (if image does not fit in size or is not JPG or is not YUV 4:2:2
- * there wold be the need of recoding too
- * but this code does just a 'light' check for extensions)
- */
- epp->dont_recode_frames = FALSE;
+ l_check_flags |= (GAP_VID_CHCHK_FLAG_PNG | GAP_VID_CHCHK_FLAG_FULL_FRAME);
+ l_dont_recode_frames = TRUE;
}
-
+ }
+
+ //if(gap_debug)
+ {
+ printf("l_dont_recode_frames:%d\n", l_dont_recode_frames);
}
} /* end setup of 1.st frame (l_cur_frame_nr == l_begin) */
- /* must fetch the frame into gimp_image if we have to recode */
- if (!epp->dont_recode_frames)
+ /* calling the frame fetcher */
{
- /* load the current frame image, and transform (flatten, convert to RGB, scale, macro, etc..) */
- l_tmp_image_id = gap_gve_story_fetch_composite_image(l_vidhand
- , l_cur_frame_nr
- , (gint32) gpp->val.vid_width
- , (gint32) gpp->val.vid_height
- , gpp->val.filtermacro_file
- , &l_layer_id /* output */
- );
- if(l_tmp_image_id < 0)
+ gboolean l_fetch_ok;
+ gboolean l_force_keyframe;
+
+ l_out_frame_nr++;
+
+
+ l_fetch_ok = gap_story_render_fetch_composite_image_or_chunk(l_vidhand
+ , l_cur_frame_nr
+ , (gint32) gpp->val.vid_width
+ , (gint32) gpp->val.vid_height
+ , gpp->val.filtermacro_file
+ , &l_layer_id /* output */
+ , &l_tmp_image_id /* output */
+ , l_dont_recode_frames /* dont_recode_flag */
+ , NULL /* GapCodecNameElem *vcodec_list NULL == no checks */
+ , &l_force_keyframe
+ , l_video_chunk_ptr
+ , &l_video_frame_chunk_size /* actual chunk size (incl. header) */
+ , l_maxSizeOfRawFrame /* IN max size */
+ , gpp->val.framerate
+ , l_max_master_frame_nr
+ , &l_video_frame_chunk_hdr_size
+ , l_check_flags
+ );
+ if(l_fetch_ok != TRUE)
{
l_rc = -1;
}
}
-
+
+
/* this block is done foreach handled video frame */
if(l_rc == 0)
{
- /* encode one VIDEO FRAME */
- if (epp->dont_recode_frames)
+ if (l_video_frame_chunk_size > 0)
{
- char *l_frame_filename;
+ /* 1:1 lossless copy one VIDEO FRAME */
+ l_cnt_reused_frames++;
-
- if(gap_debug) printf("DONT_RECODE_FRAMES packing input frame 1:1 into AVI\n");
-
- /* the DONT_RECODE_FRAMES option is fast,
- * - but works only if input is JPEG with size is same as videosize
- * - of course there is no support for filtermacros and storyboard stuff
- * because the processing of gap_gve_story_fetch_composite_image is passed by in that case!
- */
- /* build the frame name */
- /* Use the gap functions to generate the frame filename */
- l_frame_filename = gap_lib_alloc_fname(gpp->ainfo.basename,
- l_cur_frame_nr,
- gpp->ainfo.extension);
- /* can't find the frame ? */
- if(l_frame_filename == NULL)
+ //if (gap_debug)
{
- l_rc = -1;
+ printf("DEBUG: 1:1 copy of frame %d (fetch as chunk OK) chunk_ptr:%d chunk_size:%d chunk_hdr_size:%d\n"
+ , (int)l_cur_frame_nr
+ , (int)l_video_chunk_ptr
+ , (int)l_video_frame_chunk_size
+ , (int)l_video_frame_chunk_hdr_size
+ );
}
- else
- {
- l_FRAME_size = gap_file_get_filesize(l_frame_filename);
- buffer = gap_file_load_file(l_frame_filename);
- if (buffer == NULL)
- {
- printf("gap_avi: Failed opening encoded input frame %s.",
- l_frame_filename);
- l_rc = -1;
- }
- else
- {
- if (gap_debug) printf("gap_avi: Writing frame nr. %ld, size %d\n", l_cur_frame_nr, l_FRAME_size);
+ l_FRAME_size = l_video_frame_chunk_size - l_video_frame_chunk_hdr_size;
+ buffer = l_video_chunk_ptr + l_video_frame_chunk_hdr_size;
+
+ AVI_write_frame(l_avifile, buffer, l_FRAME_size, TRUE /* all frames are keyframe for JPEG codec */);
- AVI_write_frame(l_avifile, buffer, l_FRAME_size, TRUE /* all frames are keyframe for JPEG codec */);
- }
- }
- if(l_frame_filename)
- {
- g_free(l_frame_filename);
- }
}
else
{
+ /* encode one VIDEO FRAME */
gint32 l_nn;
int l_keyframe;
guchar *l_app0_buffer;
gint32 l_app0_len;
+ l_cnt_encoded_frames++;
+ //if (gap_debug)
+ {
+ printf("DEBUG: saving recoded frame %d (fetch as chunk FAILED)\n", (int)l_cur_frame_nr);
+ }
+
l_keyframe = TRUE; /* TRUE: keyframe is independent image (I frame or uncompressed)
* FALSE: for dependent frames (P and B frames)
*/
@@ -925,12 +949,20 @@
}
}
- if (strcmp(epp->codec_name, GAP_AVI_CODEC_JPEG) == 0)
+ if ((strcmp(epp->codec_name, GAP_AVI_CODEC_JPEG) == 0)
+ || (strcmp(epp->codec_name, GAP_AVI_CODEC_MJPG) == 0))
{
/* Compress the picture into a JPEG */
buffer = gap_gve_jpeg_drawable_encode_jpeg(l_drawable, epp->jpeg_interlaced,
&l_FRAME_size, epp->jpeg_quality, epp->jpeg_odd_even, FALSE, l_app0_buffer, l_app0_len);
}
+ else if (strcmp(epp->codec_name, GAP_AVI_CODEC_PNG) == 0)
+ {
+ /* Compress the picture into a PNG */
+ buffer = gap_gve_png_drawable_encode_png(l_drawable, epp->png_interlaced,
+ &l_FRAME_size, epp->png_compression, l_app0_buffer, l_app0_len);
+ }
else
{
if ((strcmp(epp->codec_name, GAP_AVI_CODEC_RAW) == 0)
@@ -950,6 +982,10 @@
}
buffer = gap_gve_raw_BGR_drawable_encode(l_drawable, &l_FRAME_size, l_vflip, l_app0_buffer, l_app0_len);
}
+ else if (strcmp(epp->codec_name, GAP_AVI_CODEC_PNG) == 0)
+ {
+ printf("PNG codec not implemented yet.\n");
+ }
#ifdef ENABLE_LIBXVIDCORE
else
{
@@ -1068,5 +1104,11 @@
gap_gve_story_close_vid_handle(l_vidhand);
}
+ /* statistics */
+ printf("encoded frames: %d\n", (int)l_cnt_encoded_frames);
+ printf("1:1 copied frames: %d\n", (int)l_cnt_reused_frames);
+ printf("total handled frames: %d\n", (int)l_cnt_encoded_frames + l_cnt_reused_frames);
+
+ g_free(l_video_chunk_ptr);
return l_rc;
} /* end p_avi_encode */
Modified: trunk/vid_enc_avi/gap_enc_avi_main.h
==============================================================================
--- trunk/vid_enc_avi/gap_enc_avi_main.h (original)
+++ trunk/vid_enc_avi/gap_enc_avi_main.h Sun Jun 29 12:12:59 2008
@@ -38,10 +38,21 @@
#define GAP_AVI_VIDCODEC_00_JPEG 0
-#define GAP_AVI_VIDCODEC_01_RAW 1
-#define GAP_AVI_VIDCODEC_02_XVID 2
-#define GAP_AVI_VIDCODEC_MAX_ELEMENTS 3
-
+#define GAP_AVI_VIDCODEC_01_MJPG 1
+#define GAP_AVI_VIDCODEC_02_PNG 2
+#define GAP_AVI_VIDCODEC_03_RAW 3
+#define GAP_AVI_VIDCODEC_04_XVID 4
+#define GAP_AVI_VIDCODEC_MAX_ELEMENTS 5
+
+/* names of the supported AVI Codecs */
+#define GAP_AVI_CODEC_RAW "RAW " /* refers to 4 byte code "RGB " */
+#define GAP_AVI_CODEC_RGB "RGB "
+#define GAP_AVI_CODEC_JPEG "JPEG"
+#define GAP_AVI_CODEC_MJPG "MJPG"
+#define GAP_AVI_CODEC_PNG "PNG "
+/* ??? not sure what to use for the correct 4cc codec names for xvid divx MPEG 4 */
+#define GAP_AVI_CODEC_XVID "XVID"
+#define GAP_AVI_CODEC_DIVX "div5"
/* avi specific encoder params */
@@ -50,7 +61,7 @@
gint32 APP0_marker;
/* for the builtin "JPEG" CODEC */
- gint32 dont_recode_frames;
+ gint32 jpeg_dont_recode_frames;
gint32 jpeg_interlaced;
gint32 jpeg_quality; /* 0..100% */
gint32 jpeg_odd_even;
@@ -61,6 +72,10 @@
/* for the "RGB " (== raw) CODEC */
gint32 raw_vflip;
+ /* for the "PNG " CODEC */
+ gint32 png_dont_recode_frames;
+ gint32 png_interlaced;
+ gint32 png_compression; /* 0..9 */
} GapGveAviValues;
typedef struct GapGveAviGlobalParams { /* nick: gpp */
@@ -101,6 +116,11 @@
GtkWidget *raw_vflip_checkbutton;
+ GtkWidget *png_dont_recode_checkbutton;
+ GtkWidget *png_interlace_checkbutton;
+ GtkWidget *png_compression_spinbutton;
+ GtkObject *png_compression_spinbutton_adj;
+
} GapGveAviGlobalParams;
Modified: trunk/vid_enc_ffmpeg/gap_enc_ffmpeg_main.c
==============================================================================
--- trunk/vid_enc_ffmpeg/gap_enc_ffmpeg_main.c (original)
+++ trunk/vid_enc_ffmpeg/gap_enc_ffmpeg_main.c Sun Jun 29 12:12:59 2008
@@ -2935,17 +2935,142 @@
} /* end p_ffmpeg_close */
+/* ---------------------------
+ * p_add_vcodec_name
+ * ---------------------------
+ * add the specified name as 1st element to the specified codec_list,
+ * optionally followed by all compatible video codecs that can be used for the lossless video cut feature.
+ */
+static void
+p_add_vcodec_name(GapCodecNameElem **codec_list, const char *name)
+{
+ if(name)
+ {
+ GapCodecNameElem *codec_elem;
+
+ codec_elem = g_malloc0(sizeof(GapCodecNameElem));
+ if (codec_elem)
+ {
+ codec_elem->codec_name = g_strdup(name);
+ codec_elem->next = *codec_list;
+ *codec_list = codec_elem;
+ }
+ }
+} /* end p_add_vcodec_name */
+
+
+/* ---------------------------
+ * p_setup_check_flags
+ * ---------------------------
+ * Set up check_flags (conditions for lossless video cut)
+ * and return a list of codec_names that are considered compatible with the
+ * selected codec (epp->vcodec_name) for encoding.
+ * NOTE:
+ * the compatibility list is experimental
+ * (it is not based on format specifications).
+ *
+ */
+static GapCodecNameElem *
+p_setup_check_flags(GapGveFFMpegValues *epp
+ , gint32 *check_flags /* OUT */
+ )
+{
+ gint32 l_check_flags;
+ GapCodecNameElem *compatible_vcodec_list;
+
+
+ l_check_flags = GAP_VID_CHCHK_FLAG_SIZE;
+
+ compatible_vcodec_list = NULL;
+
+ if(strcmp(epp->vcodec_name, "mjpeg") == 0)
+ {
+ l_check_flags |= (GAP_VID_CHCHK_FLAG_JPG | GAP_VID_CHCHK_FLAG_FULL_FRAME);
+ p_add_vcodec_name(&compatible_vcodec_list, "JPEG");
+ }
+ else if(strcmp(epp->vcodec_name, "png") == 0)
+ {
+ l_check_flags |= (GAP_VID_CHCHK_FLAG_PNG | GAP_VID_CHCHK_FLAG_FULL_FRAME);
+ p_add_vcodec_name(&compatible_vcodec_list, "PNG");
+ p_add_vcodec_name(&compatible_vcodec_list, "PNG ");
+ }
+ else if(strcmp(epp->vcodec_name, "mpeg1video") == 0)
+ {
+ l_check_flags |= (GAP_VID_CHCHK_FLAG_MPEG_INTEGRITY | GAP_VID_CHCHK_FLAG_VCODEC_NAME);
+ p_add_vcodec_name(&compatible_vcodec_list, "mjpeg");
+ }
+ else if(strcmp(epp->vcodec_name, "mpeg2video") == 0)
+ {
+ l_check_flags |= (GAP_VID_CHCHK_FLAG_MPEG_INTEGRITY | GAP_VID_CHCHK_FLAG_VCODEC_NAME);
+ p_add_vcodec_name(&compatible_vcodec_list, "mpeg1video");
+ p_add_vcodec_name(&compatible_vcodec_list, "mpegvideo");
+ p_add_vcodec_name(&compatible_vcodec_list, "mjpeg");
+ p_add_vcodec_name(&compatible_vcodec_list, "JPEG");
+ }
+ else if(strcmp(epp->vcodec_name, "mpeg4") == 0)
+ {
+ l_check_flags |= (GAP_VID_CHCHK_FLAG_MPEG_INTEGRITY | GAP_VID_CHCHK_FLAG_VCODEC_NAME);
+ p_add_vcodec_name(&compatible_vcodec_list, "mpeg1video");
+ p_add_vcodec_name(&compatible_vcodec_list, "mpeg2video");
+ p_add_vcodec_name(&compatible_vcodec_list, "mpegvideo");
+ p_add_vcodec_name(&compatible_vcodec_list, "mjpeg");
+ p_add_vcodec_name(&compatible_vcodec_list, "JPEG");
+ }
+ else if(strcmp(epp->vcodec_name, "msmpeg4") == 0)
+ {
+ l_check_flags |= (GAP_VID_CHCHK_FLAG_MPEG_INTEGRITY | GAP_VID_CHCHK_FLAG_VCODEC_NAME);
+ p_add_vcodec_name(&compatible_vcodec_list, "mpeg1video");
+ p_add_vcodec_name(&compatible_vcodec_list, "mpeg2video");
+ p_add_vcodec_name(&compatible_vcodec_list, "mpegvideo");
+ p_add_vcodec_name(&compatible_vcodec_list, "mjpeg");
+ p_add_vcodec_name(&compatible_vcodec_list, "JPEG");
+ }
+ else if(strcmp(epp->vcodec_name, "msmpeg4v1") == 0)
+ {
+ l_check_flags |= (GAP_VID_CHCHK_FLAG_MPEG_INTEGRITY | GAP_VID_CHCHK_FLAG_VCODEC_NAME);
+ p_add_vcodec_name(&compatible_vcodec_list, "mpeg1video");
+ p_add_vcodec_name(&compatible_vcodec_list, "mpeg2video");
+ p_add_vcodec_name(&compatible_vcodec_list, "mpegvideo");
+ p_add_vcodec_name(&compatible_vcodec_list, "msmpeg4");
+ p_add_vcodec_name(&compatible_vcodec_list, "mjpeg");
+ p_add_vcodec_name(&compatible_vcodec_list, "JPEG");
+ }
+ else if(strcmp(epp->vcodec_name, "msmpeg4v2") == 0)
+ {
+ l_check_flags |= (GAP_VID_CHCHK_FLAG_MPEG_INTEGRITY | GAP_VID_CHCHK_FLAG_VCODEC_NAME);
+ p_add_vcodec_name(&compatible_vcodec_list, "mpeg1video");
+ p_add_vcodec_name(&compatible_vcodec_list, "mpeg2video");
+ p_add_vcodec_name(&compatible_vcodec_list, "mpegvideo");
+ p_add_vcodec_name(&compatible_vcodec_list, "msmpeg4");
+ p_add_vcodec_name(&compatible_vcodec_list, "msmpeg4v1");
+ p_add_vcodec_name(&compatible_vcodec_list, "mjpeg");
+ p_add_vcodec_name(&compatible_vcodec_list, "JPEG");
+ }
+ else
+ {
+ l_check_flags |= (GAP_VID_CHCHK_FLAG_VCODEC_NAME);
+ }
+
+
+ *check_flags = l_check_flags;
+ p_add_vcodec_name(&compatible_vcodec_list, epp->vcodec_name);
-/* ============================================================================
+
+ return (compatible_vcodec_list);
+} /* end p_setup_check_flags */
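
Because p_add_vcodec_name() prepends to the list and epp->vcodec_name is added last, the selected
codec always ends up as the first list element. A minimal sketch of the result for the "mjpeg" case
handled above (flag and codec names as used in this file):

  gint32 l_check_flags;
  GapCodecNameElem *l_list;

  /* assuming epp->vcodec_name == "mjpeg" */
  l_list = p_setup_check_flags(epp, &l_check_flags);
  /* l_check_flags == GAP_VID_CHCHK_FLAG_SIZE | GAP_VID_CHCHK_FLAG_JPG | GAP_VID_CHCHK_FLAG_FULL_FRAME */
  /* list order:     "mjpeg" -> "JPEG" -> NULL */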
+
+
+
+/* ---------------------------
* p_ffmpeg_encode
+ * ---------------------------
* The main "productive" routine
* ffmpeg encoding of anim frames, based on ffmpeg lib (by Fabrice Bellard)
* Audio encoding is Optional.
* (wav_audiofile must be provided in that case)
*
- * returns value >= 0 if all went ok
+ * returns value >= 0 if ok
* (or -1 on error)
- * ============================================================================
*/
static gint
p_ffmpeg_encode(GapGveFFMpegGlobalParams *gpp)
@@ -2964,9 +3089,10 @@
gint32 l_cnt_encoded_frames;
gint32 l_cnt_reused_frames;
gint l_video_tracks = 0;
-
+ gint32 l_check_flags;
t_awk_array l_awk_arr;
t_awk_array *awp;
+ GapCodecNameElem *l_vcodec_list;
epp = &gpp->evl;
@@ -2974,6 +3100,9 @@
l_cnt_encoded_frames = 0;
l_cnt_reused_frames = 0;
p_init_audio_workdata(awp);
+
+ l_check_flags = GAP_VID_CHCHK_FLAG_SIZE;
+ l_vcodec_list = p_setup_check_flags(epp, &l_check_flags);
//if(gap_debug)
{
@@ -3072,16 +3201,17 @@
gboolean l_fetch_ok;
gboolean l_force_keyframe;
gint32 l_video_frame_chunk_size;
+ gint32 l_video_frame_chunk_hdr_size;
/* must fetch the frame into gimp_image */
/* load the current frame image, and transform (flatten, convert to RGB, scale, macro, etc..) */
if(gap_debug)
{
- printf("\nFFenc: before gap_gve_story_fetch_composite_image_or_chunk\n");
+ printf("\nFFenc: before gap_story_render_fetch_composite_image_or_chunk\n");
}
- l_fetch_ok = gap_gve_story_fetch_composite_image_or_chunk(l_vidhand
+ l_fetch_ok = gap_story_render_fetch_composite_image_or_chunk(l_vidhand
, l_cur_frame_nr
, (gint32) gpp->val.vid_width
, (gint32) gpp->val.vid_height
@@ -3089,18 +3219,20 @@
, &l_layer_id /* output */
, &l_tmp_image_id /* output */
, epp->dont_recode_flag
- , epp->vcodec_name
+ , l_vcodec_list /* list of compatible vcodec_names */
, &l_force_keyframe
, ffh->vst[0].video_buffer
, &l_video_frame_chunk_size
, ffh->vst[0].video_buffer_size /* IN max size */
, gpp->val.framerate
, l_max_master_frame_nr
+ , &l_video_frame_chunk_hdr_size
+ , l_check_flags
);
//if(gap_debug)
{
- printf("\nFFenc: after gap_gve_story_fetch_composite_image_or_chunk image_id:%d layer_id:%d\n"
+ printf("\nFFenc: after gap_story_render_fetch_composite_image_or_chunk image_id:%d layer_id:%d\n"
, (int)l_tmp_image_id
, (int) l_layer_id
);
Added: trunk/vid_enc_rawframes/Makefile.am
==============================================================================
--- (empty file)
+++ trunk/vid_enc_rawframes/Makefile.am Sun Jun 29 12:12:59 2008
@@ -0,0 +1,50 @@
+## Process this file with automake to produce Makefile.in
+
+libexecdir = $(GIMP_PLUGIN_DIR)/plug-ins
+
+scriptdatadir = $(GIMP_DATA_DIR)/scripts
+
+
+if GAP_VIDEOAPI_SUPPORT
+GAPVIDEOAPI = -L$(top_builddir)/libgapvidapi -lgapvidapi $(GAPVIDEOAPI_EXTLIBS)
+INC_GAPVIDEOAPI = -I$(top_srcdir)/libgapvidapi $(GAPVIDEOAPI_EXTINCS)
+endif
+
+LIBGAPBASE = $(top_builddir)/libgapbase/libgapbase.a
+INC_LIBGAPBASE = -I$(top_srcdir)/libgapbase
+
+LIBGAPSTORY = -L$(top_builddir)/gap -lgapstory
+INC_LIBGAPSTORY = -I$(top_srcdir)/gap
+
+LIBGAPVIDUTIL = -L$(top_builddir)/libgapvidutil -lgapvidutil
+INC_LIBGAPVIDUTIL = -I$(top_srcdir)/libgapvidutil
+
+
+libexec_PROGRAMS = gap_vid_enc_rawframes
+
+
+gap_vid_enc_rawframes_SOURCES = \
+ gap_enc_rawframes_main.c
+
+
+AM_CPPFLAGS = \
+ -DGAPLIBDIR=\""$(GAPLIBDIR)"\" \
+ -DLOCALEDIR=\""$(LOCALEDIR)"\"
+
+INCLUDES = \
+ -I$(top_srcdir) \
+ -I$(top_srcdir)/libwavplayclient \
+ $(INC_LIBGAPBASE) \
+ $(INC_GAPVIDEOAPI) \
+ $(INC_LIBGAPSTORY) \
+ $(INC_LIBGAPVIDUTIL) \
+ $(GIMP_CFLAGS) \
+ -I$(includedir)
+
+
+LDADD = $(GIMP_LIBS)
+
+# note: the sequence of libs matters because LIBGAPVIDUTIL uses both LIBGAPSTORY and GAPVIDEOAPI
+# (if those libs appear before LIBGAPVIDUTIL the linker cannot resolve those references).
+
+gap_vid_enc_rawframes_LDADD = $(LIBGAPVIDUTIL) $(LIBGAPSTORY) $(GAPVIDEOAPI) $(LIBGAPBASE) $(GIMP_LIBS)
Added: trunk/vid_enc_rawframes/gap_enc_rawframes_main.c
==============================================================================
--- (empty file)
+++ trunk/vid_enc_rawframes/gap_enc_rawframes_main.c Sun Jun 29 12:12:59 2008
@@ -0,0 +1,949 @@
+/* gap_enc_rawframes_main.c
+ * by hof (Wolfgang Hofer)
+ *
+ * GAP ... Gimp Animation Plugins
+ *
+ * This is the MAIN Module for the rawframes video extract encoder.
+ * This encoder writes the videoframes 1:1 as single files to disc
+ * instead of encoding to one videofile. It is mainly intended for
+ * extracting I frames 1:1 from MPEG or AVI as JPEG files,
+ * where the decoding and re-encoding process is skipped where possible.
+ * This results in fast and lossless extraction of keyframes.
+ * (example: AVI files recorded by the OLYMPUS SP560UZ digital camera)
+ *
+ */
+/* The GIMP -- an image manipulation program
+ * Copyright (C) 1995 Spencer Kimball and Peter Mattis
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/* revision history:
+ * version 2.5.0; 2008.06.01 hof: created
+ */
+
+#include <gtk/gtk.h>
+#include <libgimp/gimp.h>
+#include <libgimp/gimpui.h>
+
+
+
+#define GAP_PLUGIN_NAME_RAWFRAMES_PARAMS "extension-gap-encpar-rawframes"
+
+#include "gap_gvetypes.h"
+
+#include "gap_libgapvidutil.h"
+#include "gap_libgimpgap.h"
+
+
+/* rawframe specific encoder params */
+typedef struct {
+ gint32 dum_par; /* unused */
+} GapGveRawValues;
+
+typedef struct GapGveRawGlobalParams { /* nick: gpp */
+ GapGveCommonValues val;
+ GapGveEncAInfo ainfo;
+ GapGveEncList *ecp;
+
+ GapGveRawValues evl;
+
+} GapGveRawGlobalParams;
+
+
+void p_rawframe_init_default_params(GapGveRawValues *epp);
+gint p_rawframe_encode(GapGveRawGlobalParams *gpp);
+gint p_rawframe_encode_dialog(GapGveRawGlobalParams *gpp);
+gchar* p_build_format_from_framename(gchar *framename);
+
+
+/* Includes for extra LIBS */
+
+
+#define GAP_PLUGIN_NAME_rawframeS_ENCODE "plug-in-gap-enc-rawframes"
+
+/* ------------------------
+ * global gap DEBUG switch
+ * ------------------------
+ */
+
+/* int gap_debug = 1; */ /* print debug infos */
+/* int gap_debug = 0; */ /* 0: dont print debug infos */
+
+int gap_debug = 0;
+GapGveRawGlobalParams global_params;
+int global_nargs_raw_enc_par;
+
+static void query(void);
+static void
+run (const gchar *name, /* name of plugin */
+ gint n_params, /* number of in-paramters */
+ const GimpParam * param, /* in-parameters */
+ gint *nreturn_vals, /* number of out-parameters */
+ GimpParam ** return_vals); /* out-parameters */
+static void p_gimp_get_data(const char *key, void *buffer, gint expected_size);
+
+GimpPlugInInfo PLUG_IN_INFO =
+{
+ NULL, /* init_proc */
+ NULL, /* quit_proc */
+ query, /* query_proc */
+ run, /* run_proc */
+};
+
+
+
+
+
+/* ------------------------
+ * MAIN
+ * ------------------------
+ */
+
+MAIN ()
+
+/* --------------------------------
+ * query
+ * --------------------------------
+ */
+static void
+query ()
+{
+ gchar *l_ecp_key;
+
+ /* video encoder standard parameters (same for each encoder) */
+ static GimpParamDef args_raw_enc[] =
+ {
+ {GIMP_PDB_INT32, "run_mode", "non-interactive"},
+ {GIMP_PDB_IMAGE, "image", "Input image"},
+ {GIMP_PDB_DRAWABLE, "drawable", "Input drawable (unused)"},
+ {GIMP_PDB_STRING, "videofile", "framename of the 1.st sinle framefile to write"},
+ {GIMP_PDB_INT32, "range_from", "number of first frame"},
+ {GIMP_PDB_INT32, "range_to", "number of last frame"},
+ {GIMP_PDB_INT32, "vid_width", "Width of resulting Video Frames (all Frames are scaled to this width)"},
+ {GIMP_PDB_INT32, "vid_height", "Height of resulting Video Frames (all Frames are scaled to this height)"},
+ {GIMP_PDB_INT32, "vid_format", "videoformat: 0=comp., 1=PAL, 2=NTSC, 3=SECAM, 4=MAC, 5=unspec"},
+ {GIMP_PDB_FLOAT, "framerate", "framerate in frames per seconds"},
+ {GIMP_PDB_INT32, "samplerate", "audio samplerate in samples per seconds (is ignored .wav files are used)"},
+ {GIMP_PDB_STRING, "audfile", "optional audiodata file .wav must contain uncompressed 16 bit samples. pass empty string if no audiodata should be included"},
+ {GIMP_PDB_INT32, "use_rest", "0 == use default values for encoder specific params, 1 == use encoder specific params"},
+ {GIMP_PDB_STRING, "filtermacro_file", "macro to apply on each handled frame. (textfile with filter plugin names and LASTVALUE bufferdump"},
+ {GIMP_PDB_STRING, "storyboard_file", "textfile with list of one or more framesequences"},
+ {GIMP_PDB_INT32, "input_mode", "0 ... image is one of the frames to encode, range_from/to params refere to numberpart of the other frameimages on disc. \n"
+ "1 ... image is multilayer, range_from/to params refere to layer index. \n"
+ "2 ... image is ignored, input is specified by storyboard_file parameter."},
+ };
+ static int nargs_raw_enc = sizeof(args_raw_enc) / sizeof(args_raw_enc[0]);
+
+ /* video encoder specific parameters */
+ static GimpParamDef args_raw_enc_par[] =
+ {
+ {GIMP_PDB_INT32, "run_mode", "interactive, non-interactive"},
+ {GIMP_PDB_STRING, "key_stdpar", "key to get standard video encoder params via gimp_get_data"},
+ {GIMP_PDB_INT32, "dum_par", "Dummy parameter (unused)"},
+ };
+ static int nargs_raw_enc_par = sizeof(args_raw_enc_par) / sizeof(args_raw_enc_par[0]);
+
+ static GimpParamDef *return_vals = NULL;
+ static int nreturn_vals = 0;
+
+ /* video encoder standard query (same for each encoder) */
+ static GimpParamDef args_in_ecp[] =
+ {
+ {GIMP_PDB_STRING, "param_name", "name of the parameter, supported: menu_name, video_extension"},
+ };
+
+ static GimpParamDef args_out_ecp[] =
+ {
+ {GIMP_PDB_STRING, "param_value", "parmeter value"},
+ };
+
+ static int nargs_in_enp = sizeof(args_in_ecp) / sizeof(args_in_ecp[0]);
+ static int nargs_out_enp = (sizeof(args_out_ecp) / sizeof(args_out_ecp[0]));
+
+ INIT_I18N();
+
+ global_nargs_raw_enc_par = nargs_raw_enc_par;
+
+ gimp_install_procedure(GAP_PLUGIN_NAME_rawframeS_ENCODE,
+ _("rawframes video encoding for anim frames. Menu: @rawframeS@"),
+ _("This plugin has a video encoder API"
+ " but writes a series of single raw frames instead of one videofile."
+ " the filetype of the output frames is derived from the extension."
+ " the extension is the suffix part of the parameter \"videofile\"."
+ " the names of the output frame(s) are same as the parameter \"videofile\""
+ " but the framenumber part is replaced by the current framenumber"
+ " (or added automatic if videofile has no number part)"
+ " audiodata is ignored."
+ "WARNINGS: for proper operation, the handled frames shall refere to single"
+ " video file without any transitions. this allows fetching frames"
+ " as raw data chunks. The chunks are 1:1 written to disc as framefiles."
+ " The resulting raw data frames on disc may be unusable if the raw chunk data"
+ " is not compatible to any image fileformat."
+ " MPEG I frames, and MJPG files may be extractable to the JPEG fileformat."
+ " A call of"
+ "\"" GAP_PLUGIN_NAME_RAWFRAMES_PARAMS "\""
+ " to set encoder specific parameters, is not required because"
+ " this release of the rawframe encoder has NO encoder specific parameters"),
+ "Wolfgang Hofer (hof gimp org)",
+ "Wolfgang Hofer",
+ GAP_VERSION_WITH_DATE,
+ NULL, /* has no Menu entry, just a PDB interface */
+ "RGB*, INDEXED*, GRAY*",
+ GIMP_PLUGIN,
+ nargs_raw_enc, nreturn_vals,
+ args_raw_enc, return_vals);
+
+
+
+ gimp_install_procedure(GAP_PLUGIN_NAME_RAWFRAMES_PARAMS,
+ _("Set Parameters for GAP rawframes video encoder Plugins"),
+ _("This plugin sets rawframes specific video encoding parameters."),
+ "Wolfgang Hofer (hof gimp org)",
+ "Wolfgang Hofer",
+ GAP_VERSION_WITH_DATE,
+ NULL, /* has no Menu entry, just a PDB interface */
+ NULL,
+ GIMP_PLUGIN,
+ nargs_raw_enc_par, nreturn_vals,
+ args_raw_enc_par, return_vals);
+
+
+ l_ecp_key = g_strdup_printf("%s%s", GAP_QUERY_PREFIX_VIDEO_ENCODERS, GAP_PLUGIN_NAME_rawframeS_ENCODE);
+ gimp_install_procedure(l_ecp_key,
+ _("Get GUI Parameters for GAP rawframes video encoder"),
+ _("This plugin returns rawframes encoder specific parameters."),
+ "Wolfgang Hofer (hof gimp org)",
+ "Wolfgang Hofer",
+ GAP_VERSION_WITH_DATE,
+ NULL, /* has no Menu entry, just a PDB interface */
+ NULL,
+ GIMP_PLUGIN,
+ nargs_in_enp , nargs_out_enp,
+ args_in_ecp, args_out_ecp);
+
+
+ g_free(l_ecp_key);
+} /* end query */
+
+
+/* --------------------------------
+ * run
+ * --------------------------------
+ */
+static void
+run (const gchar *name, /* name of plugin */
+ gint n_params, /* number of in-parameters */
+ const GimpParam * param, /* in-parameters */
+ gint *nreturn_vals, /* number of out-parameters */
+ GimpParam ** return_vals) /* out-parameters */
+{
+ GapGveRawValues *epp;
+ GapGveRawGlobalParams *gpp;
+
+ static GimpParam values[1];
+ gint32 l_rc;
+ const char *l_env;
+ char *l_ecp_key1;
+ char *l_encoder_key;
+
+ gpp = &global_params;
+ epp = &gpp->evl;
+ *nreturn_vals = 1;
+ *return_vals = values;
+ l_rc = 0;
+
+ INIT_I18N();
+
+ values[0].type = GIMP_PDB_STATUS;
+ values[0].data.d_status = GIMP_PDB_SUCCESS;
+
+ l_env = g_getenv("GAP_DEBUG_ENC");
+ if(l_env != NULL)
+ {
+ if((*l_env != 'n') && (*l_env != 'N')) gap_debug = 1;
+ }
+
+ if(gap_debug) printf("\n\nSTART of PlugIn: %s\n", name);
+
+ l_ecp_key1 = g_strdup_printf("%s%s", GAP_QUERY_PREFIX_VIDEO_ENCODERS, GAP_PLUGIN_NAME_rawframeS_ENCODE);
+ l_encoder_key = g_strdup(GAP_PLUGIN_NAME_rawframeS_ENCODE);
+
+
+ p_rawframe_init_default_params(epp);
+
+ if (strcmp (name, l_ecp_key1) == 0)
+ {
+ /* this interface replies to the queries of the common encoder gui */
+ gchar *param_name;
+
+ param_name = param[0].data.d_string;
+ if(gap_debug) printf("query for param_name: %s\n", param_name);
+ *nreturn_vals = 2;
+
+ values[1].type = GIMP_PDB_STRING;
+ if(strcmp (param_name, GAP_VENC_PAR_MENU_NAME) == 0)
+ {
+ values[1].data.d_string = g_strdup("rawframeS");
+ }
+ else if (strcmp (param_name, GAP_VENC_PAR_VID_EXTENSION) == 0)
+ {
+ values[1].data.d_string = g_strdup(".jpg");
+ }
+ else if (strcmp (param_name, GAP_VENC_PAR_SHORT_DESCRIPTION) == 0)
+ {
+ values[1].data.d_string =
+ g_strdup(_("The rawframes Encoder\n"
+ "writes single frames instead of one videofile\n"
+ "the fileformat of the frames is derived from the\n"
+ "extension of the video name, frames are named\n"
+ "video name plus 6-digit number + extension"
+ )
+ );
+ }
+ else if (strcmp (param_name, GAP_VENC_PAR_GUI_PROC) == 0)
+ {
+ values[1].data.d_string = g_strdup(GAP_PLUGIN_NAME_RAWFRAMES_PARAMS);
+ }
+ else
+ {
+ values[1].data.d_string = g_strdup("\0");
+ }
+ }
+ else if (strcmp (name, GAP_PLUGIN_NAME_RAWFRAMES_PARAMS) == 0)
+ {
+ /* this interface sets the encoder specific parameters */
+ gint l_set_it;
+ gchar *l_key_stdpar;
+
+ gpp->val.run_mode = param[0].data.d_int32;
+ l_key_stdpar = param[1].data.d_string;
+ gpp->val.vid_width = 320;
+ gpp->val.vid_height = 200;
+ p_gimp_get_data(l_key_stdpar, &gpp->val, sizeof(GapGveCommonValues));
+
+ if(gap_debug) printf("rate: %f w:%d h:%d\n", (float)gpp->val.framerate, (int)gpp->val.vid_width, (int)gpp->val.vid_height);
+
+ l_set_it = TRUE;
+ if (gpp->val.run_mode == GIMP_RUN_NONINTERACTIVE)
+ {
+ /* set video encoder specific params */
+ if (n_params != global_nargs_raw_enc_par)
+ {
+ values[0].data.d_status = GIMP_PDB_CALLING_ERROR;
+ l_set_it = FALSE;
+ }
+ else
+ {
+ epp->dum_par = param[2+1].data.d_int32;
+ }
+ }
+ else
+ {
+ /* try to read encoder specific params */
+ p_gimp_get_data(l_encoder_key, epp, sizeof(GapGveRawValues));
+
+ if(0 != p_rawframe_encode_dialog(gpp))
+ {
+ l_set_it = FALSE;
+ }
+ }
+
+ if(l_set_it)
+ {
+ if(gap_debug) printf("Setting Encoder specific Params\n");
+ gimp_set_data(l_encoder_key, epp, sizeof(GapGveRawValues));
+ }
+
+ }
+ else if (strcmp (name, GAP_PLUGIN_NAME_rawframeS_ENCODE) == 0)
+ {
+ char *l_base;
+ int l_l;
+
+ /* run the video encoder procedure */
+
+ gpp->val.run_mode = param[0].data.d_int32;
+
+ /* get image_ID and animinfo */
+ gpp->val.image_ID = param[1].data.d_image;
+ gap_gve_misc_get_ainfo(gpp->val.image_ID, &gpp->ainfo);
+
+ /* set initial (default) values */
+ l_base = g_strdup(gpp->ainfo.basename);
+ l_l = strlen(l_base);
+
+ if (l_l > 0)
+ {
+ if(l_base[l_l -1] == '_')
+ {
+ l_base[l_l -1] = '\0';
+ }
+ }
+ if(gap_debug) printf("Init Default parameters for %s base: %s\n", name, l_base);
+ g_snprintf(gpp->val.videoname, sizeof(gpp->val.videoname), "%s.mpg", l_base);
+
+ gpp->val.audioname1[0] = '\0';
+ gpp->val.filtermacro_file[0] = '\0';
+ gpp->val.storyboard_file[0] = '\0';
+ gpp->val.framerate = gpp->ainfo.framerate;
+ gpp->val.range_from = gpp->ainfo.curr_frame_nr;
+ gpp->val.range_to = gpp->ainfo.last_frame_nr;
+ gpp->val.samplerate = 0;
+ gpp->val.vid_width = gimp_image_width(gpp->val.image_ID) - (gimp_image_width(gpp->val.image_ID) % 16);
+ gpp->val.vid_height = gimp_image_height(gpp->val.image_ID) - (gimp_image_height(gpp->val.image_ID) % 16);
+ gpp->val.vid_format = VID_FMT_NTSC;
+ gpp->val.input_mode = GAP_RNGTYPE_FRAMES;
+
+ g_free(l_base);
+
+ if (n_params != GAP_VENC_NUM_STANDARD_PARAM)
+ {
+ values[0].data.d_status = GIMP_PDB_CALLING_ERROR;
+ }
+ else
+ {
+ if(gap_debug) printf("Reading Standard parameters for %s\n", name);
+
+ if (param[3].data.d_string[0] != '\0') { g_snprintf(gpp->val.videoname, sizeof(gpp->val.videoname), "%s", param[3].data.d_string); }
+ if (param[4].data.d_int32 >= 0) { gpp->val.range_from = param[4].data.d_int32; }
+ if (param[5].data.d_int32 >= 0) { gpp->val.range_to = param[5].data.d_int32; }
+ if (param[6].data.d_int32 > 0) { gpp->val.vid_width = param[6].data.d_int32; }
+ if (param[7].data.d_int32 > 0) { gpp->val.vid_height = param[7].data.d_int32; }
+ if (param[8].data.d_int32 > 0) { gpp->val.vid_format = param[8].data.d_int32; }
+ gpp->val.framerate = param[9].data.d_float;
+ gpp->val.samplerate = param[10].data.d_int32;
+ g_snprintf(gpp->val.audioname1, sizeof(gpp->val.audioname1), "%s", param[11].data.d_string);
+
+ /* use rawframes specific encoder parameters (0==run with default values) */
+ if (param[12].data.d_int32 == 0)
+ {
+ if(gap_debug) printf("Running the Encoder %s with Default Values\n", name);
+ }
+ else
+ {
+ /* try to read encoder specific params */
+ p_gimp_get_data(name, epp, sizeof(GapGveRawValues));
+ }
+ if (param[13].data.d_string[0] != '\0') { g_snprintf(gpp->val.filtermacro_file, sizeof(gpp->val.filtermacro_file), "%s", param[13].data.d_string); }
+ if (param[14].data.d_string[0] != '\0') { g_snprintf(gpp->val.storyboard_file, sizeof(gpp->val.storyboard_file), "%s", param[14].data.d_string); }
+ if (param[15].data.d_int32 >= 0) { gpp->val.input_mode = param[15].data.d_int32; }
+ }
+
+ if (values[0].data.d_status == GIMP_PDB_SUCCESS)
+ {
+ if (l_rc >= 0 )
+ {
+ l_rc = p_rawframe_encode(gpp);
+ /* delete images in the cache
+ * (the cache may have been filled while parsing
+ * and processing a storyboard file)
+ */
+ gap_gve_story_drop_image_cache();
+ }
+ }
+ }
+ else
+ {
+ values[0].data.d_status = GIMP_PDB_CALLING_ERROR;
+ }
+
+ if(l_rc < 0)
+ {
+ values[0].data.d_status = GIMP_PDB_EXECUTION_ERROR;
+ }
+
+} /* end run */
+
+
+/* --------------------------------
+ * p_gimp_get_data
+ * --------------------------------
+ */
+static void
+p_gimp_get_data(const char *key, void *buffer, gint expected_size)
+{
+ if(gimp_get_data_size(key) == expected_size)
+ {
+ if(gap_debug) printf("p_gimp_get_data: key:%s\n", key);
+ gimp_get_data(key, buffer);
+ }
+ else
+ {
+ if(gap_debug)
+ {
+ printf("ERROR: p_gimp_get_data key:%s failed\n", key);
+ printf("ERROR: gimp_get_data_size:%d expected size:%d\n"
+ , (int)gimp_get_data_size(key)
+ , (int)expected_size);
+ }
+ }
+} /* end p_gimp_get_data */
+
+
+/* --------------------------------
+ * p_rawframe_init_default_params
+ * --------------------------------
+ */
+void
+p_rawframe_init_default_params(GapGveRawValues *epp)
+{
+ if(gap_debug) printf("p_rawframe_init_default_params\n");
+
+ epp->dum_par = 0;
+} /* end p_rawframe_init_default_params */
+
+
+/* --------------------------------
+ * p_rawframe_encode_dialog
+ * --------------------------------
+ */
+gint
+p_rawframe_encode_dialog(GapGveRawGlobalParams *gpp)
+{
+ if(gpp->val.run_mode == GIMP_RUN_INTERACTIVE)
+ {
+ g_message(_("the rawframe Encoder has no encoder specific Parameters"));
+ }
+ return 0;
+} /* end p_rawframe_encode_dialog */
+
+
+/* ----------------------------------------------------
+ * p_build_format_from_framename
+ * ----------------------------------------------------
+ * IN: framename_0001.jpg
+ * OUT: framename_%06d.jpg
+ */
+gchar *
+p_build_format_from_framename(gchar *framename)
+{
+ gchar *l_fmt;
+ gchar *l_fmtnum;
+ gchar *l_framename;
+ gint l_idx;
+ gint l_len;
+ gchar *l_ext_ptr;
+ gchar *l_num_ptr;
+ gint l_idx_numlen;
+
+ l_framename = g_strdup(framename);
+ l_ext_ptr = NULL;
+ l_num_ptr = NULL;
+ l_idx_numlen = 0;
+ l_len = strlen(l_framename);
+
+ /* find out the numberpart and extension */
+ for(l_idx=l_len-1; l_idx >= 0; l_idx--)
+ {
+ if (l_framename[l_idx] == '.')
+ {
+ l_ext_ptr = &l_framename[l_idx];
+ l_idx_numlen = 0;
+ l_num_ptr = NULL;
+ while(l_idx >= 0)
+ {
+ l_idx--;
+ if(g_ascii_isdigit(l_framename[l_idx]))
+ {
+ l_idx_numlen++;
+ }
+ else
+ {
+ l_num_ptr = &l_framename[l_idx];
+ break; /* stop if ran out of number part */
+ }
+ }
+ break;
+ }
+
+ if(g_ascii_isdigit(l_framename[l_idx]))
+ {
+ if(l_num_ptr == NULL)
+ {
+ l_idx_numlen++;
+ }
+ }
+ else
+ {
+ if (g_ascii_isdigit(l_framename[l_idx +1]) && (l_num_ptr == NULL))
+ {
+ l_num_ptr = &l_framename[l_idx];
+ }
+ }
+ }
+
+ if(l_num_ptr)
+ {
+ l_num_ptr++;
+ *l_num_ptr = '\0'; /* set end of string marker */
+ }
+ if(l_ext_ptr)
+ {
+ *l_ext_ptr = '\0'; /* set end of string marker */
+ l_ext_ptr++;
+ }
+
+ /* if(l_idx_numlen > 0) l_fmtnum = g_strdup_printf("%%0%dd", (int)l_idx_numlen);
+ * else l_fmtnum = g_strdup("_%06d");
+ */
+ l_fmtnum = g_strdup("_%06d"); /* always use 6digit framenumbers */
+
+ if(l_ext_ptr)
+ {
+ l_fmt = g_strdup_printf("%s%s.%s", l_framename, l_fmtnum, l_ext_ptr);
+ }
+ else
+ {
+ l_fmt = g_strdup_printf("%s%s", l_framename, l_fmtnum);
+ }
+
+ g_free(l_fmtnum);
+ g_free(l_framename);
+
+ return(l_fmt);
+} /* end p_build_format_from_framename */
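
A minimal usage sketch following the IN/OUT contract documented above (the filename is just an
example; the actual fileformat of the output frames depends on the extension passed in the
"videofile" parameter):

  gchar *l_fmt;
  gchar *l_sav_name;

  l_fmt = p_build_format_from_framename("frame0001.jpg");    /* e.g. "frame_%06d.jpg"   */
  l_sav_name = g_strdup_printf(l_fmt, 42);                    /* e.g. "frame_000042.jpg" */
  g_free(l_sav_name);
  g_free(l_fmt);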
+
+
+/* --------------------------------
+ * p_save_chunk_as_frame
+ * --------------------------------
+ */
+gboolean
+p_save_chunk_as_frame(const char *filename, unsigned char *video_chunk, gint32 video_frame_chunk_size, gint32 header_length)
+{
+ FILE *fp;
+ unsigned char *dataPtr;
+ gint32 dataSize;
+
+ dataSize = video_frame_chunk_size - header_length;
+ dataPtr = video_chunk + header_length;
+
+ fp = fopen(filename, "w");
+ if (fp)
+ {
+ fwrite(dataPtr, dataSize, 1, fp);
+ fclose(fp);
+ return (TRUE);
+ }
+
+ printf("ERROR cant write to file:%s\n", filename);
+ return (FALSE);
+} /* end p_save_chunk_as_frame */
+
+
+gint32
+p_dimSizeOfRawFrame(GapGveRawGlobalParams *gpp)
+{
+ gint32 sizeOfRawFrame;
+
+ /* size of uncompressed RGBA frame + safety of 1000 bytes should be
+ * more than enough
+ */
+ sizeOfRawFrame = 1000 + (gpp->val.vid_width * gpp->val.vid_height * 4);
+
+ return (sizeOfRawFrame);
+} /* end p_dimSizeOfRawFrame */
+
+
+gboolean
+p_is_videoname_jpeg(const char *videoname)
+{
+ const char *ext;
+ gint len;
+ gint idx;
+
+
+ ext = videoname;
+ if (ext == NULL)
+ {
+ return (FALSE);
+ }
+
+ len = strlen(videoname);
+ for(idx = len-1; idx >= 0; idx--)
+ {
+ ext = &videoname[idx];
+ if (*ext == '.')
+ {
+ ext++;
+ break;
+ }
+ }
+
+ if (strcmp(ext, "jpg") == 0) { return (TRUE); }
+ if (strcmp(ext, "JPG") == 0) { return (TRUE); }
+ if (strcmp(ext, "jpeg") == 0) { return (TRUE); }
+ if (strcmp(ext, "JPEG") == 0) { return (TRUE); }
+
+ return (FALSE);
+}
+
+
+/* --------------------------------
+ * p_rawframe_encode
+ * --------------------------------
+ */
+gint
+p_rawframe_encode(GapGveRawGlobalParams *gpp)
+{
+ static GapGveStoryVidHandle *l_vidhand = NULL;
+ gint32 l_tmp_image_id;
+ gint32 l_layer_id;
+ long l_cur_frame_nr;
+ long l_step, l_begin, l_end;
+ gdouble l_percentage, l_percentage_step;
+ int l_rc;
+ unsigned char *l_video_chunk_ptr;
+ gint32 l_maxSizeOfRawFrame;
+ gint32 l_max_master_frame_nr;
+ gint32 l_cnt_encoded_frames;
+ gint32 l_cnt_reused_frames;
+ gint32 l_check_flags;
+ gchar *l_frame_fmt; /* format string has one %d for the framenumber */
+ gint32 l_out_frame_nr;
+ GimpRunMode l_save_runmode;
+
+ //if(gap_debug)
+ {
+ printf("p_rawframe_encode: START\n");
+ printf(" videoname: %s\n", gpp->val.videoname);
+ printf(" audioname1: %s\n", gpp->val.audioname1);
+ printf(" basename: %s\n", gpp->ainfo.basename);
+ printf(" extension: %s\n", gpp->ainfo.extension);
+ printf(" range_from: %d\n", (int)gpp->val.range_from);
+ printf(" range_to: %d\n", (int)gpp->val.range_to);
+ printf(" framerate: %f\n", (float)gpp->val.framerate);
+ printf(" samplerate: %d\n", (int)gpp->val.samplerate);
+ printf(" vid_width: %d\n", (int)gpp->val.vid_width);
+ printf(" vid_height: %d\n", (int)gpp->val.vid_height);
+ printf(" image_ID: %d\n", (int)gpp->val.image_ID);
+ printf(" storyboard_file: %s\n", gpp->val.storyboard_file);
+ printf(" input_mode: %d\n", gpp->val.input_mode);
+ }
+
+ l_maxSizeOfRawFrame = p_dimSizeOfRawFrame(gpp);
+ l_video_chunk_ptr = g_malloc0(l_maxSizeOfRawFrame);
+
+
+ l_out_frame_nr = 0;
+ l_rc = 0;
+ l_layer_id = -1;
+ l_cnt_encoded_frames = 0;
+ l_cnt_reused_frames = 0;
+ l_tmp_image_id = -1;
+ l_check_flags = GAP_VID_CHCHK_FLAG_SIZE;
+
+ if(p_is_videoname_jpeg(gpp->val.videoname) == TRUE)
+ {
+ l_check_flags |= GAP_VID_CHCHK_FLAG_JPG;
+ //if(gap_debug)
+ {
+ printf("check fetched chunks for JPEG frames activated\n");
+ }
+ }
+
+ l_frame_fmt = p_build_format_from_framename(gpp->val.videoname);
+
+ if(gap_debug) printf("rawframes will be saved with filename: %s\n", l_frame_fmt);
+
+
+ /* make list of frameranges */
+ { gint32 l_total_framecount;
+ l_vidhand = gap_gve_story_open_vid_handle (gpp->val.input_mode
+ ,gpp->val.image_ID
+ ,gpp->val.storyboard_file
+ ,gpp->ainfo.basename
+ ,gpp->ainfo.extension
+ ,gpp->val.range_from
+ ,gpp->val.range_to
+ ,&l_total_framecount
+ );
+ }
+
+ /* TODO check for overwrite */
+
+ /* here we could open the video file for write */
+ /* gpp->val.videoname */
+
+ l_percentage = 0.0;
+ if(gpp->val.run_mode == GIMP_RUN_INTERACTIVE)
+ {
+ gimp_progress_init(_("Rawframes Video Eextract .."));
+ }
+
+
+
+ if(gpp->val.range_from > gpp->val.range_to)
+ {
+ l_step = -1; /* operate in descending (reverse) order */
+ l_percentage_step = 1.0 / ((1.0 + gpp->val.range_from) - gpp->val.range_to);
+ }
+ else
+ {
+ l_step = 1; /* operate in ascending order */
+ l_percentage_step = 1.0 / ((1.0 + gpp->val.range_to) - gpp->val.range_from);
+ }
+ l_begin = gpp->val.range_from;
+ l_end = gpp->val.range_to;
+ l_max_master_frame_nr = abs(l_end - l_begin) + 1;
+
+ l_cur_frame_nr = l_begin;
+ l_save_runmode = GIMP_RUN_INTERACTIVE;
+ while(l_rc >= 0)
+ {
+ gboolean l_fetch_ok;
+ gboolean l_force_keyframe;
+ gint32 l_video_frame_chunk_size;
+ gint32 l_video_frame_chunk_hdr_size;
+
+ l_out_frame_nr++;
+
+
+ l_fetch_ok = gap_story_render_fetch_composite_image_or_chunk(l_vidhand
+ , l_cur_frame_nr
+ , (gint32) gpp->val.vid_width
+ , (gint32) gpp->val.vid_height
+ , gpp->val.filtermacro_file
+ , &l_layer_id /* output */
+ , &l_tmp_image_id /* output */
+ , TRUE /* dont_recode_flag */
+ , NULL /* GapCodecNameElem *vcodec_list NULL == no checks */
+ , &l_force_keyframe
+ , l_video_chunk_ptr
+ , &l_video_frame_chunk_size /* actual chunk size (incl. header) */
+ , l_maxSizeOfRawFrame /* IN max size */
+ , gpp->val.framerate
+ , l_max_master_frame_nr
+ , &l_video_frame_chunk_hdr_size
+ , l_check_flags
+ );
+ if(l_fetch_ok != TRUE)
+ {
+ printf("ERROR: fetching of frame: %d FAILED, terminating\n", (int)l_cur_frame_nr);
+ return -1;
+ }
+ else
+ {
+ gchar *l_sav_name;
+
+ l_sav_name = g_strdup_printf(l_frame_fmt, (int)l_out_frame_nr);
+
+ if (l_video_frame_chunk_size > 0)
+ {
+ gboolean l_saveOk;
+
+ l_cnt_reused_frames++;
+ //if (gap_debug)
+ {
+ printf("DEBUG: 1:1 copy of frame %d (fetch as chunk OK) chunk_ptr:%d chunk_size:%d chunk_hdr_size:%d\n"
+ , (int)l_cur_frame_nr
+ , (int)l_video_chunk_ptr
+ , (int)l_video_frame_chunk_size
+ , (int)l_video_frame_chunk_hdr_size
+ );
+ }
+
+ /* dont recode, just write video chunk to output frame file */
+ l_saveOk = p_save_chunk_as_frame(l_sav_name, l_video_chunk_ptr, l_video_frame_chunk_size, l_video_frame_chunk_hdr_size);
+ if (!l_saveOk)
+ {
+ return -1;
+ }
+ }
+ else
+ {
+ l_cnt_encoded_frames++;
+ //if (gap_debug)
+ {
+ printf("DEBUG: saving recoded frame %d (fetch as chunk FAILED)\n", (int)l_cur_frame_nr);
+ }
+
+ if(gpp->val.run_mode == GIMP_RUN_INTERACTIVE)
+ {
+ char *l_msg;
+
+ l_msg = g_strdup_printf(_("SAVING: %s\n"), l_sav_name);
+ gimp_progress_init(l_msg);
+ g_free(l_msg);
+ }
+
+ {
+ gint32 l_sav_rc;
+
+ if(gap_debug)
+ {
+ printf("rawframe mode:%d, image_id:%d save: %s l_sav_name\n"
+ ,(int)l_save_runmode
+ ,(int)l_tmp_image_id
+ ,l_sav_name
+ );
+ }
+ l_sav_rc = gap_lib_save_named_image(l_tmp_image_id, l_sav_name, l_save_runmode);
+ if(l_sav_rc < 0)
+ {
+ g_message(_("** Save FAILED on file\n%s"), l_sav_name);
+ l_rc = -1;
+ }
+ }
+ l_save_runmode = GIMP_RUN_WITH_LAST_VALS;
+ if(l_tmp_image_id < 0)
+ {
+ return -1;
+ }
+
+ }
+ g_free(l_sav_name);
+ }
+
+
+ if(l_tmp_image_id >= 0)
+ {
+ /* destroy the tmp image */
+ gap_image_delete_immediate(l_tmp_image_id);
+ }
+
+ l_percentage += l_percentage_step;
+ if(gap_debug) printf("PROGRESS: %f\n", (float) l_percentage);
+ if(gpp->val.run_mode == GIMP_RUN_INTERACTIVE)
+ {
+ gimp_progress_update (l_percentage);
+ }
+
+ /* advance to next frame */
+ if((l_cur_frame_nr == l_end) || (l_rc < 0))
+ {
+ break;
+ }
+ l_cur_frame_nr += l_step;
+
+ }
+
+ g_free(l_frame_fmt);
+
+ if(l_vidhand)
+ {
+ gap_gve_story_close_vid_handle(l_vidhand);
+ }
+
+ /* statistics */
+ printf("encoded frames: %d\n", (int)l_cnt_encoded_frames);
+ printf("1:1 copied frames: %d\n", (int)l_cnt_reused_frames);
+ printf("total handled frames: %d\n", (int)l_cnt_encoded_frames + l_cnt_reused_frames);
+
+ return l_rc;
+} /* end p_rawframe_encode */
+