[gimp-gap] switched to ffmpeg-0.6.1 libraries, videoencoder performance relevant changes.



commit a01e82394b557ec98c70b685d9532bd78c1dc4db
Author: Wolfgang Hofer <wolfgangh svn gnome org>
Date:   Tue Dec 14 20:31:12 2010 +0100

    switched to ffmpeg-0.6.1 libraries, videoencoder performance relevant changes.

 ChangeLog                                  |  179 +++
 NEWS                                       |   10 +-
 configure.in                               |   40 +-
 docs/reference/txt/STORYBOARD_FILE_DOC.txt |   18 +-
 docs/reference/txt/gap_gimprc_params.txt   |   52 +
 extern_libs/README_extern_libs             |    8 +-
 extern_libs/configure_options_ffmpeg.txt   |    4 +-
 extern_libs/ffmpeg.tar.gz                  |  Bin 4462285 -> 4471231 bytes
 gap/Makefile.am                            |    2 +-
 gap/gap_frame_fetcher.c                    |  284 ++++-
 gap/gap_frame_fetcher.h                    |   14 +
 gap/gap_player_dialog.c                    |   87 +-
 gap/gap_story_dialog.c                     |  309 ++++
 gap/gap_story_file.c                       |  124 ++-
 gap/gap_story_file.h                       |   10 +
 gap/gap_story_main.c                       |    8 +
 gap/gap_story_main.h                       |    7 +
 gap/gap_story_render_audio.c               |    3 +-
 gap/gap_story_render_lossless.c            |  320 +++-
 gap/gap_story_render_processor.c           | 2297 +++++++++++++++++++++++++---
 gap/gap_story_render_processor.h           |  166 ++-
 gap/gap_story_render_types.h               |    9 +-
 libgapbase/Makefile.am                     |    2 +
 libgapbase/gap_base.c                      |  200 +++
 libgapbase/gap_base.h                      |   83 +
 libgapbase/gap_libgapbase.h                |    1 +
 libgapbase/gap_timm.c                      |  569 +++++++
 libgapbase/gap_timm.h                      |  189 +++
 libgapvidapi/Makefile.am                   |    1 +
 libgapvidapi/gap_vid_api.c                 | 1010 ++++++++++++-
 libgapvidapi/gap_vid_api.h                 |   75 +-
 libgapvidapi/gap_vid_api_ffmpeg.c          |   44 +-
 libgapvidapi/gap_vid_api_mp_util.c         |  560 +++++++
 libgapvidutil/gap_gve_raw.c                |  736 ++++++++--
 libgapvidutil/gap_gve_raw.h                |   66 +
 vid_common/gap_cme_gui.c                   |    3 +-
 vid_enc_avi/gap_enc_avi_gui.c              |   38 +-
 vid_enc_avi/gap_enc_avi_main.c             |   27 +-
 vid_enc_avi/gap_enc_avi_main.h             |    2 +
 vid_enc_ffmpeg/gap_enc_ffmpeg_callbacks.c  |    8 +-
 vid_enc_ffmpeg/gap_enc_ffmpeg_gui.c        |   40 +-
 vid_enc_ffmpeg/gap_enc_ffmpeg_main.c       | 1714 ++++++++++++++++++---
 vid_enc_ffmpeg/gap_enc_ffmpeg_main.h       |    9 +-
 vid_enc_ffmpeg/gap_enc_ffmpeg_par.c        |  167 ++-
 vid_enc_ffmpeg/gap_enc_ffmpeg_par.h        |    5 +
 45 files changed, 8772 insertions(+), 728 deletions(-)
---
diff --git a/ChangeLog b/ChangeLog
index 5979f02..57da4c7 100755
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,182 @@
+2010-12-14 Wolfgang Hofer <hof gimp org>
+
+- GIMP-GAP has switched its main video encoder/decoder engine
+  from ffmpeg-0.6 to ffmpeg-0.6.1
+
+  - test videos: video index creation (smartscan) gives the same analysis files as ffmpeg-0.6
+  - seek tests repeated, same results as ffmpeg-0.6
+  - reading various videoformat samples gives the same results as ffmpeg-0.6
+
+- Analysis of the video encoding performance
+    The GVA api and the ffmpeg based encoder were checked and
+    some performance-relevant changes were implemented.
+
+    Note: the multithreading support did NOT fulfill my expectations.
+    The performance advantage was minimal compared with the singlethreaded
+    implementation on my 4-processor development machine.
+    Other changes (like enabling the mmx ffmpeg options and replacing the
+    row-by-row processing) were more effective for performance.
+
+- GVA video api 
+   - GVA_fetch_frame_to_buffer uses fcache filling reads in case of a backwards seek.
+   - added fcache_mutex to prepare the fcache access code for a multithreaded environment.
+   - provided the new procedure GVA_search_fcache_and_get_frame_as_gimp_layer_or_rgb888 
+     intended for use in a multithreaded environment (e.g. the prefetch thread
+     in the storyboard processor); see the mutex sketch below.
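+
+     A minimal sketch of the intended fcache locking (assumed names; the real
+     fcache_mutex lives in the GVA handle code and the lookup helper below is
+     hypothetical):
+
+       #include <pthread.h>
+       #include <glib.h>
+
+       static pthread_mutex_t fcache_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+       /* hypothetical fcache lookup helper */
+       static guchar * sketch_find_frame_in_fcache(gint32 framenr);
+
+       /* called from both the main thread and the prefetch thread;
+        * the mutex serializes all fcache accesses */
+       static guchar *
+       sketch_fcache_lookup_rgb888(gint32 framenr)
+       {
+         guchar *frame_data;
+
+         pthread_mutex_lock(&fcache_mutex);
+         frame_data = sketch_find_frame_in_fcache(framenr);
+         pthread_mutex_unlock(&fcache_mutex);
+
+         return (frame_data);
+       }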
+
+- replaced the slow row-by-row implementation
+  that converts from drawable to RGB buffer at encoding time
+  (see: libgapvidutil/gap_gve_raw.c gap_gve_drawable_to_RgbBuffer); sketch below.
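+
+  The idea behind the replacement (sketch only, not the actual
+  gap_gve_drawable_to_RgbBuffer code): fetch the whole drawable region with one
+  gimp_pixel_rgn_get_rect call instead of looping gimp_pixel_rgn_get_row per row:
+
+    #include <libgimp/gimp.h>
+
+    /* assumes an RGB drawable (bpp == 3) without alpha */
+    static guchar *
+    sketch_drawable_to_rgb_buffer(GimpDrawable *drawable)
+    {
+      GimpPixelRgn  srcPR;
+      guchar       *rgb_buffer;
+
+      rgb_buffer = g_malloc(drawable->width * drawable->height * drawable->bpp);
+
+      gimp_pixel_rgn_init(&srcPR, drawable
+                         , 0, 0, drawable->width, drawable->height
+                         , FALSE   /* dirty */
+                         , FALSE   /* shadow */
+                         );
+      /* one bulk transfer replaces height x gimp_pixel_rgn_get_row calls */
+      gimp_pixel_rgn_get_rect(&srcPR, rgb_buffer
+                             , 0, 0, drawable->width, drawable->height
+                             );
+      return (rgb_buffer);
+    }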
+  
+- added multithreading support for the ffmpeg based video encoder plug-in
+  using a ringbuffer queue implementation where the main thread
+  feeds the queue via the storyboard processor. The encoder thread runs
+  in parallel, reads from the queue and uses libavformat/libavcodec to write
+  the resulting videofile. (see the queue sketch below)
+
+  On singleprocessor configurations encoding is done without the queue
+  (the same way as the prior implementation did).
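+
+  A minimal sketch of such a bounded ringbuffer queue (simplified, assumed
+  names; not the encoder's actual queue code). The main thread is the
+  producer, the encoder thread is the consumer:
+
+    #include <pthread.h>
+
+    #define SKETCH_RINGBUFFER_SIZE 8
+
+    typedef struct {
+      void            *frames[SKETCH_RINGBUFFER_SIZE];
+      int              write_idx;
+      int              read_idx;
+      int              count;
+      pthread_mutex_t  mutex;
+      pthread_cond_t   not_full;
+      pthread_cond_t   not_empty;
+    } SketchFrameQueue;
+
+    /* producer: main thread, after the storyboard processor delivered a frame */
+    static void
+    sketch_queue_put(SketchFrameQueue *q, void *frame)
+    {
+      pthread_mutex_lock(&q->mutex);
+      while (q->count == SKETCH_RINGBUFFER_SIZE)
+      {
+        pthread_cond_wait(&q->not_full, &q->mutex);   /* queue full: wait */
+      }
+      q->frames[q->write_idx] = frame;
+      q->write_idx = (q->write_idx + 1) % SKETCH_RINGBUFFER_SIZE;
+      q->count++;
+      pthread_cond_signal(&q->not_empty);
+      pthread_mutex_unlock(&q->mutex);
+    }
+
+    /* consumer: encoder thread, before writing via libavformat/libavcodec */
+    static void *
+    sketch_queue_get(SketchFrameQueue *q)
+    {
+      void *frame;
+
+      pthread_mutex_lock(&q->mutex);
+      while (q->count == 0)
+      {
+        pthread_cond_wait(&q->not_empty, &q->mutex);  /* queue empty: wait */
+      }
+      frame = q->frames[q->read_idx];
+      q->read_idx = (q->read_idx + 1) % SKETCH_RINGBUFFER_SIZE;
+      q->count--;
+      pthread_cond_signal(&q->not_full);
+      pthread_mutex_unlock(&q->mutex);
+      return (frame);
+    }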
+
+- added a new menu entry in the storyboard dialog that allows editing gimprc
+  parameters that are relevant for the storyboard render processing.
+
+- The storyboard processor provides a new API procedure
+    gap_story_render_fetch_composite_image_or_buffer_or_chunk
+  that allows video encoders to bypass the render engine 
+  in scenes where a simple 1:1 copy of fetched frames in movie clips is possible.
+  (in such cases an rgb888 buffer is delivered to the encoder instead of 
+  a gimp layer)
+  The ffmpeg based video encoder already uses this new API
+  and runs faster when encoding such storyboard scenes.
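+
+  Sketch of the branching an encoder does with the fetch result (the parameter
+  handling of gap_story_render_fetch_composite_image_or_buffer_or_chunk is
+  simplified here; the sketch_* helpers are hypothetical):
+
+    /* hypothetical helpers, not part of the real API */
+    static void     sketch_encode_rgb888_frame(guchar *buffer, gint32 width, gint32 height);
+    static guchar * sketch_drawable_to_rgb_buffer(GimpDrawable *drawable);
+
+    static void
+    sketch_encode_next_frame(guchar *rgb888_buffer, gint32 composite_layer_id
+                            , gint32 width, gint32 height)
+    {
+      if (rgb888_buffer != NULL)
+      {
+        /* lossless 1:1 copy scene: encode the delivered rgb888 buffer
+         * directly, the gimp based render engine is bypassed */
+        sketch_encode_rgb888_frame(rgb888_buffer, width, height);
+      }
+      else
+      {
+        /* regular scene: a gimp layer was delivered and has to be
+         * converted to an RGB buffer first */
+        guchar *buffer;
+
+        buffer = sketch_drawable_to_rgb_buffer(gimp_drawable_get(composite_layer_id));
+        sketch_encode_rgb888_frame(buffer, width, height);
+        g_free(buffer);
+      }
+    }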
+
+- added gimprc parameters:
+  
+  "video-storyboard-fcache-size-per-videofile"
+  "video-storyboard-resource-log-interval"
+    # the integer parameter video-storyboard-resource-log-interval
+    # enables logging of cached resources during storyboard processing.
+    # value n (positive integer) triggers resource logging every n-th frame,
+    #         where logging includes information about cached images 
+    #         and currently open video handles.
+    #         This is intended for debugging purposes.
+    # value 0 (the default) turns off this type of logging.
+
+  "video-storyboard-multiprocessor-enable"
+    # the boolean parameter video-storyboard-multiprocessor-enable
+    # enables multiprocessor support for the storyboard processor
+    # The current implementation uses a parallel running prefetch thread
+    # that reads frames from referenced videofiles into a frame cache
+    # in advance.
+  
+  "video-enoder-ffmpeg-multiprocessor-enable"
+    # the boolean parameter video-enoder-ffmpeg-multiprocessor-enable
+    # enables multiprocessor support for the ffmpeg based video encoder.
+    # The current implementation uses a parallel running encoder thread
+    # that uses a ringbuffer queue that is fed by the storyboard processor.
+  
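+  How such parameters are typically read at runtime via the libgapbase helpers
+  (the call pattern matches the code in this commit; the wrapper procedure and
+  variables are just a sketch):
+
+    #include "gap_base.h"   /* libgapbase */
+
+    static void
+    sketch_read_render_gimprc_settings(void)
+    {
+      gboolean isMultithreadEnabled;
+      gint32   resourceLogInterval;
+
+      isMultithreadEnabled = gap_base_get_gimprc_gboolean_value(
+                                  "video-storyboard-multiprocessor-enable"
+                                 , FALSE   /* default; the real code derives this
+                                            * from the gimprc num-processors value */
+                                 );
+      resourceLogInterval  = gap_base_get_gimprc_int_value(
+                                  "video-storyboard-resource-log-interval"
+                                 , 0       /* default: logging off */
+                                 , 0       /* min */
+                                 , 100000  /* max */
+                                 );
+    }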
+
+- Analysis of out-of-memory errors while encoding large HD videos via the storyboard processor.
+  ==> the default values for: 
+    video-storyboard-max-open-videofiles    12
+    GAP_STB_RENDER_GVA_FRAMES_TO_KEEP_CACHED 36
+
+  are too large in case HD video is processed
+  (this filled up my 4 GB of RAM on storyboard encoding
+  with the fcache of the video api).
+
+     one HD frame takes 6271236 bytes  // (1920 x 1088 x 3) + 4  + (1088 * 4)
+     for all videohandles this sums up to 2709173952 bytes    //  12  * 36 * 6271236
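+
+     Spelling out the numbers above as a quick check (sketch):
+
+       gint32 frameBytes  = (1920 * 1088 * 3) + 4 + (1088 * 4);  /*  6271236 bytes */
+       gint64 fcacheTotal = 12LL * 36LL * frameBytes;            /* 2709173952 bytes, ~2.5 GB */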
+     
+     the rest of 4GB was mainly filled with gimp images 
+     (the image cache default "gap_ffetch_max_img_cache_elements" is 18 images)
+     
+  As a result of this analysis the GAP_STB_RENDER_GVA_FRAMES_TO_KEEP_CACHED value
+  is now configurable via the gimprc parameter "video-storyboard-fcache-size-per-videofile".
+  Furthermore the configured value is only used in case videoclips are played backwards.
+  For standard ascending frame access a small fcache size (6) is picked that 
+  shall be big enough for prefetch of videoframes in a multithreaded environment.
+  These changes result in remarkably less usage of memory resources without
+  loss of performance.
+
+- added optional performance logging in single- or multithreaded environments 
+  for storyboard processing, the ffmpeg video api parts and the ffmpeg based video encoder.
+
+  The new configure option
+     --enable-runtime_recording
+  compiles gimp-gap with support for runtime recording/logging of some
+  instrumented procedures, intended for performance analysis and tuning purposes.
+  (see the gap_timm.c calls in the source code; usage sketch below)
+  
+  
+  The logging report is triggered
+  o) when the STOP button is clicked in the player
+  o) after the ffmpeg based encoder has finished
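+
+  Instrumentation pattern of the GAP_TIMM macros as used in this commit
+  (the procedure and the name string below are just an example):
+
+    #include "gap_timm.h"   /* libgapbase, new in this commit */
+
+    static void
+    sketch_instrumented_procedure(void)
+    {
+      static gint32 funcId = -1;
+
+      GAP_TIMM_GET_FUNCTION_ID(funcId, "sketch.instrumented_procedure");
+      GAP_TIMM_START_FUNCTION(funcId);
+
+      /* ... the work to be measured ... */
+
+      GAP_TIMM_STOP_FUNCTION(funcId);
+
+      /* GAP_TIMM_PRINT_FUNCTION_STATISTICS() dumps the recorded runtimes,
+       * triggered e.g. on the player STOP button or after encoding finished */
+    }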
+  
+  
+- optional resource usage logging while storyboard processing
+
+- use rounding in the calculation of storyboard scale and movement coordinates
+  (use gdouble variables and the rint function in procedure
+  gap_story_file_calculate_render_attributes; see the sketch below)
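+
+  A minimal sketch of the rounding (the formula is illustrative only, not the
+  actual gap_story_file_calculate_render_attributes code):
+
+    #include <glib.h>
+    #include <math.h>
+
+    static gint32
+    sketch_scaled_coordinate(gdouble master_width, gdouble frame_width, gdouble move_x)
+    {
+      gdouble result_x;
+
+      /* compute in gdouble and round once at the end; plain truncation
+       * caused 1-pixel jitter in scaled/moved clips */
+      result_x = ((master_width - frame_width) / 2.0) + (move_x * master_width / 2.0);
+      return ((gint32)rint(result_x));
+    }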
+
+- AVI1 videoencoder: added an option to select RGB or BGR for raw video
+  (intended mainly for test purposes; most players want BGR for RAW video,
+  see the swap sketch below)
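+
+  The kind of per-pixel swap the RGB/BGR option implies (sketch only, not the
+  actual vid_enc_avi code):
+
+    #include <glib.h>
+
+    /* swap R and B channels in place for an rgb888 buffer */
+    static void
+    sketch_rgb888_to_bgr888_inplace(guchar *buffer, gint32 width, gint32 height)
+    {
+      gint32 idx;
+      gint32 numPixels = width * height;
+
+      for (idx = 0; idx < numPixels; idx++)
+      {
+        guchar tmp = buffer[0];
+
+        buffer[0] = buffer[2];
+        buffer[2] = tmp;
+        buffer += 3;
+      }
+    }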
+
+  * NEWS  
+  * configure.in  
+
+  * docs/reference/txt/gap_gimprc_params.txt  
+  * docs/reference/txt/STORYBOARD_FILE_DOC.txt  
+
+  * extern_libs/configure_options_ffmpeg.txt  
+  * extern_libs/ffmpeg.tar.gz                # updated to ffmpeg release 0.6.1
+  * extern_libs/README_extern_libs  
+
+  * gap/gap_frame_fetcher.c  
+  * gap/gap_frame_fetcher.h  
+  * gap/gap_player_dialog.c  
+  * gap/gap_story_dialog.c  
+  * gap/gap_story_file.c  
+  * gap/gap_story_file.h  
+  * gap/gap_story_main.c  
+  * gap/gap_story_main.h  
+  * gap/gap_story_render_audio.c  
+  * gap/gap_story_render_lossless.c  
+  * gap/gap_story_render_processor.c  
+  * gap/gap_story_render_processor.h  
+  * gap/gap_story_render_types.h  
+  * gap/Makefile.am  
+
+  * libgapbase/gap_base.c  
+  * libgapbase/gap_base.h  
+  * libgapbase/gap_libgapbase.h  
+  * libgapbase/Makefile.am  
+
+  * libgapvidapi/gap_vid_api.c  
+  * libgapvidapi/gap_vid_api_ffmpeg.c  
+  * libgapvidapi/gap_vid_api.h 
+  * libgapvidapi/Makefile.am  
+
+  * libgapvidutil/gap_gve_raw.c  
+  * libgapvidutil/gap_gve_raw.h  
+
+  * vid_common/gap_cme_gui.c 
+ 
+  * vid_enc_avi/gap_enc_avi_gui.c  
+  * vid_enc_avi/gap_enc_avi_main.c  
+  * vid_enc_avi/gap_enc_avi_main.h 
+ 
+  * vid_enc_ffmpeg/gap_enc_ffmpeg_callbacks.c  
+  * vid_enc_ffmpeg/gap_enc_ffmpeg_gui.c  
+  * vid_enc_ffmpeg/gap_enc_ffmpeg_main.c  
+  * vid_enc_ffmpeg/gap_enc_ffmpeg_main.h  
+  * vid_enc_ffmpeg/gap_enc_ffmpeg_par.c  
+  * vid_enc_ffmpeg/gap_enc_ffmpeg_par.h  
+
+
+  *  libgapbase/gap_timm.c                # NEW FILE
+  *  libgapbase/gap_timm.h                # NEW FILE
+  *  libgapvidapi/gap_vid_api_mp_util.c   # NEW FILE
+
 2010-10-10 Wolfgang Hofer <hof gimp org>
   
 - GIMP-GAP has swichted its main video encoder/decoder engine
diff --git a/NEWS b/NEWS
index aabca56..89e868c 100644
--- a/NEWS
+++ b/NEWS
@@ -29,6 +29,9 @@ Here is a short overview whats new in GIMP-GAP-2.7.0:
   (anchor modes are: ClipColormask, Clip, Master)
 
 - The Storyboard now supports rotatation of the prcessed clips by any angle.
+  Bugfixes in the storyboard processing now use fewer memory resources
+  and produce smoother movement.
+  (the old code ran out of memory when processing multiple HD videoclips)
 
 - A new storyboard processing feature allows adding external transparency 
   for clip type movie. This is done via format string that refers to
@@ -51,12 +54,15 @@ Here is a short overview whats new in GIMP-GAP-2.7.0:
    individual workpointfiles per processed frame are located via
    name convention (extension .morphpoints) and used for morphing when they exist.
    (such files can be created with the help of the plug-in "Morph Workpoint Generator").
-
+
 - support to run gimp_color_balance tool as animated filter 
   (added wrapper plug-in).
 
+- significantly better performance for the ffmpeg based video encoder
+  on single- and multiprocessor machines.
+
 - updated gimp-gap video API and ffmpeg-based video encoder
-  to support the libraries provided with the ffmpeg-0.6 release
+  to support the libraries provided with the ffmpeg-0.6.1 release
   This includes various bugfixes related to video de/encode 
   (but breaks backwards compatibility when seeking positions by frame number
   in videofiles that do not start with a keyframe).
diff --git a/configure.in b/configure.in
index 448792e..49113c2 100755
--- a/configure.in
+++ b/configure.in
@@ -62,6 +62,9 @@ AC_SUBST(GIMP_PLUGIN_DIR)
 
 PKG_CHECK_MODULES(GLIB, glib-2.0 >= 2.8.0)
 
+
+
+
 NEW_LINE="
 "
 
@@ -159,6 +162,10 @@ AC_CHECK_LIB(pthread, pthread_create,
           pthread_err="pthread header file (pthread.h) not found")],
 	pthread_err="pthread library (libpthread) not found")
 
+  if test "x$pthread_err" = "x"; then
+    AC_DEFINE(GAP_HAVE_PTHREAD, 1,
+              [Define to 1 if you have the <pthread.h> header file.])
+  fi
 
 dnl check for bzip2 library (for ffmpeg matroskadec )
 dnl the check result does not matter unless libavformat is linked or built later on.
@@ -1002,18 +1009,19 @@ AC_SUBST(FFMPEG_LIBSWSCALE_A)
 AC_SUBST(LMPEG3_DIR)
 AC_SUBST(LMPEG3_A)
 
-GAPVIDEOAPI_EXTLIBS="\$(GAP_VLIBS_FFMPEG) \$(GAP_VLIBS_MPEG3) -lpng -lz \$(GAP_PTHREAD_LIB) -lm"
-GAPVIDEOAPI_EXTINCS="\$(GAP_VINCS_FFMPEG) \$(GAP_VINCS_MPEG3)"
-AC_SUBST(GAPVIDEOAPI_EXTLIBS)
-AC_SUBST(GAPVIDEOAPI_EXTINCS)
-
 dnl 
 dnl TODO: check for required gthread version, 
 dnl and disable GAP_USE_GTHREAD if not available
 dnl
-GTHREAD_LIBS="-lgthread-2.0"
+GTHREAD_LIBS=`$PKG_CONFIG --libs gthread-2.0`
 AC_SUBST(GTHREAD_LIBS)
 
+GAPVIDEOAPI_EXTLIBS="\$(GAP_VLIBS_FFMPEG) \$(GAP_VLIBS_MPEG3) -lpng -lz \$(GTHREAD_LIBS) \$(GAP_PTHREAD_LIB) -lm"
+GAPVIDEOAPI_EXTINCS="\$(GAP_VINCS_FFMPEG) \$(GAP_VINCS_MPEG3)"
+AC_SUBST(GAPVIDEOAPI_EXTLIBS)
+AC_SUBST(GAPVIDEOAPI_EXTINCS)
+
+
 
 
 dnl Test for audiosupport
@@ -1056,6 +1064,26 @@ AC_ARG_ENABLE(gdkpixbuf_pview,
 AM_CONDITIONAL(BUILD_FFMPEG_LIBS, test "x$build_ffmpeg_libs_yesno" != "xno")
 AM_CONDITIONAL(BUILD_LIBMPEG3_LIB, test "x$build_libmpeg3_lib_yesno" != "xno")
 
+dnl
+dnl runtime recording is primarily a developer feature
+dnl for analysis and performance tuning purposes.
+dnl
+AC_ARG_ENABLE(runtime-recording-all,
+              [  --enable-runtime-recording-all      enable runtime recording and logging of all procedures using GAP_TIMM macros (default is no for this debug feature)])
+  if test "x$enable_runtime_recording_all" = "xyes"; then
+    AC_DEFINE(GAP_RUNTIME_RECORDING_LOCK, 1,
+              [use gap runtime recording and logging features (enables locking implementations GAP_TIMM_x_FUNCTION macros)])
+    AC_DEFINE(GAP_RUNTIME_RECORDING_NOLOCK, 1,
+              [use gap runtime recording and logging features (enables non-locking implementations GAP_TIMM_x_RECORD macros)])
+  fi
+
+AC_ARG_ENABLE(runtime-recording-nolock,
+              [  --enable-runtime-recording-nolock  enable runtime recording (only non-locking implementations, default is no for this debug feature)])
+  if test "x$enable_runtime_recording_nolock" = "xyes"; then
+    AC_DEFINE(GAP_RUNTIME_RECORDING_NOLOCK, 1,
+              [use gap runtime recording and logging features (enables non-locking implementations GAP_TIMM_x_RECORD macros)])
+  fi
+
 
 GAPLIBDIR=${libdir}/$PACKAGE-$GAP_MAJOR_VERSION.$GAP_MINOR_VERSION
 
diff --git a/docs/reference/txt/STORYBOARD_FILE_DOC.txt b/docs/reference/txt/STORYBOARD_FILE_DOC.txt
index 6d0660a..9189749 100644
--- a/docs/reference/txt/STORYBOARD_FILE_DOC.txt
+++ b/docs/reference/txt/STORYBOARD_FILE_DOC.txt
@@ -1,4 +1,4 @@
-STORYBOARD_FILES           2010.03.20:
+STORYBOARD_FILES           2010.11.02:
 
 
 General
@@ -702,9 +702,23 @@ VID_FIT_SIZE
   - The Input Frame is placed in the center of the 
     composite output frame.
     (unless VID_MOVE_X/Y define other offsets than 0.0)
-  - Zooming may cause additional Scaling. The VID_FIT_SIZE
+  - Zooming may cause additional Scaling. The VID_FIT_SIZE record
     describes the normal Size (where zoom is set to 1.0)
 
+  - WARNING: settings that disable scaling 
+    (e.g. using a fixed width or height at the original source image size)
+    do NOT follow changes of the resulting VID_MASTER_SIZE.
+    In case you want to render videos in different sizes from the same storyboard
+    by simply changing VID_MASTER_SIZE, you should NOT
+    use such settings for your clips.
+    
+      VID_FIT_SIZE mode=none                                     # for fixed width and height
+      VID_FIT_SIZE mode=width proportions=change_proportions     # fixed width
+      VID_FIT_SIZE mode=height proportions=change_proportions    # fixed height
+      
+    Furthermore a fixed size also forces internal rendering for preview purposes
+    to operate at full VID_MASTER_SIZE, which may be very slow (especially for HD video).
+
 
   The described behavior is valid for all fetched Frames in the same track
   until the next VID_FIT_SIZE record (or until end of file if there is none)
diff --git a/docs/reference/txt/gap_gimprc_params.txt b/docs/reference/txt/gap_gimprc_params.txt
index 5435fcb..5fb6b2a 100644
--- a/docs/reference/txt/gap_gimprc_params.txt
+++ b/docs/reference/txt/gap_gimprc_params.txt
@@ -174,6 +174,11 @@ If you edit gimprc files by hand, you must do this before you startup GIMP.
 # run out of memory and other resources. Therefore the number
 # of simultaneous open videofiles is limited to a default
 # of 12.
+# The integer parameter video-storyboard-fcache-size-per-videofile
+# sets the number of frames to be cached per open video handle
+# for backwards playing video clips. (it is not relevant for
+# the standard case where video clips are read in ascending frame sequence)
+#
 # If you have less than 1GB of memory, you may use a smaller
 # value (minimum is 2) when you are running out of memory
 # while storyboard processing.
@@ -181,6 +186,53 @@ If you edit gimprc files by hand, you must do this before you startup GIMP.
 # videoclips but this will be slower because there will be more
 # open, seek and close operations required with a smaller limit.
 (video-storyboard-max-open-videofiles 12)
+(video-storyboard-fcache-size-per-videofile 36)
+
+# The boolean parameter video-storyboard-preview-render-full-size
+# sets the size for storyboard rendering for preview purposes
+# (in the player widget that is embedded in the storyboard dialog).
+# The value "no" picks the minimum of the current preview size and the storyboard size
+# for rendering.
+# Note that preview rendering is forced to full size (regardless
+# of the setting of this parameter) in case the storyboard
+# includes at least one clip with fixed width or height.
+# (e.g. the storyboard records of the following types:
+#  VID_FIT_SIZE mode=none
+#  VID_FIT_SIZE mode=width proportions=change_proportions
+#  VID_FIT_SIZE mode=height proportions=change_proportions
+# )  
+# The value "yes" uses full original storyboard size for rendering of the preview.
+# This may be desired in case the user wants to extract a composite frame
+# from the playback widget in the storyboard dialog at full size.
+(video-storyboard-preview-render-full-size "no")
+
+# the integer parameter video-storyboard-resource-log-interval
+# enables logging of cached resources during storyboard processing.
+# value n (positive integer) triggers resource logging every n-th frame,
+#         where logging includes information about cached images 
+#         and currently open video handles.
+#         This is intended for debugging purposes.
+# value 0 (the default) turns off this type of logging.
+(video-storyboard-resource-log-interval 0)
+
+
+# the boolean parameter video-storyboard-multiprocessor-enable
+# enables multiprocessor support for the storyboard processor
+# The current implementation uses a parallel running prefetch thread
+# that reads frames from referenced videofiles into a frame cache
+# in advance.
+# in case num-processors is configured with value 1 the default is "no" (otherwise "yes")
+(video-storyboard-multiprocessor-enable "no")
+  
+# the boolean parameter video-enoder-ffmpeg-multiprocessor-enable
+# enables multiprocessor support for the ffmpeg based video encoder.
+# The current implementation uses a parallel running encoder thread
+# that uses a ringbuffer queue that is fed by the storyboard processor.
+# In case num-processors is configured with value 1 the default is "no" (otherwise "yes").
+(video-enoder-ffmpeg-multiprocessor-enable "no")
+
+
+
 
 
 # the video-libavformat-seek-gopsize parameter
diff --git a/extern_libs/README_extern_libs b/extern_libs/README_extern_libs
index 84dc99d..a590660 100755
--- a/extern_libs/README_extern_libs
+++ b/extern_libs/README_extern_libs
@@ -3,7 +3,7 @@ as sourcecode for convenient static linking.
 
 CURRENT FFMPEG version is:
 
-- ffmpeg 0.6
+- ffmpeg 0.6.1
 
 CURRENT LIBMPEG3 version is:
        
@@ -48,7 +48,11 @@ this GIMP-GAP distribution.
   working. 
   
   GIMP-GAP currently supports 
-  o) ffmpeg-0.6 basically works, tests are in progress
+  o) ffmpeg-0.6.1 tests repeated, same results as ffmpeg-0.6
+  o) ffmpeg-0.6 basically works, tests done.
+     Incompatibility detected at frame-exact positioning compared against ffmpeg-0.5 
+     on dvd mpeg2 encoded videos that do not start with a keyframe.
+     (No idea how to fix; there are no plans to invest more effort on this...)
 
   o) ffmpeg-0.5 successfully tested with many videoformats
      but does no longer work with recent version of libx264 (X264_BUILD 93)
diff --git a/extern_libs/configure_options_ffmpeg.txt b/extern_libs/configure_options_ffmpeg.txt
index 4830ed1..9f3d889 100644
--- a/extern_libs/configure_options_ffmpeg.txt
+++ b/extern_libs/configure_options_ffmpeg.txt
@@ -1,4 +1,6 @@
---disable-shared --enable-static --disable-mmx --enable-gpl 
+--disable-shared --enable-static --enable-gpl --enable-mmx --enable-mmx2 
+# --enable-mmx --enable-mmx2 
+# 
 # recent ffmpeg releases does no longer support --enable-liba52
 # for audio /mp3 encoding ffmpeg recommands to link with the external libraries.
 #
diff --git a/extern_libs/ffmpeg.tar.gz b/extern_libs/ffmpeg.tar.gz
old mode 100755
new mode 100644
index b04410f..a7d7914
Binary files a/extern_libs/ffmpeg.tar.gz and b/extern_libs/ffmpeg.tar.gz differ
diff --git a/gap/Makefile.am b/gap/Makefile.am
index b4b4387..fd13cbc 100644
--- a/gap/Makefile.am
+++ b/gap/Makefile.am
@@ -23,7 +23,7 @@ GAP_VIDEO_EXTRACT = gap_video_extract
 GAP_VIDEO_INDEX = gap_video_index
 endif
 
-LIBGAPBASE  = $(top_builddir)/libgapbase/libgapbase.a
+LIBGAPBASE  = $(top_builddir)/libgapbase/libgapbase.a $(GTHREAD_LIBS)
 INC_LIBGAPBASE = -I$(top_srcdir)/libgapbase
 
 LIBGIMPGAP = libgimpgap.a
diff --git a/gap/gap_frame_fetcher.c b/gap/gap_frame_fetcher.c
old mode 100644
new mode 100755
index d43bc09..e88acf2
--- a/gap/gap_frame_fetcher.c
+++ b/gap/gap_frame_fetcher.c
@@ -67,6 +67,15 @@
 //#include <dirent.h>
 
 
+// TODO add configure option to check for HAVE_SYSINFO
+#define HAVE_SYSINFO 0
+
+#ifdef HAVE_SYSINFO
+#include <sys/sysinfo.h>
+#endif
+
+
+
 #include <glib/gstdio.h>
 
 
@@ -96,7 +105,7 @@
 
 /* the lists of cached images and duplicates are implemented via GIMP image parasites,
  * where images are simply loaded by GIMP without adding a display and marked with a non persistent parasite.
- * the GAP_IMAGE_CACHE_PARASITE holds the modification timestamp (mtime) and full filename (inclusive terminating 0)
+ * the GAP_IMAGE_CACHE_PARASITE holds the modification timestamp (mtime), gint32 ffetch_user_id and full filename (inclusive terminating 0)
  * the GAP_IMAGE_DUP_CACHE_PARASITE holds the gint32 ffetch_user_id
  */
 
@@ -147,7 +156,7 @@ static GapFFetchResourceUserElem *global_rsource_users = NULL;
  *         FRAME FETCHER procedures                          *
  *************************************************************
  */
-static gint32         p_load_cache_image(const char* filename, gboolean addToCache);
+static gint32         p_load_cache_image(const char* filename, gint32 ffetch_user_id, gboolean addToCache);
 static void           p_drop_image_cache(void);
 #ifdef GAP_ENABLE_VIDEOAPI_SUPPORT
 static void           p_drop_gvahand_cache_elem1(GapFFetchGvahandCache *gvcache);
@@ -269,9 +278,12 @@ p_get_ffetch_gva_frames_to_keep_cached()
 /* ----------------------------------------------------
  * p_load_cache_image
  * ----------------------------------------------------
+ * load an image from the cache, or from file in case the image is not already cached.
+ * in case the flag addToCache is TRUE the image will automatically be added
+ * to the cache after the read-from-file operation.
  */
 static gint32
-p_load_cache_image(const char* filename, gboolean addToCache)
+p_load_cache_image(const char* filename, gint32 ffetch_user_id, gboolean addToCache)
 {
   gint32 l_image_id;
   char *l_filename;
@@ -280,7 +292,7 @@ p_load_cache_image(const char* filename, gboolean addToCache)
   gint    nimages;
   gint    l_idi;
   gint    l_number_of_cached_images;
-  gint32  l_first_chached_image_id;
+  gint32  l_first_cached_image_id;
   GimpParasite  *l_parasite;
 
 
@@ -291,7 +303,7 @@ p_load_cache_image(const char* filename, gboolean addToCache)
   }
 
   l_image_id = -1;
-  l_first_chached_image_id = -1;
+  l_first_cached_image_id = -1;
   l_number_of_cached_images = 0;
   images = gimp_image_list(&nimages);
   for(l_idi=0; l_idi < nimages; l_idi++)
@@ -301,15 +313,17 @@ p_load_cache_image(const char* filename, gboolean addToCache)
     if(l_parasite)
     {
       gint32 *mtime_ptr;
+      gint32 *ffetch_id_ptr;
       gchar  *filename_ptr;
       
       mtime_ptr = (gint32 *) l_parasite->data;
-      filename_ptr = (gchar *)&l_parasite->data[sizeof(gint32)];
+      ffetch_id_ptr = (gint32 *)&l_parasite->data[sizeof(gint32)];
+      filename_ptr = (gchar *)&l_parasite->data[sizeof(gint32) + sizeof(gint32)];
     
       l_number_of_cached_images++;
-      if (l_first_chached_image_id < 0)
+      if (l_first_cached_image_id < 0)
       {
-        l_first_chached_image_id = images[l_idi];
+        l_first_cached_image_id = images[l_idi];
       }
       
       if(strcmp(filename, filename_ptr) == 0)
@@ -329,8 +343,9 @@ p_load_cache_image(const char* filename, gboolean addToCache)
            */
           if(gap_debug)
           {
-            printf("FrameFetcher: DELETE because mtime changed : (image_id:%d) name:%s  mtimefile:%d mtimecache:%d  pid:%d\n"
+            printf("FrameFetcher: DELETE because mtime changed : (image_id:%d) ffetchId:%d name:%s  mtimefile:%d mtimecache:%d  pid:%d\n"
                   , (int)images[l_idi]
+                  , (int)*ffetch_id_ptr
                   , gimp_image_get_filename(images[l_idi])
                   , (int)mtimefile
                   , (int)*mtime_ptr
@@ -372,6 +387,7 @@ p_load_cache_image(const char* filename, gboolean addToCache)
     guchar *parasite_data;
     gint32  parasite_size;
     gint32 *parasite_mtime_ptr;
+    gint32 *parasite_ffetch_id_ptr;
     gchar  *parasite_filename_ptr;
     gint32  len_filename0;           /* filename length including the terminating 0 */
   
@@ -383,23 +399,25 @@ p_load_cache_image(const char* filename, gboolean addToCache)
       if(gap_debug)
       {
         printf("FrameFetcher: DELETE because cache is full: (image_id:%d)  name:%s number_of_cached_images:%d pid:%d\n"
-              , (int)l_first_chached_image_id
-              , gimp_image_get_filename(l_first_chached_image_id)
+              , (int)l_first_cached_image_id
+              , gimp_image_get_filename(l_first_cached_image_id)
               , (int)l_number_of_cached_images
               , (int)gap_base_getpid()
               );
       }
-      gap_image_delete_immediate(l_first_chached_image_id);
+      gap_image_delete_immediate(l_first_cached_image_id);
     }
 
     /* build parasite data including mtime and full filename with terminating 0 byte */
     len_filename0 = strlen(filename) + 1;
-    parasite_size = sizeof(gint32) + len_filename0;  
+    parasite_size = sizeof(gint32) + sizeof(gint32) + len_filename0;  
     parasite_data = g_malloc0(parasite_size);
     parasite_mtime_ptr = (gint32 *)parasite_data;
-    parasite_filename_ptr = (gchar *)&parasite_data[sizeof(gint32)];
+    parasite_ffetch_id_ptr = (gint32 *)&parasite_data[sizeof(gint32)];
+    parasite_filename_ptr = (gchar *)&parasite_data[sizeof(gint32) + sizeof(gint32)];
     
     *parasite_mtime_ptr = gap_file_get_mtime(filename);
+    *parasite_ffetch_id_ptr = ffetch_user_id;
     memcpy(parasite_filename_ptr, filename, len_filename0);
     
     /* attach a parasite to mark the image as part of the gap image cache */
@@ -640,8 +658,8 @@ p_ffetch_get_open_gvahand(const char* filename, gint32 seltrack, const char *pre
 
     if(l_idx > gvcache->max_vid_cache)
     {
-      /* chache list has more elements than desired,
-       * drop the 1.st (oldest) entry in the chache list
+      /* cache list has more elements than desired,
+       * drop the 1.st (oldest) entry in the cache list
        * (this closes the droped handle)
        */
       p_drop_gvahand_cache_elem1(gvcache);
@@ -752,7 +770,7 @@ gap_frame_fetch_orig_image(gint32 ffetch_user_id
     ,gboolean addToCache             /* enable caching */
     )
 {
-  return (p_load_cache_image(filename, addToCache));
+  return (p_load_cache_image(filename, ffetch_user_id, addToCache));
 }  /* end gap_frame_fetch_orig_image */
 
 
@@ -778,7 +796,7 @@ gap_frame_fetch_dup_image(gint32 ffetch_user_id
 
   resulting_layer = -1;
   dup_image_id = -1;
-  image_id = p_load_cache_image(filename, addToCache);
+  image_id = p_load_cache_image(filename, ffetch_user_id, addToCache);
   if (image_id < 0)
   {
     return(-1);
@@ -857,8 +875,8 @@ gap_frame_fetch_dup_image(gint32 ffetch_user_id
  * gap_frame_fetch_dup_video
  * ----------------------------
  * returns the fetched video frame as gimp layer_id.
- *         the returned layer id is (the only layer) in a temporary image.
- *         note the caller is responsible to delete that temporary image after processing is done.
+ *         the returned layer id is the only layer in a temporary image.
+ *         note that the caller is responsible to delete the temporary image after processing is done.
  *         this can be done by calling gap_frame_fetch_delete_list_of_duplicated_images()
  */
 gint32
@@ -1008,7 +1026,7 @@ gap_frame_fetch_register_user(const char *caller_name)
  * gap_frame_fetch_unregister_user
  * -------------------------------------------------
  * unregister the specified resource user id.
- + (if there are still registered resource users
+ * (if there are still registered resource users
  *  cached images and videohandles are kept.
  *  until the last resource user calls this procedure.
  *  if there are no more registered users all
@@ -1089,3 +1107,227 @@ gap_frame_fetch_remove_parasite(gint32 image_id)
   }
 
 }  /* end gap_frame_fetch_remove_parasite */
+
+
+
+
+/* ----------------------------------------------------
+ * p_dump_resources_gvahand
+ * ----------------------------------------------------
+ * print videohandle resource usage of cached video handles to stdout.
+ */
+static void
+p_dump_resources_gvahand()
+{
+#ifdef GAP_ENABLE_VIDEOAPI_SUPPORT
+  t_GVA_Handle *l_gvahand;
+  GapFFetchGvahandCacheElem  *gvc_ptr;
+  GapFFetchGvahandCacheElem  *gvc_last;
+  GapFFetchGvahandCache      *gvcache;
+  gint32                      count;
+
+  count = 0;
+  if(global_gvcache != NULL)
+  {
+    gvcache = global_gvcache;
+    gvc_last = gvcache->gvc_list;
+
+    for(gvc_ptr = gvcache->gvc_list; gvc_ptr != NULL; gvc_ptr = (GapFFetchGvahandCacheElem *)gvc_ptr->next)
+    {
+      t_GVA_Handle *gvahand;
+      
+      gvahand = gvc_ptr->gvahand;
+      
+      count++;
+      printf("FrameFetcher GVA_handle: %s   currFrameNr:%d fcache elemSize:%d byteSize:%d\n"
+        , gvahand->filename
+        , (int) gvahand->current_frame_nr
+        , (int)GVA_get_fcache_size_in_elements(gvahand)
+        , (int)GVA_get_fcache_size_in_bytes(gvahand)
+        );
+
+      gvc_last = gvc_ptr;
+    }
+    printf("FrameFetcher holds %d open GVA_handles limit gap_ffetch_max_gvc_cache_elements:%d (fcache_size:%d)\n"
+       ,(int)count
+       ,(int)p_get_ffetch_max_gvc_cache_elements()
+       ,(int)p_get_ffetch_gva_frames_to_keep_cached()
+       );
+    
+  }
+  else
+  {
+    printf("FrameFetcher has no GVA_handle cached\n");
+  }
+#else
+
+  printf("FrameFetcher compiled without GAP_ENABLE_VIDEOAPI_SUPPORT\n");
+
+#endif
+
+}  /* end p_dump_resources_gvahand */
+
+
+
+
+/* ----------------------------------------------------
+ * p_dump_process_resource_usage
+ * ----------------------------------------------------
+ * getrusage did not work in 1st test on linux (and is not available on windows)
+ */
+static void
+p_dump_process_resource_usage()
+{
+#ifdef HAVE_SYSINFO
+
+  int rc;
+  struct sysinfo info;
+  
+  rc = sysinfo(&info);
+  if(rc == 0)
+  {
+    printf ("FrameFetcher sysinfo memory in bytes:\n"
+            "  memory total size:         %u\n"
+            "  memory free:               %u\n"
+            "  shared memory total size:  %u\n"
+            "  memory used by buffers:    %u\n"
+            "  swap total size:           %u\n"
+            "  swap free:                 %u\n"
+            "  high memory total size:    %u\n"
+            "  high memory free:          %u\n"
+      , (info.mem_unit * info.totalram)
+      , (info.mem_unit * info.freeram)
+      , (info.mem_unit * info.sharedram)
+      , (info.mem_unit * info.bufferram)
+      , (info.mem_unit * info.totalswap)
+      , (info.mem_unit * info.freeswap)
+      , (info.mem_unit * info.totalhigh)
+      , (info.mem_unit * info.freehigh)
+      );
+  
+  }
+  else
+  {
+    printf("FrameFetcher sysinfo failed with retcode:%d\n", rc);
+  }
+
+
+#else
+  printf("FrameFetcher compiled without sysinfo support (HAVE_SYSINFO not defined)\n");
+#endif
+}
+
+/* ----------------------------------------------------
+ * gap_frame_fetch_dump_resources
+ * ----------------------------------------------------
+ * print current resource usage to stdout
+ * this includes information about 
+ *  - ALL images currently loaded
+ *  - all video filehandles with memory cache sizes
+ * 
+ */
+void
+gap_frame_fetch_dump_resources()
+{
+  gint32 *images;
+  gint    nimages;
+  gint    l_idi;
+  gint    l_number_of_cached_images;
+
+
+  printf("gap_frame_fetch_dump_resources: START pid:%d\n", (int)gap_base_getpid());
+
+
+  l_number_of_cached_images = 0;
+  images = gimp_image_list(&nimages);
+  
+  
+  
+  for(l_idi=0; l_idi < nimages; l_idi++)
+  {
+    GimpParasite  *l_parasite;
+    char          *l_filename;
+    char          *l_cacheInfoString;
+    gint32         image_id;
+    
+    
+    image_id = images[l_idi];
+    l_filename = gimp_image_get_filename(image_id);
+    l_parasite = gimp_image_parasite_find(image_id, GAP_IMAGE_CACHE_PARASITE);
+
+    if(l_parasite)
+    {
+      gint32 *mtime_ptr;
+      gint32 *ffetch_id_ptr;
+      gchar  *filename_ptr;
+      
+      mtime_ptr = (gint32 *) l_parasite->data;
+      ffetch_id_ptr = (gint32 *)&l_parasite->data[sizeof(gint32)];
+      filename_ptr = (gchar *)&l_parasite->data[sizeof(gint32) + sizeof(gint32)];
+    
+      l_number_of_cached_images++;
+
+      l_cacheInfoString = g_strdup_printf("Cache member: mtime:%d ffetchId:%d %s"
+                                         ,*mtime_ptr
+                                         ,*ffetch_id_ptr
+                                         ,filename_ptr
+                                         );
+      
+      gimp_parasite_free(l_parasite);
+    }
+    else
+    {
+      l_parasite = gimp_image_parasite_find(images[l_idi], GAP_IMAGE_DUP_CACHE_PARASITE);
+      if(l_parasite)
+      {
+        gint32 *ffetch_user_id_ptr;
+        ffetch_user_id_ptr = (gint32 *) l_parasite->data;
+
+        l_number_of_cached_images++;
+  
+        l_cacheInfoString = g_strdup_printf("Cache member (merged duplicate): ffetchId:%d"
+                                         ,*ffetch_user_id_ptr
+                                         );
+        
+        gimp_parasite_free(l_parasite);
+      }
+      else
+      {
+        l_cacheInfoString = g_strdup_printf("Not cached");
+      }
+    }
+
+    
+    printf(" FrameFetcher ImgId:%d (%d x %d) %s %s\n"
+          ,(int)image_id
+          ,(int)gimp_image_width(image_id)
+          ,(int)gimp_image_height(image_id)
+          ,l_filename
+          ,l_cacheInfoString
+          );
+    
+    g_free(l_cacheInfoString);
+    if(l_filename != NULL)
+    {
+      g_free(l_filename);
+    }
+    l_parasite = NULL;
+  }
+  
+  if(images)
+  {
+    g_free(images);
+  }
+  
+  printf(" Number of images currently loaded in gimp total: %d gap_ffetch_max_img_cache_elements:%d marked as cache member:%d\n"
+        ,(int)nimages
+        ,(int)p_get_ffetch_max_img_cache_elements()
+        ,(int)l_number_of_cached_images
+        );
+
+
+  p_dump_resources_gvahand();
+  p_dump_process_resource_usage();
+
+}  /* end gap_frame_fetch_dump_resources */
+
diff --git a/gap/gap_frame_fetcher.h b/gap/gap_frame_fetcher.h
index 2c7c553..1134e28 100644
--- a/gap/gap_frame_fetcher.h
+++ b/gap/gap_frame_fetcher.h
@@ -145,4 +145,18 @@ gap_frame_fetch_dup_video(gint32 ffetch_user_id
 void
 gap_frame_fetch_remove_parasite(gint32 image_id);
 
+
+/* ----------------------------------------------------
+ * gap_frame_fetch_dump_resources
+ * ----------------------------------------------------
+ * print current resource usage to stdout
+ * this includes information about 
+ *  - ALL images currently loaded in gimp
+ *  - all video filehandles with memory cache sizes
+ * 
+ */
+void
+gap_frame_fetch_dump_resources();
+
+
 #endif
diff --git a/gap/gap_player_dialog.c b/gap/gap_player_dialog.c
old mode 100644
new mode 100755
index cb05123..1598abe
--- a/gap/gap_player_dialog.c
+++ b/gap/gap_player_dialog.c
@@ -344,6 +344,7 @@ static guchar * p_fetch_videoframe(GapPlayerMainGlobalParams *gpp
                    , gint32 *th_bpp
                    , gint32 *th_width
                    , gint32 *th_height
+                   , gboolean isBackwards
                    );
 static void     p_init_video_playback_cache(GapPlayerMainGlobalParams *gpp);
 static void     p_init_layout_options(GapPlayerMainGlobalParams *gpp);
@@ -366,6 +367,7 @@ static guchar * p_fetch_videoframe_via_cache(GapPlayerMainGlobalParams *gpp
                    , gint32 *th_height_ptr
                    , gint32 *flip_status_ptr
                    , const gchar *ckey
+                   , gboolean isBackwards
                    );
 static void     p_frame_chache_processing(GapPlayerMainGlobalParams *gpp
                    , const gchar *ckey);
@@ -2917,9 +2919,14 @@ p_fetch_videoframe(GapPlayerMainGlobalParams *gpp
                    , gint32 *th_bpp
                    , gint32 *th_width
                    , gint32 *th_height
+                   , gboolean isBackwards
                    )
 {
   guchar *th_data;
+  static gint32 funcId = -1;
+  
+  GAP_TIMM_GET_FUNCTION_ID(funcId, "playerDialog.p_fetch_videoframe");
+
 
   th_data = NULL;
 #ifdef GAP_ENABLE_VIDEOAPI_SUPPORT
@@ -2930,6 +2937,8 @@ p_fetch_videoframe(GapPlayerMainGlobalParams *gpp
   }
   gpp->gva_lock = TRUE;
 
+  GAP_TIMM_START_FUNCTION(funcId);
+
   if((gpp->gva_videofile) && (gpp->gvahand))
   {
     if((strcmp(gpp->gva_videofile, gva_videofile) != 0)
@@ -2966,23 +2975,15 @@ p_fetch_videoframe(GapPlayerMainGlobalParams *gpp
 
      if(global_max_vid_frames_to_keep_cached < 1)
      {
-        char *value_string;
-
-        value_string = gimp_gimprc_query("video-max-frames-keep-cached");
-
-        if(value_string)
-        {
-//printf(" VIDFETCH (4) gimprc: value_string: %s\n", value_string);
-          global_max_vid_frames_to_keep_cached = atol(value_string);
-        }
-        if(global_max_vid_frames_to_keep_cached < 1)
-        {
-          global_max_vid_frames_to_keep_cached = GAP_PLAYER_VID_FRAMES_TO_KEEP_CACHED;
-        }
+        fcache_size = gap_base_get_gimprc_int_value("video-max-frames-keep-cached"
+                                                     , GAP_PLAYER_VID_FRAMES_TO_KEEP_CACHED  /* default */
+                                                     , 2   /* min */
+                                                     , 250 /* max */
+                                                   );
      }
 
-     fcache_size = CLAMP(rangesize, 1, global_max_vid_frames_to_keep_cached);
-     if(fcache_size > gpp->gvahand->fcache.frame_cache_size)
+     // fcache_size = CLAMP(rangesize, 1, global_max_vid_frames_to_keep_cached);
+     if(fcache_size > GVA_get_fcache_size_in_elements(gpp->gvahand))
      {
 //printf(" VIDFETCH (5) gimprc: FCACHE_MAX:%d fcache_size:%d rangesize:%d\n"
 //         , (int)global_max_vid_frames_to_keep_cached
@@ -3015,6 +3016,7 @@ p_fetch_videoframe(GapPlayerMainGlobalParams *gpp
      /* fetch the wanted framenr  */
      th_data = GVA_fetch_frame_to_buffer(gpp->gvahand
                 , do_scale
+                , isBackwards
                 , framenumber
                 , l_deinterlace
                 , l_threshold
@@ -3090,6 +3092,8 @@ p_fetch_videoframe(GapPlayerMainGlobalParams *gpp
   }
   gpp->gva_lock = FALSE;
 
+  GAP_TIMM_STOP_FUNCTION(funcId);
+
 #endif
   return (th_data);
 }  /* end p_fetch_videoframe */
@@ -3221,6 +3225,7 @@ p_fetch_videoframe_via_cache(GapPlayerMainGlobalParams *gpp
                    , gint32 *th_height_ptr
                    , gint32 *flip_status_ptr
                    , const gchar *ckey
+                   , gboolean isBackwards
                    )
 {
   guchar *th_data;
@@ -3244,6 +3249,7 @@ p_fetch_videoframe_via_cache(GapPlayerMainGlobalParams *gpp
                 , th_bpp_ptr
                 , th_width_ptr
                 , th_height_ptr
+                , isBackwards
                 );
   }
 
@@ -3476,6 +3482,10 @@ p_fetch_display_th_data_from_storyboard(GapPlayerMainGlobalParams *gpp
 {
   GapStoryLocateRet *stb_ret;
   guchar *l_th_data;
+  static gint32 funcId = -1;
+  
+  GAP_TIMM_GET_FUNCTION_ID(funcId, "playerDialog.p_fetch_display_th_data_from_storyboard");
+  GAP_TIMM_START_FUNCTION(funcId);
 
   l_th_data = NULL;
   stb_ret = gap_story_locate_framenr(gpp->stb_ptr
@@ -3495,6 +3505,8 @@ p_fetch_display_th_data_from_storyboard(GapPlayerMainGlobalParams *gpp
       {
          if(*filename_pptr)
          {
+           gboolean isBackwards;
+           
            if(gpp->use_thumbnails)
            {
              /* fetch does already scale down to current preview size */
@@ -3516,6 +3528,7 @@ p_fetch_display_th_data_from_storyboard(GapPlayerMainGlobalParams *gpp
                          , stb_ret->stb_elem->delace
                          );
            }
+           isBackwards = (stb_ret->stb_elem->to_frame < stb_ret->stb_elem->from_frame) || gpp->play_backward;
            l_th_data  = p_fetch_videoframe_via_cache(gpp
                          , *filename_pptr
                          , stb_ret->ret_framenr
@@ -3528,6 +3541,7 @@ p_fetch_display_th_data_from_storyboard(GapPlayerMainGlobalParams *gpp
                          , th_height_ptr    /* IN/OUT */
                          , flip_status_ptr  /* IN/OUT */
                          , *ckey_pptr       /* IN */
+                         , isBackwards
                          );
            if(gpp->cancel_video_api)
            {
@@ -3550,6 +3564,8 @@ p_fetch_display_th_data_from_storyboard(GapPlayerMainGlobalParams *gpp
     g_free(stb_ret);
   }
 
+  GAP_TIMM_STOP_FUNCTION(funcId);
+
   return (l_th_data);
 }  /* end p_fetch_display_th_data_from_storyboard */
 
@@ -3869,18 +3885,21 @@ p_render_display_from_active_image_or_file(GapPlayerMainGlobalParams *gpp
 static void
 p_display_frame(GapPlayerMainGlobalParams *gpp, gint32 framenr)
 {
-  char *l_filename;
-   gint32  l_th_width;
-   gint32  l_th_height;
-   gint32  l_th_data_count;
-   gint32  l_th_bpp;
-   gint32  l_flip_request;
-   gint32  l_flip_status;
-   gint32  l_composite_image_id;
-   guchar *l_th_data;
-   gboolean framenr_is_the_active_image;
-   GdkPixbuf *pixbuf;
-   gchar *ckey;
+  char   *l_filename;
+  gint32  l_th_width;
+  gint32  l_th_height;
+  gint32  l_th_data_count;
+  gint32  l_th_bpp;
+  gint32  l_flip_request;
+  gint32  l_flip_status;
+  gint32  l_composite_image_id;
+  guchar *l_th_data;
+  gboolean framenr_is_the_active_image;
+  GdkPixbuf *pixbuf;
+  gchar *ckey;
+  static gint32 funcId = -1;
+  
+  GAP_TIMM_GET_FUNCTION_ID(funcId, "playerDialog.p_display_frame");
 
   /*if(gap_debug) printf("p_display_frame START: framenr:%d\n", (int)framenr);*/
   if(gpp->gva_lock)
@@ -3890,6 +3909,9 @@ p_display_frame(GapPlayerMainGlobalParams *gpp, gint32 framenr)
      */
     return;
   }
+
+  GAP_TIMM_START_FUNCTION(funcId);
+
   ckey = NULL;
   l_th_data = NULL;
   pixbuf = NULL;
@@ -3923,6 +3945,7 @@ p_display_frame(GapPlayerMainGlobalParams *gpp, gint32 framenr)
 
       if(gpp->cancel_video_api)
       {
+        GAP_TIMM_STOP_FUNCTION(funcId);
         return;
       }
     }
@@ -3957,6 +3980,8 @@ p_display_frame(GapPlayerMainGlobalParams *gpp, gint32 framenr)
 
     if(gpp->ainfo_ptr->ainfo_type == GAP_AINFO_MOVIE)
     {
+      gboolean isBackwards;
+      
       /* playback of a single videoclip */
       if(gpp->use_thumbnails)
       {
@@ -3980,6 +4005,7 @@ p_display_frame(GapPlayerMainGlobalParams *gpp, gint32 framenr)
                          );
       }
 
+      isBackwards = (gpp->ainfo_ptr->last_frame_nr < gpp->ainfo_ptr->first_frame_nr) || gpp->play_backward;
       l_th_data  = p_fetch_videoframe_via_cache(gpp
                                      , gpp->ainfo_ptr->old_filename
                                      , framenr
@@ -3992,6 +4018,7 @@ p_display_frame(GapPlayerMainGlobalParams *gpp, gint32 framenr)
                                      , &l_th_height    /* IN/OUT */
                                      , &l_flip_status  /* OUT */
                                      , ckey            /* IN */
+                                     , isBackwards
                                      );
       if(gpp->cancel_video_api)
       {
@@ -4005,6 +4032,8 @@ p_display_frame(GapPlayerMainGlobalParams *gpp, gint32 framenr)
            gtk_progress_bar_set_text(GTK_PROGRESS_BAR(gpp->progress_bar)
                         , _("Canceled"));
         }
+
+        GAP_TIMM_STOP_FUNCTION(funcId);
         return;
       }
 
@@ -4198,6 +4227,8 @@ p_display_frame(GapPlayerMainGlobalParams *gpp, gint32 framenr)
 
   if(l_filename) g_free(l_filename);
 
+  GAP_TIMM_STOP_FUNCTION(funcId);
+
 }  /* end p_display_frame */
 
 /* ------------------------------
@@ -4716,6 +4747,8 @@ on_pause_button_press_event        (GtkButton       *button,
     return FALSE;
   }
 
+  GAP_TIMM_PRINT_FUNCTION_STATISTICS();
+
   if(gpp->progress_bar)
   {
     gtk_progress_bar_set_text(GTK_PROGRESS_BAR(gpp->progress_bar)
diff --git a/gap/gap_story_dialog.c b/gap/gap_story_dialog.c
old mode 100644
new mode 100755
index 6ca7576..db51cfc
--- a/gap/gap_story_dialog.c
+++ b/gap/gap_story_dialog.c
@@ -103,6 +103,27 @@ extern int gap_debug;  /* 1 == print debug infos , 0 dont print debug infos */
 #define GAP_STORY_FW_PTR   "gap_story_fw_ptr"
 #define GAP_STORY_TABW_PTR "gap_story_tabw_ptr"
 
+
+/* render parameters constraint check values */
+
+#define MIN_OPEN_VIDEOFILES 2
+#define MAX_OPEN_VIDEOFILES 100
+#define DEFAULT_OPEN_VIDEOFILES 12
+
+#define MIN_FCACHE_PER_VIDEO 2
+#define MAX_FCACHE_PER_VIDEO 250
+#define DEFAULT_FCACHE_PER_VIDEO 36
+
+#define MIN_IMG_CACHE_ELEMENTS 1
+#define MAX_IMG_CACHE_ELEMENTS 2000
+#define DEFAULT_IMG_CACHE_ELEMENTS 18
+
+#define MIN_RESOURCE_LOG_INTERVAL 0
+#define MAX_RESOURCE_LOG_INTERVAL 100000
+#define DEFAULT_RESOURCE_LOG_INTERVAL 0
+
+
+
 /* the file GAP_DEBUG_STORYBOARD_CONFIG_FILE
  * is a debug configuration intended for development an test
  * if this file exists at start of the storyboard plug-in
@@ -290,6 +311,7 @@ static void     p_menu_win_help_cb (GtkWidget *widget, GapStbMainGlobalParams *s
 static void     p_menu_win_properties_cb (GtkWidget *widget, GapStbMainGlobalParams *sgpp);
 static void     p_menu_win_vthumbs_toggle_cb (GtkWidget *widget, GapStbMainGlobalParams *sgpp);
 static void     p_menu_win_debug_log_to_stdout_cb (GtkWidget *widget, GapStbMainGlobalParams *sgpp);
+static void     p_menu_win_render_properties_cb (GtkWidget *widget, GapStbMainGlobalParams *sgpp);
 
 static void     p_menu_cll_new_cb (GtkWidget *widget, GapStbMainGlobalParams *sgpp);
 static void     p_menu_cll_open_cb (GtkWidget *widget, GapStbMainGlobalParams *sgpp);
@@ -399,6 +421,9 @@ static void    p_save_gimprc_int_value(const char *gimprc_option_name, gint32 va
 static void    p_save_gimprc_gboolean_value(const char *gimprc_option_name, gboolean value);
 static void    p_save_gimprc_layout_settings(GapStbMainGlobalParams *sgpp);
 static void    p_get_gimprc_layout_settings(GapStbMainGlobalParams *sgpp);
+static void    p_save_gimprc_render_settings(GapStbMainGlobalParams *sgpp);
+static void    p_get_gimprc_render_settings(GapStbMainGlobalParams *sgpp);
+
 
 static void    p_reset_progress_bars(GapStbMainGlobalParams *sgpp);
 static void    p_call_external_image_viewer(GapStbFrameWidget *fw);
@@ -1786,6 +1811,48 @@ p_story_call_player(GapStbMainGlobalParams *sgpp
       return;  /* in case of errors: NO playback possible */
       sgpp->in_player_call = FALSE;
     }
+    
+    if(sgpp->stb_preview_render_full_size != TRUE)
+    {
+      /* if the storyboard duplicate (stb_dup) includes at least one unscalable clip 
+       * (where either width or height is fixed to the original size)
+       * then we must force slow internal rendering at full size.
+       */
+      if(gap_story_checkForAtLeatOneClipWithScalingDisabled(stb_dup) == TRUE)
+      {
+        if(gap_debug)
+        {
+          printf("Storyboard includes a non-scaleable clip, forcing rendering at full size!\n");
+        }
+      }
+      else
+      {
+        gint32 previewWidth;
+        gint32 previewHeight;
+
+        /* pick a small size for preview rendering at composite video playback via the
+         * storyboard processor, when possible, for performance reasons.
+         */
+        if(sgpp->plp)
+        {
+          previewWidth = sgpp->plp->pv_width;
+          previewHeight = sgpp->plp->pv_height;
+        }
+        else
+        {
+          /* in case the player was not yet called (e.g. this is the very 1st player call)
+           * the preview size is not yet known.
+           * Assume a preview width of 320 pixel in that case
+           */
+          previewWidth = 320;
+          previewHeight = previewWidth * (stb_dup->master_height / MAX(stb_dup->master_width,1));
+        }
+      
+        stb_dup->master_width = MIN(stb_dup->master_width, previewWidth);
+        stb_dup->master_height = MIN(stb_dup->master_height, previewHeight);
+      }
+    }
+    
   }
 
   if(sgpp->plp)
@@ -5323,6 +5390,176 @@ p_menu_win_debug_log_to_stdout_cb (GtkWidget *widget, GapStbMainGlobalParams *sg
   }
 }  /* end p_menu_win_debug_log_to_stdout_cb */
 
+/* -------------------------------
+ * p_menu_win_render_properties_cb
+ * -------------------------------
+ */
+static void
+p_menu_win_render_properties_cb (GtkWidget *widget, GapStbMainGlobalParams *sgpp)
+{
+  static GapArrArg  argv[8];
+  gint l_ii;
+  
+  gint l_stb_max_open_videofile_idx;
+  gint l_stb_fcache_size_per_videofile_idx;
+  gint l_ffetch_max_img_cache_elements_idx;
+  gint l_stb_resource_log_linterval_idx;
+  gint l_stb_preview_render_full_size_idx;
+  gint l_stb_isMultithreadEnabled_idx;
+  gint l_stb_isMultithreadFfEncoderEnabled_idx;
+  
+  gint multithreadDefaultFlag;
+  gboolean l_rc;
+
+
+  if(gap_debug) printf("p_menu_win_render_properties_cb\n");
+
+  if(sgpp->render_prop_dlg_open)
+  {
+    g_message(_("Global Render Properties dialog already open"));
+    return;
+  }
+
+  sgpp->render_prop_dlg_open = TRUE;
+
+  l_ii = 0;
+  l_stb_max_open_videofile_idx = l_ii;
+  gap_arr_arg_init(&argv[l_ii], GAP_ARR_WGT_INT);
+  argv[l_ii].constraint = TRUE;
+  argv[l_ii].label_txt = _("Max open Videofiles:");
+  argv[l_ii].help_txt  = _("Maximum number of videofiles to be opened at same time while storyboard rendering");
+  argv[l_ii].int_min   = (gint)MIN_OPEN_VIDEOFILES;
+  argv[l_ii].int_max   = (gint)MAX_OPEN_VIDEOFILES;
+  argv[l_ii].int_ret   = (gint)sgpp->stb_max_open_videofile;
+  argv[l_ii].has_default = TRUE;
+  argv[l_ii].int_default = (gint)DEFAULT_OPEN_VIDEOFILES;
+
+  l_ii++; l_stb_fcache_size_per_videofile_idx = l_ii;
+  gap_arr_arg_init(&argv[l_ii], GAP_ARR_WGT_INT);
+  argv[l_ii].constraint = TRUE;
+  argv[l_ii].label_txt = _("Framecache / open video:");
+  argv[l_ii].help_txt  = _("Maximum number of frames to be cached per open video "
+                           "(only relevant when video clips are rendered backwards)");
+  argv[l_ii].int_min   = (gint)MIN_FCACHE_PER_VIDEO;
+  argv[l_ii].int_max   = (gint)MAX_FCACHE_PER_VIDEO;
+  argv[l_ii].int_ret   = (gint)sgpp->stb_fcache_size_per_videofile;
+  argv[l_ii].has_default = TRUE;
+  argv[l_ii].int_default = (gint)DEFAULT_FCACHE_PER_VIDEO;
+
+
+
+  l_ii++; l_ffetch_max_img_cache_elements_idx = l_ii;
+  gap_arr_arg_init(&argv[l_ii], GAP_ARR_WGT_INT);
+  argv[l_ii].constraint = TRUE;
+  argv[l_ii].label_txt = _("Max Image cache:");
+  argv[l_ii].help_txt  = _("Maximum number of images to keep cached "
+                           "(in memory as gimp image without display) "
+                           "while storyboard rendering");
+  argv[l_ii].int_min   = (gint)MIN_IMG_CACHE_ELEMENTS;
+  argv[l_ii].int_max   = (gint)MAX_IMG_CACHE_ELEMENTS;
+  argv[l_ii].int_ret   = (gint)sgpp->ffetch_max_img_cache_elements;
+  argv[l_ii].has_default = TRUE;
+  argv[l_ii].int_default = (gint)DEFAULT_IMG_CACHE_ELEMENTS;
+
+  l_ii++; l_stb_resource_log_linterval_idx = l_ii;
+  gap_arr_arg_init(&argv[l_ii], GAP_ARR_WGT_INT);
+  argv[l_ii].constraint = TRUE;
+  argv[l_ii].label_txt = _("Resource Loginterval:");
+  argv[l_ii].help_txt  = _("Value 0 turns off resource logging to stdout. "
+                           "Value n logs the current resource usage after each n processed frames to stdout "
+                           "(this includes the list of cached images and opened videofiles)");
+  argv[l_ii].int_min   = (gint)MIN_RESOURCE_LOG_INTERVAL;
+  argv[l_ii].int_max   = (gint)MAX_RESOURCE_LOG_INTERVAL;
+  argv[l_ii].int_ret   = (gint)sgpp->stb_resource_log_linterval;
+  argv[l_ii].has_default = TRUE;
+  argv[l_ii].int_default = (gint)DEFAULT_RESOURCE_LOG_INTERVAL;
+
+
+  l_ii++; l_stb_preview_render_full_size_idx = l_ii;
+  gap_arr_arg_init(&argv[l_ii], GAP_ARR_WGT_TOGGLE);
+  argv[l_ii].constraint = TRUE;
+  argv[l_ii].label_txt = _("Render preview at full size:");
+  argv[l_ii].help_txt  = _("ON: Rendering of composite frames for preview purpose is done (slow) at full size. "
+                           "OFF: storyboard rendering for preview purpose is done at small size where possible. "
+                           "(typically faster but disables extraction of a composite frame at original size "
+                           "via click into the player preview)");
+  argv[l_ii].int_ret   = (gint)sgpp->stb_preview_render_full_size;
+  argv[l_ii].has_default = TRUE;
+  argv[l_ii].int_default = (gint)0;
+
+
+  /* the default for multiprocessing support is derived from
+   * gimprc "num-processors" configuration. In case gimp is configured to use
+   * only one processor the default is NO otherwise it is YES.
+   */
+  multithreadDefaultFlag = 0; /* FALSE */
+  if (gap_base_get_gimprc_int_value("num-processors"
+                               , 1  /* default */
+                               , 1  /* min */
+                               , 32 /* max */
+                               ) > 1)
+  {
+    multithreadDefaultFlag = 1; /* TRUE */
+  }
+
+  l_ii++; l_stb_isMultithreadEnabled_idx = l_ii;
+  gap_arr_arg_init(&argv[l_ii], GAP_ARR_WGT_TOGGLE);
+  argv[l_ii].constraint = TRUE;
+  argv[l_ii].label_txt = _("Multiprocessor Storyboard Support:");
+  argv[l_ii].help_txt  = _("ON: Rendering of composite storyboard frames uses more than one processor. "
+                           "(reading frames from videoclips is done by parallel running prefetch processing) " 
+                           "OFF: Rendering of composite frames uses only one processor.");
+  argv[l_ii].int_ret   = (gint)sgpp->stb_isMultithreadEnabled;
+  argv[l_ii].has_default = TRUE;
+  argv[l_ii].int_default = (gint)multithreadDefaultFlag;
+
+
+  l_ii++; l_stb_isMultithreadFfEncoderEnabled_idx = l_ii;
+  gap_arr_arg_init(&argv[l_ii], GAP_ARR_WGT_TOGGLE);
+  argv[l_ii].constraint = TRUE;
+  argv[l_ii].label_txt = _("Multiprocessor Encoder Support:");
+  argv[l_ii].help_txt  = _("ON: Video encoders shall use more than one processor where implemented. "
+                           "The ffmpeg based video encoder implementation supports parallel processing. " 
+                           "OFF: Video encoders use only one processor.");
+  argv[l_ii].int_ret   = (gint)gap_base_get_gimprc_gboolean_value(
+                              GAP_GIMPRC_VIDEO_ENCODER_FFMPEG_MULTIPROCESSOR_ENABLE
+                             , multithreadDefaultFlag  /* default */
+                             );
+  argv[l_ii].has_default = TRUE;
+  argv[l_ii].int_default = (gint)multithreadDefaultFlag;
+
+  /* the reset to default button */
+  l_ii++;
+  gap_arr_arg_init(&argv[l_ii], GAP_ARR_WGT_DEFAULT_BUTTON);
+  argv[l_ii].label_txt = _("Default");
+  argv[l_ii].help_txt  = _("Use the standard built-in storyboard render settings");
+
+  l_rc = gap_arr_ok_cancel_dialog( _("Global Storyboard Render Properties")
+                                 , _("Render Settings")
+                                 ,G_N_ELEMENTS(argv), argv
+                                 );
+  sgpp->render_prop_dlg_open = FALSE;
+
+  if(l_rc == TRUE)
+  {
+    gboolean isFFMpegEncoderMultiprocessorSupport;
+    
+    sgpp->stb_max_open_videofile         = argv[l_stb_max_open_videofile_idx].int_ret;
+    sgpp->stb_fcache_size_per_videofile  = argv[l_stb_fcache_size_per_videofile_idx].int_ret;
+    sgpp->ffetch_max_img_cache_elements  = argv[l_ffetch_max_img_cache_elements_idx].int_ret;
+    sgpp->stb_resource_log_linterval     = argv[l_stb_resource_log_linterval_idx].int_ret;
+    sgpp->stb_preview_render_full_size   = (argv[l_stb_preview_render_full_size_idx].int_ret != 0);
+    sgpp->stb_isMultithreadEnabled       = (argv[l_stb_isMultithreadEnabled_idx].int_ret != 0);
+    isFFMpegEncoderMultiprocessorSupport = (argv[l_stb_isMultithreadFfEncoderEnabled_idx].int_ret != 0);
+
+    p_save_gimprc_render_settings(sgpp);
+    p_save_gimprc_gboolean_value(GAP_GIMPRC_VIDEO_ENCODER_FFMPEG_MULTIPROCESSOR_ENABLE
+                              , isFFMpegEncoderMultiprocessorSupport
+                              );
+    
+  }
+}  /* end p_menu_win_render_properties_cb  */
+
 
 /* -----------------------------
  * p_menu_cll_new_cb
@@ -5932,6 +6169,12 @@ p_make_menu_global(GapStbMainGlobalParams *sgpp, GtkWidget *menu_bar)
                           , sgpp
                           );
 
+   p_make_item_with_label(file_menu, _("Render Settings")
+                          , p_menu_win_render_properties_cb
+                          , sgpp
+                          );
+
+
    sgpp->menu_item_win_vthumbs =
    p_make_check_item_with_label(file_menu, _("Video thumbnails")
                           , p_menu_win_vthumbs_toggle_cb
@@ -6537,6 +6780,7 @@ gap_story_dlg_fetch_videoframe(GapStbMainGlobalParams *sgpp
      /* fetch the wanted framenr  */
      th_data = GVA_fetch_frame_to_buffer(sgpp->gvahand
                 , do_scale
+                , FALSE           /* isBackwards */
                 , framenumber
                 , l_deinterlace
                 , l_threshold
@@ -8078,6 +8322,66 @@ p_get_gimprc_layout_settings(GapStbMainGlobalParams *sgpp)
 
 }  /* end p_get_gimprc_layout_settings */
 
+
+/* ---------------------------------
+ * p_save_gimprc_render_settings
+ * ---------------------------------
+ * Save global storyboard render settings as gimprc parameters.
+ */
+static void
+p_save_gimprc_render_settings(GapStbMainGlobalParams *sgpp)
+{
+  p_save_gimprc_int_value("gap_ffetch_max_img_cache_elements", sgpp->ffetch_max_img_cache_elements);
+  p_save_gimprc_int_value(GAP_GIMPRC_VIDEO_STORYBOARD_MAX_OPEN_VIDEOFILES, sgpp->stb_max_open_videofile);
+  p_save_gimprc_int_value(GAP_GIMPRC_VIDEO_STORYBOARD_FCACHE_SIZE_PER_VIDEOFILE, sgpp->stb_fcache_size_per_videofile);
+  p_save_gimprc_int_value(GAP_GIMPRC_VIDEO_STORYBOARD_RESOURCE_LOG_INTERVAL, sgpp->stb_resource_log_linterval);
+  p_save_gimprc_gboolean_value(GAP_GIMPRC_VIDEO_STORYBOARD_PREVIEW_RENDER_FULL_SIZE, sgpp->stb_preview_render_full_size);
+  p_save_gimprc_gboolean_value(GAP_GIMPRC_VIDEO_STORYBOARD_MULTIPROCESSOR_ENABLE, sgpp->stb_isMultithreadEnabled);
+
+}  /* end p_save_gimprc_render_settings */
+
+
+/* ---------------------------------
+ * p_get_gimprc_render_settings
+ * ---------------------------------
+ */
+static void
+p_get_gimprc_render_settings(GapStbMainGlobalParams *sgpp)
+{
+
+  sgpp->ffetch_max_img_cache_elements  = gap_base_get_gimprc_int_value("gap_ffetch_max_img_cache_elements"
+                    ,DEFAULT_IMG_CACHE_ELEMENTS
+                    ,MIN_IMG_CACHE_ELEMENTS
+                    ,MAX_IMG_CACHE_ELEMENTS
+                    );
+  sgpp->stb_max_open_videofile  = gap_base_get_gimprc_int_value(GAP_GIMPRC_VIDEO_STORYBOARD_MAX_OPEN_VIDEOFILES
+                    ,DEFAULT_OPEN_VIDEOFILES
+                    ,MIN_OPEN_VIDEOFILES
+                    ,MAX_OPEN_VIDEOFILES
+                    );
+
+  sgpp->stb_fcache_size_per_videofile  = gap_base_get_gimprc_int_value(GAP_GIMPRC_VIDEO_STORYBOARD_FCACHE_SIZE_PER_VIDEOFILE
+                    ,DEFAULT_FCACHE_PER_VIDEO
+                    ,MIN_FCACHE_PER_VIDEO
+                    ,MAX_FCACHE_PER_VIDEO
+                    );
+
+  sgpp->stb_resource_log_linterval  = gap_base_get_gimprc_int_value(GAP_GIMPRC_VIDEO_STORYBOARD_RESOURCE_LOG_INTERVAL
+                    ,0  /* default */
+                    ,0  /* min */
+                    ,100000  /* max */
+                    );
+
+
+  sgpp->stb_preview_render_full_size  = gap_base_get_gimprc_gboolean_value(
+                    GAP_GIMPRC_VIDEO_STORYBOARD_PREVIEW_RENDER_FULL_SIZE
+                    ,FALSE  /* default */
+                    );
+
+  sgpp->stb_isMultithreadEnabled = gap_story_isMultiprocessorSupportEnabled();
+
+}  /* end p_get_gimprc_render_settings */
+
 /* ---------------------------------
  * p_reset_progress_bars
  * ---------------------------------
@@ -8169,12 +8473,17 @@ gap_storyboard_dialog(GapStbMainGlobalParams *sgpp)
   sgpp->menu_item_stb_close = NULL;
 
   sgpp->win_prop_dlg_open = FALSE;
+  sgpp->render_prop_dlg_open = FALSE;
   sgpp->dnd_pixbuf = NULL;
 
   /* get layout settings from gimprc
    * (keeping initial values if no layout settings available in gimprc)
    */
   p_get_gimprc_layout_settings(sgpp);
+  /* get render settings from gimprc
+   * (using default values if no render settings are available in gimprc)
+   */
+  p_get_gimprc_render_settings(sgpp);
 
   /*  The dialog and main vbox  */
   /* the help_id is passed as NULL to avoid creation of the HELP button
diff --git a/gap/gap_story_file.c b/gap/gap_story_file.c
old mode 100644
new mode 100755
index de8b7f4..67f09f2
--- a/gap/gap_story_file.c
+++ b/gap/gap_story_file.c
@@ -34,6 +34,7 @@
 
 #include <string.h>
 #include <stdlib.h>
+#include <math.h>
 
 
 #include <glib/gstdio.h>
@@ -8948,7 +8949,7 @@ gap_story_get_default_attribute(gint att_typ_idx)
  *   opacity       0.0 .. fully transparent, 1.0 fully opaque
  * )
  *
- * the results are written to the speccified result_attr structure,
+ * the results are written to the specified result_attr structure,
  * and shall be applied to the layer.
  *
  */
@@ -8971,14 +8972,14 @@ gap_story_file_calculate_render_attributes(GapStoryCalcAttr *result_attr
     , gdouble move_y
     )
 {
-  gint32 result_width;
-  gint32 result_height;
-  gint32 result_ofsx;
-  gint32 result_ofsy;
-  gint32 l_tmp_width;
-  gint32 l_tmp_height;
-  gint32 center_x_ofs;
-  gint32 center_y_ofs;
+  gdouble result_width;
+  gdouble result_height;
+  gdouble result_ofsx;
+  gdouble result_ofsy;
+  gdouble l_tmp_width;
+  gdouble l_tmp_height;
+  gdouble center_x_ofs;
+  gdouble center_y_ofs;
 
 
   if(result_attr == NULL)
@@ -9021,11 +9022,11 @@ gap_story_file_calculate_render_attributes(GapStoryCalcAttr *result_attr
     {
       if(fit_height)
       {
-         l_tmp_width = vid_height * l_frame_prop;
+         l_tmp_width = (gdouble)vid_height * l_frame_prop;
       }
       if(fit_width)
       {
-         l_tmp_height = vid_width / l_frame_prop;
+         l_tmp_height = (gdouble)vid_width / l_frame_prop;
       }
     }
 
@@ -9036,7 +9037,7 @@ gap_story_file_calculate_render_attributes(GapStoryCalcAttr *result_attr
   {
     if(fit_width)
     {
-      result_width  = MAX(1, (vid_width  * scale_x));
+      result_width  = MAX(1, ((gdouble)vid_width  * scale_x));
     }
     else
     {
@@ -9045,7 +9046,7 @@ gap_story_file_calculate_render_attributes(GapStoryCalcAttr *result_attr
 
     if(fit_height)
     {
-      result_height = MAX(1, (vid_height * scale_y));
+      result_height = MAX(1, ((gdouble)vid_height * scale_y));
     }
     else
     {
@@ -9053,23 +9054,23 @@ gap_story_file_calculate_render_attributes(GapStoryCalcAttr *result_attr
     }
   }
 
+  result_attr->opacity = CLAMP(opacity * 100.0, 0.0, 100.0);
+  result_attr->width = rint(result_width);
+  result_attr->height = rint(result_height);
 
   /* offset calculation */
   {
 
-    center_x_ofs = (vid_width/2) -  (result_width/2);
-    center_y_ofs = (vid_height/2) - (result_height/2);
+    center_x_ofs = ((gdouble)vid_width/2.0) -  (result_width/2.0);
+    center_y_ofs = ((gdouble)vid_height/2.0) - (result_height/2.0);
 
-    result_ofsx  = center_x_ofs + ((result_width / 2)  * move_x) + ((vid_width / 2)  * move_x);
-    result_ofsy  = center_y_ofs + ((result_height / 2 ) * move_y) + ((vid_height / 2 ) * move_y);
+    result_ofsx  = center_x_ofs + (((gdouble)result_attr->width / 2.0)  * move_x) + (((gdouble)vid_width / 2.0)  * move_x);
+    result_ofsy  = center_y_ofs + (((gdouble)result_attr->height / 2.0 ) * move_y) + (((gdouble)vid_height / 2.0 ) * move_y);
 
   }
 
-  result_attr->opacity = CLAMP(opacity * 100.0, 0.0, 100.0);
-  result_attr->width = result_width;
-  result_attr->height = result_height;
-  result_attr->x_offs = result_ofsx + ((view_vid_width - vid_width) / 2);
-  result_attr->y_offs = result_ofsy + ((view_vid_height - vid_height) / 2);
+  result_attr->x_offs = rint(result_ofsx + (((gdouble)view_vid_width - (gdouble)vid_width) / 2.0));
+  result_attr->y_offs = rint(result_ofsy + (((gdouble)view_vid_height - (gdouble)vid_height) / 2.0));
 
 
   /* calculate visible rectangle size after clipping */
@@ -9850,3 +9851,82 @@ gap_story_transform_rotate_layer(gint32 image_id, gint32 layer_id, gdouble rotat
 
 
 }  /* end gap_story_transform_rotate_layer */
+
+
+/* --------------------------------------------------
+ * gap_story_checkForAtLeatOneClipWithScalingDisabled
+ * --------------------------------------------------
+ * returns TRUE if at least one unscaleable clip was found in the specified storyboard,
+ *         FALSE in case all clips are scaleable.
+ * Unscaleable clips are clips where either width or height is fixed to the original size.
+ * Note that all sections (including subsections, but excluding the mask_section) and all video tracks
+ * are included in the search for non-scalable clips.
+ */
+gboolean
+gap_story_checkForAtLeatOneClipWithScalingDisabled(GapStoryBoard *stb_ptr)
+{
+  GapStorySection   *section;
+  GapStoryElem      *stb_elem;
+
+  for(section = stb_ptr->stb_section; section != NULL; section = section->next)
+  {
+    if (section == stb_ptr->mask_section)
+    {
+      continue;
+    }
+
+    for(stb_elem = section->stb_elem; stb_elem != NULL;  stb_elem = stb_elem->next)
+    {
+      gboolean isScaleableClip;
+      
+      isScaleableClip = FALSE;
+      if(stb_elem->att_fit_width == TRUE)
+      {
+        if ((stb_elem->att_keep_proportions == TRUE)
+        ||  (stb_elem->att_fit_height == TRUE))
+        {
+          isScaleableClip = TRUE;
+        }
+      }
+      else if (stb_elem->att_fit_height == TRUE)
+      {
+        if (stb_elem->att_keep_proportions == TRUE)
+        {
+          isScaleableClip = TRUE;
+        }
+      }
+      
+      if(isScaleableClip != TRUE)
+      {
+        return(TRUE);
+      }
+    }
+
+  }
+
+  return (FALSE);
+}  /* end gap_story_checkForAtLeatOneClipWithScalingDisabled */
+
+
+/* -------------------------------------------
+ * gap_story_isMultiprocessorSupportEnabled 
+ * -------------------------------------------
+ * this procedure checks the gimprc parameters configuration
+ * for multiprocessor support.
+ */
+gboolean
+gap_story_isMultiprocessorSupportEnabled(void)
+{
+  gboolean  isMultithreadEnabled;
+  gint      numProcessors;
+  gboolean  mpDefaultFlag; 
+
+  isMultithreadEnabled = FALSE;
+  numProcessors = gap_base_get_numProcessors();
+  mpDefaultFlag = (numProcessors > 1);
+  isMultithreadEnabled = gap_base_get_gimprc_gboolean_value(GAP_GIMPRC_VIDEO_STORYBOARD_MULTIPROCESSOR_ENABLE
+                                     , mpDefaultFlag  /* default */
+                                     );
+  return(isMultithreadEnabled);
+  
+}  /* end gap_story_isMultiprocessorSupportEnabled */
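
Illustration (not part of the patch): a minimal caller-side sketch of how the two new query procedures could be combined. The helper name p_example_can_render_preview_small and the decision logic are assumptions for illustration only; the commit itself only provides the two checks.

/* hypothetical caller: decide whether a reduced-size preview render is safe */
static gboolean
p_example_can_render_preview_small(GapStoryBoard *stb_ptr, GapStbMainGlobalParams *sgpp)
{
  if (sgpp->stb_preview_render_full_size)
  {
    return (FALSE);   /* user explicitly requested full size preview rendering */
  }

  /* reduced-size rendering is only safe when all clips are scaleable */
  if (gap_story_checkForAtLeatOneClipWithScalingDisabled(stb_ptr))
  {
    return (FALSE);
  }

  return (TRUE);
}
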
diff --git a/gap/gap_story_file.h b/gap/gap_story_file.h
old mode 100644
new mode 100755
index 5551e1a..d3e4ae2
--- a/gap/gap_story_file.h
+++ b/gap/gap_story_file.h
@@ -52,6 +52,14 @@
 #define GAP_STB_MAX_FRAMENR 99999999
 
 
+
+#define GAP_GIMPRC_VIDEO_STORYBOARD_MULTIPROCESSOR_ENABLE      "video-storyboard-multiprocessor-enable"
+#define GAP_GIMPRC_VIDEO_STORYBOARD_PREVIEW_RENDER_FULL_SIZE   "video-storyboard-preview-render-full-size"
+#define GAP_GIMPRC_VIDEO_STORYBOARD_MAX_OPEN_VIDEOFILES        "video-storyboard-max-open-videofiles"
+#define GAP_GIMPRC_VIDEO_STORYBOARD_FCACHE_SIZE_PER_VIDEOFILE  "video-storyboard-fcache-size-per-videofile"
+#define GAP_GIMPRC_VIDEO_STORYBOARD_RESOURCE_LOG_INTERVAL      "video-storyboard-resource-log-interval"
+#define GAP_GIMPRC_VIDEO_ENCODER_FFMPEG_MULTIPROCESSOR_ENABLE  "video-enoder-ffmpeg-multiprocessor-enable"
+
 /* GapStoryRecordType enum values are superset of GapLibAinfoType
  * from the sourcefile gap_lib.h
  */
@@ -490,5 +498,7 @@ GapStoryVideoFileRef * gap_story_get_video_file_ref_list(GapStoryBoard *stb);
 char *                 gap_story_build_basename(const char *filename);
 
 void                   gap_story_transform_rotate_layer(gint32 image_id, gint32 layer_id, gdouble rotate);
+gboolean               gap_story_checkForAtLeatOneClipWithScalingDisabled(GapStoryBoard *stb_ptr);
+gboolean               gap_story_isMultiprocessorSupportEnabled(void);
 
 #endif
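
For reference, a minimal sketch of how the gimprc keys defined above are read back at runtime with the libgapbase getters used elsewhere in this commit; the default/min/max values shown here mirror the ones used in gap_story_render_processor.c and are examples only.

  gboolean isMpEnabled;
  gint32   maxOpenVideofiles;

  /* boolean key: the default applies when the key is not present in gimprc */
  isMpEnabled = gap_base_get_gimprc_gboolean_value(
                    GAP_GIMPRC_VIDEO_STORYBOARD_MULTIPROCESSOR_ENABLE
                  , FALSE   /* default */
                  );

  /* integer key: the value is constrained to the given min/max range */
  maxOpenVideofiles = gap_base_get_gimprc_int_value(
                    GAP_GIMPRC_VIDEO_STORYBOARD_MAX_OPEN_VIDEOFILES
                  , 12      /* default */
                  , 2       /* min */
                  , 100     /* max */
                  );
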
diff --git a/gap/gap_story_main.c b/gap/gap_story_main.c
old mode 100644
new mode 100755
index 82ba96a..2fd123a
--- a/gap/gap_story_main.c
+++ b/gap/gap_story_main.c
@@ -81,6 +81,14 @@ static GapStbMainGlobalParams global_params =
 , FALSE       /*  gboolean           force_stb_aspect */
 , GAP_STB_CLIPTARGET_CLIPLIST_APPEND  /* GapStoryClipTargetEnum clip_target */
 , GAP_VTHUMB_PREFETCH_NOT_ACTIVE      /* GapVThumbPrefetchProgressMode    vthumb_prefetch_in_progress */
+,12           /*  gint32             stb_max_open_videofile */
+,36           /*  gint32             stb_fcache_size_per_videofile */
+, 18          /*  gint32             ffetch_max_img_cache_elements */
+, 0           /*  gint32             stb_resource_log_linterval */
+, FALSE       /*  gboolean           stb_isMultithreadEnabled */
+, FALSE       /*  gboolean           stb_preview_render_full_size */
+, FALSE       /*  gboolean           render_prop_dlg_open */
+
 , FALSE       /*  gboolean           win_prop_dlg_open */
 , GAP_STB_EDMO_SEQUENCE_NUMBER   /*  gint32             cll_edmode */
 , 5                              /*  gint32 cll_cols  */
diff --git a/gap/gap_story_main.h b/gap/gap_story_main.h
old mode 100644
new mode 100755
index ee3a554..8f8c95f
--- a/gap/gap_story_main.h
+++ b/gap/gap_story_main.h
@@ -453,6 +453,13 @@ typedef struct GapStbMainGlobalParams  /* nickname: sgpp */
    * those values are used for LAST_VALUES runmode at startup only
    * rendering uses the values in the tabw structures
    */
+  gint32                  stb_max_open_videofile;
+  gint32                  stb_fcache_size_per_videofile;
+  gint32                  ffetch_max_img_cache_elements;
+  gint32                  stb_resource_log_linterval;
+  gboolean                stb_isMultithreadEnabled;
+  gboolean                stb_preview_render_full_size;
+  gboolean                render_prop_dlg_open;
   gboolean                win_prop_dlg_open;
   GapStoryElemDisplayMode cll_edmode;
   gint32                  cll_cols;
diff --git a/gap/gap_story_render_audio.c b/gap/gap_story_render_audio.c
index d8220bd..d8e380e 100644
--- a/gap/gap_story_render_audio.c
+++ b/gap/gap_story_render_audio.c
@@ -1189,7 +1189,8 @@ gap_story_render_audio_new_audiorange_element(GapStoryRenderAudioType  aud_type
            }
            if(gvahand)
            {
-             GVA_set_fcache_size(gvahand, GAP_STB_RENDER_GVA_FRAMES_TO_KEEP_CACHED);
+             /* set smallest fcache size 1 because fcache is not relevant for audio processing */
+             GVA_set_fcache_size(gvahand, 1);
              aud_elem->samplerate        = gvahand->samplerate;
              aud_elem->channels          = gvahand->audio_cannels;
              aud_elem->bytes_per_sample  = gvahand->audio_cannels * 2;  /* API operates with 16 bit per sample */
diff --git a/gap/gap_story_render_lossless.c b/gap/gap_story_render_lossless.c
old mode 100644
new mode 100755
index 25f07b8..9913f2d
--- a/gap/gap_story_render_lossless.c
+++ b/gap/gap_story_render_lossless.c
@@ -320,7 +320,7 @@ p_chunk_fetch_from_single_image(const char *videofile
  * p_check_chunk_fetch_possible
  * ----------------------------------------------------
  * This procedure checks the preconditions for a possible
- * fetch of already compresses frame chunk.
+ * fetch of an already compressed frame chunk.
  * (a frame chunk can be one raw frame chunk fetched from a videofile
  *  or a single image frame file that shall be loaded 1:1 into memory)
  * - there is only 1 videoinput track at this master_frame_nr
@@ -339,9 +339,7 @@ p_check_chunk_fetch_possible(GapStoryRenderVidHandle *vidhand
                     , GapStoryRenderFrameRangeElem **frn_elem  /* OUT: pointer to relevant frame range element */
                     )
 {
-  gint    l_track;
-  gint32    l_track_min;
-  gint32    l_track_max;
+  gint32    l_track;
   gchar  *l_framename;
   gchar  *l_videofile;
   gdouble l_rotate;
@@ -373,13 +371,11 @@ p_check_chunk_fetch_possible(GapStoryRenderVidHandle *vidhand
   l_videofile = NULL;
   l_cnt_active_tracks = 0;
 
-  p_find_min_max_vid_tracknumbers(vidhand->frn_list, &l_track_min, &l_track_max);
-
   /* findout if there is just one input track from type videofile
   * (that possibly could be fetched as compressed videoframe_chunk
    *  and passed 1:1 to the calling encoder)
    */
-  for(l_track = MIN(GAP_STB_MAX_VID_INTERNAL_TRACKS, l_track_max); l_track >= MAX(0, l_track_min); l_track--)
+  for(l_track = vidhand->maxVidTrack; l_track >= vidhand->minVidTrack; l_track--)
   {
     l_framename = p_fetch_framename(vidhand->frn_list
                  , master_frame_nr /* starts at 1 */
@@ -537,61 +533,6 @@ p_check_basic_chunk_fetch_conditions(gint32 check_flags
 }  /* end p_check_basic_chunk_fetch_conditions */
 
 
-/* ----------------------------------------------------
- * p_check_and_open_video_handle
- * ----------------------------------------------------
- *
- */
-static void
-p_check_and_open_video_handle(GapStoryRenderFrameRangeElem *frn_elem
-   , GapStoryRenderVidHandle *vidhand
-   , gint32 master_frame_nr
-   , const gchar *videofile
-   )
-{
-  if(frn_elem->gvahand == NULL)
-  {
-     /* before we open a new GVA videohandle, lets check
-      * if another element has already opened this videofile,
-      * and reuse the already open gvahand handle if possible
-      */
-     frn_elem->gvahand = p_try_to_steal_gvahand(vidhand
-                                                 , master_frame_nr
-                                                 , frn_elem->basename
-                                                 , frn_elem->exact_seek
-                                                 );
-     if(frn_elem->gvahand == NULL)
-     {
-       if(vidhand->preferred_decoder)
-       {
-         frn_elem->gvahand = GVA_open_read_pref(videofile
-                                , frn_elem->seltrack
-                                , 1 /* aud_track */
-                                , vidhand->preferred_decoder
-                                , FALSE  /* use MMX if available (disable_mmx == FALSE) */
-                                );
-       }
-       else
-       {
-         frn_elem->gvahand = GVA_open_read(videofile
-                                           ,frn_elem->seltrack
-                                           ,1 /* aud_track */
-                                           );
-       }
-       if(frn_elem->gvahand)
-       {
-         GVA_set_fcache_size(frn_elem->gvahand, GAP_STB_RENDER_GVA_FRAMES_TO_KEEP_CACHED);
-
-         frn_elem->gvahand->do_gimp_progress = vidhand->do_gimp_progress;
-         if(frn_elem->exact_seek == 1)
-         {
-           /* configure the GVA Procedures for exact (but slow) seek emulaion */
-           frn_elem->gvahand->emulate_seek = TRUE;
-         }
-       }
-     }
-  }
-}  /* end p_check_and_open_video_handle */
 
 
 /* ----------------------------------------------------
@@ -1102,7 +1043,7 @@ p_story_attempt_fetch_chunk(GapStoryRenderVidHandle *vidhand
 
 
 /* ----------------------------------------------------
- * gap_story_render_fetch_composite_image_or_chunk
+ * gap_story_render_fetch_composite_image_or_chunk         DEPRECATED
  * ----------------------------------------------------
  *
  * fetch composite VIDEO Image at a given master_frame_nr
@@ -1133,7 +1074,7 @@ p_story_attempt_fetch_chunk(GapStoryRenderVidHandle *vidhand
  *                                                  (typical for MPEG I frames)
  *      GAP_VID_CHCHK_FLAG_VCODEC_NAME        check for a compatible vcodec_name
  *
- +
+ *
  * RETURN TRUE on success, FALSE on ERRORS
  *    if an already compressed video_frame_chunk was fetched then return the size of the chunk
  *        in the *video_frame_chunk_size OUT Parameter.
@@ -1197,17 +1138,14 @@ gap_story_render_fetch_composite_image_or_chunk(GapStoryRenderVidHandle *vidhand
 
 #ifdef GAP_ENABLE_VIDEOAPI_SUPPORT
 
-  if(filtermacro_file)
+  if(p_isFiltermacroActive(filtermacro_file))
   {
-     if(*filtermacro_file != '\0')
-     {
-       if(gap_debug)
-       {
-         printf("chunk fetch disabled due to filtermacro procesing\n");
-       }
-       /* if a filtermacro_file is force disable chunk fetching */
-       l_enable_chunk_fetch = FALSE;  
-     }
+    if(gap_debug)
+    {
+      printf("chunk fetch disabled due to filtermacro procesing\n");
+    }
+    /* if a filtermacro_file is force disable chunk fetching */
+    l_enable_chunk_fetch = FALSE;  
   }
 
   if (l_enable_chunk_fetch)
@@ -1292,4 +1230,236 @@ gap_story_render_fetch_composite_image_or_chunk(GapStoryRenderVidHandle *vidhand
 
   return(FALSE);
 
-} /* end gap_story_render_fetch_composite_image_or_chunk */
+} /* end gap_story_render_fetch_composite_image_or_chunk DEPRECATED */
+
+
+/* ------------------------------------------------------------------------
+ * gap_story_render_fetch_composite_image_or_buffer_or_chunk (extended API)
+ * ------------------------------------------------------------------------
+ *
+ * fetch composite VIDEO frame at a given master_frame_nr
+ * within a storyboard framerange list.
+ *
+ * on success the result can be delivered in one of these types:
+ *   GAP_STORY_FETCH_RESULT_IS_IMAGE
+ *   GAP_STORY_FETCH_RESULT_IS_RAW_RGB888
+ *   GAP_STORY_FETCH_RESULT_IS_COMPRESSED_CHUNK
+ *
+ * The delivered data type depends on the flags:
+ *   dont_recode_flag
+ *   enable_rgb888_flag
+ *
+ * In case all of those flags are FALSE, the caller can always expect
+ * a gimp image (GAP_STORY_FETCH_RESULT_IS_IMAGE) as result on success.
+ *
+ * Encoders that can handle the RGB888 colormodel can set the enable_rgb888_flag.
+ *
+ *   If the enable_rgb888_flag is TRUE and the referred frame can be copied
+ *   without render transitions from only one input video clip
+ *   then the render engine is bypassed, and the result will be of type 
+ *   GAP_STORY_FETCH_RESULT_IS_RAW_RGB888 for this frame.
+ *   (this speeds up encoding of simple 1:1 copied video clip frames
+ *   because the conversion from rgb888 to a gimp drawable and back to rgb888
+ *   can be skipped in this special case)
+ *   
+ *
+ * Encoders that support lossless video cut can set the dont_recode_flag.
+ *
+ *   if the dont_recode_flag is TRUE, the render engine is also bypassed where
+ *   a direct fetch of the (already compressed) Frame chunk from an input videofile
+ *   is possible for the master_frame_nr.
+ *   (in case there are any transitions or mix with other input channels
+ *   or in case the input is not an mpeg encoded video file it is not possible to 
+ *   make a lossless copy of the input frame data)
+ *
+ *   Restriction: the current implementation provides lossless cut only for MPEG1 and MPEG2
+ *
+ *
+ * the compressed fetch depends on the following conditions:
+ * - dont_recode_flag == TRUE
+ * - there is only 1 videoinput track at this master_frame_nr
+ * - the videodecoder must support a read_video_chunk procedure
+ *   (libmpeg3 has this support; for libavformat the support is available via the GAP video API)
+ *   (TODO: future releases should also check for the same vcodec_name)
+ * - the videoframe must match 1:1 in size
+ * - there are no transformations (opacity, offsets ....)
+ * - there are no filtermacros to perform on the fetched frame
+ *
+ * check_flags:
+ *   force checks if the corresponding bit value is set. Supported bit values are:
+ *      GAP_VID_CHCHK_FLAG_SIZE               check if width and height are equal
+ *      GAP_VID_CHCHK_FLAG_MPEG_INTEGRITY     checks for MPEG P and B frames whether the sequence of fetched frames
+ *                                                   also includes the referred I frame (before or after the currently
+ *                                                   handled frame)
+ *      GAP_VID_CHCHK_FLAG_JPG                check if the fetched chunk is a JPEG encoded frame.
+ *                                                  (typical for MPEG I frames)
+ *      GAP_VID_CHCHK_FLAG_VCODEC_NAME        check for a compatible vcodec_name
+ *
+ *
+ * The resulting frame is delivered into the GapStoryFetchResult struct.
+ *
+ *   Note that the caller of the fetch procedure can already provide
+ *   allocated memory for the buffers raw_rgb_data and video_frame_chunk_data.
+ *   (in this case the caller is responsible for allocating the buffers large enough
+ *   to hold one uncompressed frame in rgb888 colormodel representation)
+ *
+ *   in case raw_rgb_data or video_frame_chunk_data is NULL the buffer is automatically
+ *   allocated in the correct size when needed.
+ */
+void
+gap_story_render_fetch_composite_image_or_buffer_or_chunk(GapStoryRenderVidHandle *vidhand
+                    , gint32 master_frame_nr  /* starts at 1 */
+                    , gint32  vid_width       /* desired Video Width in pixels */
+                    , gint32  vid_height      /* desired Video Height in pixels */
+                    , char *filtermacro_file  /* NULL if no filtermacro is used */
+                    , gboolean dont_recode_flag                /* IN: TRUE try to fetch compressed chunk if possible */
+                    , gboolean enable_rgb888_flag              /* IN: TRUE deliver result already converted to rgb buffer */
+                    , GapCodecNameElem *vcodec_list            /* IN: list of video_codec names that are compatible to the calling encoder program */
+                    , gint32 video_frame_chunk_maxsize         /* IN: sizelimit (larger chunks are not fetched) */
+                    , gdouble master_framerate
+                    , gint32  max_master_frame_nr              /* the number of frames that will be encoded in total */
+                    , gint32  check_flags                      /* IN: combination of GAP_VID_CHCHK_FLAG_* flag values */
+                    , GapStoryFetchResult *gapStoryFetchResult
+                 )
+{
+#define GAP_MPEG_ASSUMED_REFERENCE_DISTANCE 3
+  static char      *last_videofile = NULL;
+  static gboolean   last_fetch_was_compressed_chunk = FALSE;
+
+  gchar  *l_videofile;
+  GapStoryRenderFrameRangeElem *l_frn_elem;
+
+  gboolean      l_enable_chunk_fetch;
+
+  /* init result record */
+  gapStoryFetchResult->resultEnum = GAP_STORY_FETCH_RESULT_IS_ERROR;
+  gapStoryFetchResult->image_id                   = -1;
+  gapStoryFetchResult->layer_id                   = -1;
+  gapStoryFetchResult->force_keyframe             = FALSE;
+  gapStoryFetchResult->video_frame_chunk_size     = 0;
+  gapStoryFetchResult->video_frame_chunk_hdr_size = 0;     /* assume chunk contains no frame header */
+  
+  l_frn_elem        = NULL;
+  l_enable_chunk_fetch = dont_recode_flag;
+  
+  if ((gapStoryFetchResult->video_frame_chunk_data == NULL)
+  && (l_enable_chunk_fetch == TRUE))
+  {
+    gapStoryFetchResult->video_frame_chunk_data = g_malloc(vid_width * vid_height * 4);
+  }
+
+  if(gap_debug)
+  {
+    printf("gap_story_render_fetch_composite_image_or_buffer_or_chunk START  master_frame_nr:%d  %dx%d dont_recode:%d\n"
+                       , (int)master_frame_nr
+                       , (int)vid_width
+                       , (int)vid_height
+                       , (int)dont_recode_flag
+                       );
+  }
+
+  l_videofile = NULL;     /* NULL: also used as flag for "MUST fetch regular uncompressed frame" */
+
+
+#ifdef GAP_ENABLE_VIDEOAPI_SUPPORT
+
+  if(filtermacro_file)
+  {
+     if(*filtermacro_file != '\0')
+     {
+       if(gap_debug)
+       {
+         printf("chunk fetch disabled due to filtermacro processing\n");
+       }
+       /* if a filtermacro_file is present, force disable chunk fetching */
+       l_enable_chunk_fetch = FALSE;  
+     }
+  }
+
+  if (l_enable_chunk_fetch)
+  {
+     if(gap_debug)
+     {
+        printf("start check if chunk fetch is possible\n");
+     }
+
+     l_videofile = p_story_attempt_fetch_chunk(vidhand
+                         , master_frame_nr
+                         , vid_width
+                         , vid_height
+                         , vcodec_list
+                         , &gapStoryFetchResult->video_frame_chunk_data
+                         , &gapStoryFetchResult->video_frame_chunk_size
+                         , video_frame_chunk_maxsize
+                         , master_framerate
+                         , max_master_frame_nr
+                         , &gapStoryFetchResult->video_frame_chunk_hdr_size
+                         , check_flags
+     
+                         , &last_fetch_was_compressed_chunk
+                         , last_videofile
+                      );
+  }
+
+  if(last_fetch_was_compressed_chunk)
+  {
+    gapStoryFetchResult->force_keyframe = TRUE;
+  }
+
+  /* keep the videofile name for the next call
+   * (for MPEG INTEGRITY checks that require a continuous sequence
+   *  in the same referenced source video)
+   */
+  if(last_videofile)
+  {
+      g_free(last_videofile);
+  }
+  last_videofile = l_videofile;
+
+#endif
+
+  if(l_videofile != NULL)
+  {
+     /* chunk fetch was successful */
+     if(gap_debug)
+     {
+        printf("gap_story_render_fetch_composite_image_or_buffer_or_chunk:  CHUNK fetch successful\n");
+     }
+     gapStoryFetchResult->resultEnum = GAP_STORY_FETCH_RESULT_IS_COMPRESSED_CHUNK;
+     return;
+  }
+  else
+  {
+    last_fetch_was_compressed_chunk = FALSE;
+    if(last_videofile)
+    {
+      g_free(last_videofile);
+    }
+    last_videofile = l_videofile;
+
+
+
+    gapStoryFetchResult->video_frame_chunk_size = 0;
+ 
+    if(gap_debug)
+    {
+      printf("gap_story_render_fetch_composite_image_or_buffer_or_chunk:  "
+             "CHUNK fetch not possible (doing frame or rgb888 fetch instead) enable_rgb888_flag:%d\n"
+             ,(int)enable_rgb888_flag
+             );
+    }
+      
+    p_story_render_fetch_composite_image_or_buffer(vidhand
+                                                   ,master_frame_nr
+                                                   ,vid_width
+                                                   ,vid_height
+                                                   ,filtermacro_file
+                                                   ,&gapStoryFetchResult->layer_id
+                                                   ,enable_rgb888_flag
+                                                   ,gapStoryFetchResult  /* IN/OUT: fetched data is stored in this struct */
+                                                   );
+  }
+
+
+} /* end gap_story_render_fetch_composite_image_or_buffer_or_chunk */
+
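
Illustration (not part of the patch): a hedged encoder-side sketch of dispatching on the GapStoryFetchResult delivered by the extended API. The surrounding variables (vidhand, master_frame_nr, vid_width, ...) are assumed to be in scope, and the p_encode_* callbacks are hypothetical placeholders for the calling encoder's own output routines.

  GapStoryFetchResult fetchResult;

  /* NULL buffers let the fetcher allocate them in the correct size when needed */
  fetchResult.video_frame_chunk_data = NULL;
  fetchResult.raw_rgb_data           = NULL;

  gap_story_render_fetch_composite_image_or_buffer_or_chunk(vidhand
                      , master_frame_nr
                      , vid_width, vid_height
                      , NULL                        /* filtermacro_file */
                      , dont_recode_flag
                      , enable_rgb888_flag
                      , vcodec_list
                      , video_frame_chunk_maxsize
                      , master_framerate
                      , max_master_frame_nr
                      , check_flags
                      , &fetchResult
                      );

  switch (fetchResult.resultEnum)
  {
    case GAP_STORY_FETCH_RESULT_IS_COMPRESSED_CHUNK:
      /* lossless cut: pass the already compressed chunk 1:1 to the output video */
      p_encode_write_chunk(fetchResult.video_frame_chunk_data
                          , fetchResult.video_frame_chunk_size
                          , fetchResult.video_frame_chunk_hdr_size);
      break;
    case GAP_STORY_FETCH_RESULT_IS_RAW_RGB888:
      /* render engine bypass: encode the rgb888 buffer directly */
      p_encode_rgb888_buffer(fetchResult.raw_rgb_data, vid_width, vid_height);
      break;
    case GAP_STORY_FETCH_RESULT_IS_IMAGE:
      /* regular case: encode the composite gimp image */
      p_encode_gimp_image(fetchResult.image_id, fetchResult.layer_id);
      break;
    default:
      break;   /* GAP_STORY_FETCH_RESULT_IS_ERROR */
  }
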
diff --git a/gap/gap_story_render_processor.c b/gap/gap_story_render_processor.c
old mode 100644
new mode 100755
index c7f9989..1de8d0c
--- a/gap/gap_story_render_processor.c
+++ b/gap/gap_story_render_processor.c
@@ -80,6 +80,7 @@
 #include "gap_frame_fetcher.h"
 #include "gap_accel_char.h"
 
+
 /* data for the storyboard processor frame fetching
  */
 typedef struct GapStbFetchData {   /* nick: gfd */
@@ -105,22 +106,89 @@ typedef struct GapStbFetchData {   /* nick: gfd */
   GapStoryRenderFrameType   frn_type;
   char            *trak_filtermacro_file;
 
+  /* performance stuff to bypass the render engine (where possible) */
+  GapStoryFetchResult *gapStoryFetchResult;
+  gboolean             isRgb888Result;        /* TRUE: use the rgb888 buffer to bypass conversion to a gimp drawable */
+
 }  GapStbFetchData;
 
+
+/* data for video frame prefetch using multiple threads 
+ * (used optionally, depending on gimprc configuration)
+ */
+typedef struct VideoPrefetchData   /* vpre */
+{
+  t_GVA_Handle        *gvahand;
+  gint32               prefetchFrameNumber;
+  gint32               targetFrameNumber;
+  gboolean             isPlayingBackwards;
+  gboolean             isPrefetchThreadRunning;
+  GMutex              *mutex;
+  GCond               *targetFrameReadyCond;
+  GCond               *prefetchDoneCond;
+
+} VideoPrefetchData;
+
+
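Illustration (not part of the patch): one plausible way the main thread could wait on the prefetch thread using the condition variables declared above. The concrete protocol lives in p_videoPrefetchWorkerThreadFunction (not shown in this hunk), so the helper name and the logic below are assumptions for illustration only.

static void
p_example_wait_for_prefetched_frame(VideoPrefetchData *vpre, gint32 wantedFrameNr)
{
  g_mutex_lock(vpre->mutex);
  vpre->targetFrameNumber = wantedFrameNr;
  while ((vpre->isPrefetchThreadRunning)
  &&     (vpre->prefetchFrameNumber < wantedFrameNr))
  {
    /* mutex is released while sleeping; woken when the worker signals targetFrameReadyCond */
    g_cond_wait(vpre->targetFrameReadyCond, vpre->mutex);
  }
  g_mutex_unlock(vpre->mutex);
}
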
+/* Workaround:
+ * calls to g_mutex_free (mutex) often (but not every time) FAIL when closing the videohandle,
+ * even after checks that no prefetch thread is still using the mutex.
+ *
+ * To avoid this type of CRASH a workaround was implemented that puts unused mutexes
+ * in a mutex pool for reuse instead of removing them;
+ * this avoids the call to g_mutex_free and avoids allocating too many mutex resources.
+ *
+ *
+ * GThread-ERROR **: file gthread-posix.c: line 171 (g_mutex_free_posix_impl):
+ *   error 'Das Gerät oder die Ressource ist belegt' (device or resource busy) during 'pthread_mutex_destroy ((pthread_mutex_t *) mutex)'
+ */
+typedef struct StbMutexPool   /* mutxp */
+{
+  GMutex      *mutex;
+  gboolean     isFree;
+  void        *next;
+
+} StbMutexPool;
+
+
+
 /*************************************************************
  *         STORYBOARD FUNCTIONS                              *
  *************************************************************
  */
 
 #define MAX_IMG_CACHE_ELEMENTS 6
+#define MULTITHREAD_PREFETCH_AMOUNT 3
+/* SMALL_FCACHE_SIZE_AT_FORWARD_READ is set 2 frames greater than MULTITHREAD_PREFETCH_AMOUNT.
+ * With a smaller fcache the prefetched frames are often overwritten by the prefetch thread
+ * before the main thread can read them from the fcache. This triggers many unwanted seek operations
+ * with significant loss of performance when multithreaded processing is activated.
+ *  
+ */
+#define SMALL_FCACHE_SIZE_AT_FORWARD_READ  (MULTITHREAD_PREFETCH_AMOUNT + 2)
 
-extern int gap_debug;  /* 1 == print debug infos , 0 dont print debug infos */
 
+#define GVAHAND_HOLDER_RANK_MIN_LEVEL        0
+#define GVAHAND_HOLDER_RANK_NO_BENEFIT_LEVEL 1
+#define GVAHAND_HOLDER_RANK_2                2
+#define GVAHAND_HOLDER_RANK_3                3
+#define GVAHAND_HOLDER_RANK_4                4
+#define GVAHAND_HOLDER_RANK_MAX_LEVEL        5
 
 
+extern int gap_debug;  /* 1 == print debug infos , 0 dont print debug infos */
+
+static GThreadPool         *prefetchThreadPool = NULL;
+static StbMutexPool        *mutexPool = NULL;
+
 static gint32 global_monitor_image_id = -1;
 static gint32 global_monitor_display_id = -1;
 
+
+static GMutex * p_pooled_g_mutex_new();
+static void     p_pooled_g_mutex_free(GMutex *mutex);
+
+
 static void     p_debug_print_render_section_names(GapStoryRenderVidHandle *vidhand);
 static void     p_frame_backup_save(  char *key
                   , gint32 image_id
@@ -138,10 +206,7 @@ static GapStoryRenderErrors * p_new_stb_error(void);
 static void     p_init_stb_error(GapStoryRenderErrors *sterr);
 static void     p_free_stb_error(GapStoryRenderErrors *sterr);
 static void     p_set_stb_error(GapStoryRenderErrors *sterr, char *errtext);
-static void     p_find_min_max_vid_tracknumbers(GapStoryRenderFrameRangeElem *frn_list
-                             , gint32 *lowest_tracknr
-                             , gint32 *highest_tracknr
-                             );
+static void     p_refresh_min_max_vid_tracknumbers(GapStoryRenderVidHandle *vidhand);
 
 static gdouble  p_attribute__at_step(gint32 frame_step /* current frame (since start of current processed clip */
                  ,gdouble from_val
@@ -345,13 +410,16 @@ static gint32     p_prepare_RGB_image(gint32 image_id);
 static void       p_limit_open_videohandles(GapStoryRenderVidHandle *vidhand
                       , gint32 master_frame_nr
                       , gint32 currently_open_videohandles
+                      , gint32 l_max_open_videohandles
                       );
 
+static gint32     p_calculateGvahandHolderRank(GapStoryRenderFrameRangeElem *frn_elem
+                            , gint32 videoFrameNrToBeReadNext
+                            , gint32 track);
 
 static t_GVA_Handle * p_try_to_steal_gvahand(GapStoryRenderVidHandle *vidhand
                       , gint32 master_frame_nr
-                      , char *basename             /* the videofile name */
-                      , gint32 exact_seek
+                      , GapStoryRenderFrameRangeElem *requesting_frn_elem
                       );
 static void       p_split_delace_value(gdouble delace
                       , gdouble localframe_tween_rest
@@ -362,6 +430,37 @@ static void       p_conditional_delace_drawable(GapStbFetchData *gfd, gint32 dra
 static void       p_stb_render_image_or_animimage(GapStbFetchData *gfd
                       , GapStoryRenderVidHandle *vidhand
                       , gint32 master_frame_nr);
+static gboolean   p_is_another_clip_playing_the_same_video_backwards(GapStoryRenderFrameRangeElem *frn_elem_ref);
+static void       p_check_and_open_video_handle(GapStoryRenderFrameRangeElem *frn_elem
+                      , GapStoryRenderVidHandle *vidhand
+                      , gint32 master_frame_nr
+                      , const gchar *videofile
+                      );
+
+
+#define NEAR_FRAME_DISTANCE 36
+
+static void    p_initOptionalMulitprocessorSupport(GapStoryRenderVidHandle *vidhand);
+
+#ifdef GAP_ENABLE_VIDEOAPI_SUPPORT
+static void    p_call_GVA_close(t_GVA_Handle *gvahand);
+static void    p_get_gapStoryFetchResult_from_fcacheFetchResult(GapStbFetchData *gfd, GVA_fcache_fetch_result *fcacheFetchResult);
+static void    p_stb_render_movie_single_processor(GapStbFetchData *gfd
+                      , GapStoryRenderVidHandle *vidhand
+                      , gint32 master_frame_nr
+                      , gint32  vid_width
+                      , gint32  vid_height);
+static void    p_call_GVA_get_next_frame_andSendReadySignal(VideoPrefetchData *vpre, gint32 targetFrameNumber);
+static void    p_videoPrefetchWorkerThreadFunction (VideoPrefetchData *vpre);
+static gint32  p_getPredictedNextFramenr(gint32 targetFrameNr, GapStoryRenderFrameRangeElem *frn_elem);
+static void    p_stb_render_movie_multiprocessor(GapStbFetchData *gfd
+                      , GapStoryRenderVidHandle *vidhand
+                      , gint32 master_frame_nr
+                      , gint32  vid_width
+                      , gint32  vid_height);
+
+#endif
+
 static void       p_stb_render_movie(GapStbFetchData *gfd
                       , GapStoryRenderVidHandle *vidhand
                       , gint32 master_frame_nr
@@ -388,22 +487,108 @@ static void       p_paste_logo_pattern(gint32 drawable_id
                       , gint32 offsetY
                       );
 
+static char*      p_get_insert_area_filename(GapStbFetchData *gfd
+                      , GapStoryRenderVidHandle *vidhand);
 static void       p_do_insert_area_processing(GapStbFetchData *gfd
                       , GapStoryRenderVidHandle *vidhand);
 static gint32     p_prepare_GRAY_image(gint32 image_id);
+static char*      p_get_insert_alpha_filename(GapStbFetchData *gfd
+                      , GapStoryRenderVidHandle *vidhand);
 static void       p_do_insert_alpha_processing(GapStbFetchData *gfd
                       , GapStoryRenderVidHandle *vidhand);
 
-
-static gint32     p_story_render_fetch_composite_image_private(GapStoryRenderVidHandle *vidhand
+static gboolean  p_isFiltermacroActive(const char *filtermacro_file);
+static gboolean  p_story_render_bypass_where_possible(GapStoryRenderVidHandle *vidhand
                     , gint32 master_frame_nr  /* starts at 1 */
                     , gint32  vid_width       /* desired Video Width in pixels */
                     , gint32  vid_height      /* desired Video Height in pixels */
-                    , char *filtermacro_file  /* NULL if no filtermacro is used */
-                    , gint32 *layer_id        /* output: Id of the only layer in the composite image */
-                    , const char *section_name  /* NULL for main section */
-                   );
+                    , gboolean enable_rgb888_flag  /* enable fetch as rgb888 data buffer */
+                    , GapStoryFetchResult      *gapStoryFetchResult
+                    );
+
+static gint32    p_story_render_fetch_composite_image_private(GapStoryRenderVidHandle *vidhand
+                    , gint32 master_frame_nr       /* starts at 1 */
+                    , gint32  vid_width            /* desired Video Width in pixels */
+                    , gint32  vid_height           /* desired Video Height in pixels */
+                    , char *filtermacro_file       /* NULL if no filtermacro is used */
+                    , gint32 *layer_id             /* output: Id of the only layer in the composite image */
+                    , const char *section_name     /* NULL for main section */
+                    , gboolean enable_rgb888_flag  /* enable fetch as rgb888 data buffer */
+                    , GapStoryFetchResult      *gapStoryFetchResult
+                    );
+
+
+/* --------------------------
+ * p_pooled_g_mutex_new
+ * --------------------------
+ */
+static GMutex *
+p_pooled_g_mutex_new()
+{
+  GMutex       *mutex;
+  StbMutexPool *mutexp;
+  
+  for(mutexp = mutexPool; mutexp != NULL; mutexp=mutexp->next)
+  {
+    if(mutexp->isFree == TRUE)
+    {
+      mutexp->isFree = FALSE;
+      if(gap_debug)
+      {
+        printf("p_pooled_g_mutex_new: recycle mutex:%d\n"
+          ,(int)mutexp->mutex
+          );
+      }
+      return(mutexp->mutex);
+    }
+  }
+  
+  mutexp  = g_new(StbMutexPool, 1);
+  mutexp->isFree = FALSE;
+  mutexp->mutex = g_mutex_new();
+  mutexp->next = mutexPool;
+  mutexPool = mutexp;
 
+  if(gap_debug)
+  {
+    printf("p_pooled_g_mutex_new: allocated new mutex:%d\n"
+       ,(int)mutexp->mutex
+       );
+  }
+
+  return(mutexp->mutex);
+  
+}  /* end p_pooled_g_mutex_new */
+
+/* --------------------------
+ * p_pooled_g_mutex_free
+ * --------------------------
+ */
+static void
+p_pooled_g_mutex_free(GMutex       *mutex)
+{
+  StbMutexPool *mutexp;
+  
+  for(mutexp = mutexPool; mutexp != NULL; mutexp=mutexp->next)
+  {
+    if(mutexp->mutex == mutex)
+    {
+      mutexp->isFree = TRUE;
+      if(gap_debug)
+      {
+        printf("p_pooled_g_mutex_free: disposed mutex:%d for later reuse\n"
+           ,(int)mutexp->mutex
+           );
+      }
+      return;
+    }
+  }
+
+  printf("p_pooled_g_mutex_free: ** ERROR mutex:%d not found in pool\n"
+           ,(int)mutex
+           );
+
+} /* end p_pooled_g_mutex_free */
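
A minimal usage sketch of the pool (illustration only; the variable name is assumed): the pooled calls are drop-in replacements for g_mutex_new / g_mutex_free, so a per-handle mutex can be acquired and released repeatedly without ever destroying the underlying GMutex.

  GMutex *handleMutex;

  handleMutex = p_pooled_g_mutex_new();   /* instead of g_mutex_new() */
  g_mutex_lock(handleMutex);
  /* ... protected access to the video handle ... */
  g_mutex_unlock(handleMutex);
  p_pooled_g_mutex_free(handleMutex);     /* returns the mutex to the pool instead of destroying it */
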
 
 /* ----------------------------------------------------
  * gap_story_render_debug_print_maskdef_elem
@@ -935,38 +1120,65 @@ gap_story_render_set_stb_warning(GapStoryRenderErrors *sterr, char *warntext)
 
 
 /* ----------------------------------------------------
- * p_find_min_max_vid_tracknumbers
+ * p_refresh_min_max_vid_tracknumbers
  * ----------------------------------------------------
  * findout the lowest and highest track number used
- * in the framerange list
+ * in the current framerange list in the specified storyboard videohandle.
+ *
+ * (and constrain the values to limits, e.g. array boundaries)
+ *
+ * This procedure is typically called each time the frn_list
+ * of a storyboard videohandle (GapStoryRenderVidHandle *vidhand)
+ * was changed (after creation and after switch to another section)
  */
 static void
-p_find_min_max_vid_tracknumbers(GapStoryRenderFrameRangeElem *frn_list
-                             , gint32 *lowest_tracknr
-                             , gint32 *highest_tracknr
-                             )
+p_refresh_min_max_vid_tracknumbers(GapStoryRenderVidHandle *vidhand)
 {
   GapStoryRenderFrameRangeElem *frn_elem;
 
-  *lowest_tracknr = GAP_STB_MAX_VID_INTERNAL_TRACKS;
-  *highest_tracknr = -1;
+  gint32 l_lowest_tracknr;
+  gint32 l_highest_tracknr;
 
-  for(frn_elem = frn_list; frn_elem != NULL; frn_elem = (GapStoryRenderFrameRangeElem *)frn_elem->next)
+  if(vidhand->frn_list == NULL)
   {
-    if (frn_elem->track > *highest_tracknr)
-    {
-      *highest_tracknr = frn_elem->track;
-    }
-    if (frn_elem->track < *lowest_tracknr)
+    /* if there is no frn_list (may occur in case the STORYBOARD has unknown sections)
+     * set both min/max to 0
+     */
+    l_lowest_tracknr = 0;
+    l_highest_tracknr = 0;
+  }
+  else
+  {
+    l_lowest_tracknr = GAP_STB_MAX_VID_INTERNAL_TRACKS;
+    l_highest_tracknr = -1;
+
+    for(frn_elem = vidhand->frn_list; frn_elem != NULL; frn_elem = (GapStoryRenderFrameRangeElem *)frn_elem->next)
     {
-      *lowest_tracknr = frn_elem->track;
-    }
+      if (frn_elem->track > l_highest_tracknr)
+      {
+        l_highest_tracknr = frn_elem->track;
+      }
+      if (frn_elem->track < l_lowest_tracknr)
+      {
+        l_lowest_tracknr = frn_elem->track;
+      }
 
+    }
   }
 
-  if(gap_debug) printf("p_find_min_max_vid_tracknumbers: min:%d max:%d\n", (int)*lowest_tracknr, (int)*highest_tracknr);
+  vidhand->minVidTrack  = CLAMP(l_lowest_tracknr, 0, GAP_STB_MAX_VID_INTERNAL_TRACKS);
+  vidhand->maxVidTrack = CLAMP(l_highest_tracknr, 0, GAP_STB_MAX_VID_INTERNAL_TRACKS);
 
-}  /* end p_find_min_max_vid_tracknumbers */
+
+  if(gap_debug)
+  {
+    printf("p_refresh_min_max_vid_tracknumbers: min:%d max:%d\n"
+       , (int)vidhand->minVidTrack
+       , (int)vidhand->maxVidTrack
+       );
+  }
+
+}  /* end p_refresh_min_max_vid_tracknumbers */
 
 
 /* --------------------------------
@@ -1062,14 +1274,21 @@ p_select_section_by_name(GapStoryRenderVidHandle *vidhand, const char *section_n
 
   if (section != NULL)
   {
-    vidhand->frn_list = section->frn_list;
     vidhand->aud_list = section->aud_list;
+    if(vidhand->frn_list != section->frn_list)
+    {
+      vidhand->frn_list = section->frn_list;
+      p_refresh_min_max_vid_tracknumbers(vidhand);
+    }
   }
   else
   {
     vidhand->frn_list = NULL;
     vidhand->aud_list = NULL;
+    p_refresh_min_max_vid_tracknumbers(vidhand);
   }
+
+
   if(gap_debug)
   {
     printf("p_select_section_by_name: addr of section: %d Resulting addr of frn_list: %d)\n"
@@ -2378,6 +2597,7 @@ p_storyboard_analyze(GapStoryBoard *stb
     vidhand->preferred_decoder = g_strdup(stb->preferred_decoder);
   }
 
+  /* stuff for automatic insert of an alpha channel for clip type MOVIE */
   vidhand->master_insert_alpha_format = NULL;
   if(stb->master_insert_alpha_format)
   {
@@ -2388,6 +2608,7 @@ p_storyboard_analyze(GapStoryBoard *stb
       p_fmt_string_has_videobasename_format(vidhand->master_insert_alpha_format);
   }
 
+  /* stuff for automatic logo inserts */
   vidhand->master_insert_area_format = NULL;
   if(stb->master_insert_area_format)
   {
@@ -3092,7 +3313,7 @@ p_free_framerange_list(GapStoryRenderFrameRangeElem * frn_list)
     if(frn_elem->filtermacro_file)  { g_free(frn_elem->filtermacro_file);}
 
 #ifdef GAP_ENABLE_VIDEOAPI_SUPPORT
-    if(frn_elem->gvahand)           { GVA_close(frn_elem->gvahand);}
+    if(frn_elem->gvahand)           { p_call_GVA_close(frn_elem->gvahand);}
 #endif
 
     frn_next = (GapStoryRenderFrameRangeElem *)frn_elem->next;
@@ -3487,6 +3708,8 @@ p_mask_fetcher(GapStoryRenderVidHandle *vidhand
                   , NULL             /* each mask has its own mask_vidhand where
                                       * the elements are in main section (section_name = NULL)
                                       */
+                  , FALSE            /* enable_rgb888_flag */
+                  , NULL             /* GapStoryFetchResult */
                  );
 
     if(gap_debug)
@@ -3738,6 +3961,18 @@ p_open_video_handle_private(    gboolean ignore_audio
   /* registrate as user of the frame fetcher resources (e.g. the image cache) */
   vidhand->ffetch_user_id = gap_frame_fetch_register_user("gap_story_render_processor.p_open_video_handle_private");
 
+  vidhand->isLogResourceUsage = FALSE;
+  vidhand->resourceLogInterval = gap_base_get_gimprc_int_value(GAP_GIMPRC_VIDEO_STORYBOARD_RESOURCE_LOG_INTERVAL
+                                                     , 0  /* default value 0 turns off resource logging */
+                                                     , 0
+                                                     , 100000
+                                                     );
+  if(vidhand->resourceLogInterval > 0)
+  {
+    vidhand->isLogResourceUsage = TRUE;
+  }
+  p_initOptionalMulitprocessorSupport(vidhand);
+  
   vidhand->frn_list = NULL;
   vidhand->preferred_decoder = NULL;
   vidhand->master_insert_alpha_format = NULL;
@@ -3811,7 +4046,8 @@ p_open_video_handle_private(    gboolean ignore_audio
   && (input_mode == GAP_RNGTYPE_LAYER)
   && (imagename))
   {
-      gint32 l_from;
+      gint32 l_from;      render_section->frn_list = frn_elem;
+
       gint32 l_to;
 
       l_from = frame_from;
@@ -4079,6 +4315,9 @@ p_open_video_handle_private(    gboolean ignore_audio
 
   /* p_free_stb_error(vidhand->sterr); */
 
+  p_refresh_min_max_vid_tracknumbers(vidhand);
+
+
   if(gap_debug)
   {
     printf("\n\np_open_video_handle_private: END vidhand:%d\n\n", (int)vidhand);
@@ -4281,7 +4520,10 @@ p_exec_filtermacro(gint32 image_id, gint32 layer_id, const char *filtermacro_fil
   gint32 l_rc_layer_id;
   gint          l_nlayers;
   gint32       *l_layers_list;
-
+  static gint32 funcId = -1;
+  
+  GAP_TIMM_GET_FUNCTION_ID(funcId, "p_exec_filtermacro");
+  GAP_TIMM_START_FUNCTION(funcId);
 
   l_rc_layer_id = layer_id;
   if (filtermacro_file)
@@ -4366,6 +4608,8 @@ p_exec_filtermacro(gint32 image_id, gint32 layer_id, const char *filtermacro_fil
 
     }
   }
+
+  GAP_TIMM_STOP_FUNCTION(funcId);
   return(l_rc_layer_id);
 } /* end p_exec_filtermacro */
 
@@ -4476,6 +4720,23 @@ p_transform_and_add_layer( gint32 comp_image_id
   GapStoryCalcAttr  calculate_attributes;
   GapStoryCalcAttr  *calculated;
 
+  static gint32 funcId = -1;
+  static gint32 funcIdFull = -1;
+  static gint32 funcIdScale = -1;
+  static gint32 funcIdRotate = -1;
+  static gint32 funcIdClipped = -1;
+  static gint32 funcIdClipScale = -1;
+  
+  GAP_TIMM_GET_FUNCTION_ID(funcId,          "p_transform_and_add_layer");
+  GAP_TIMM_GET_FUNCTION_ID(funcIdFull,      "p_transform_and_add_layer.Fullsize");
+  GAP_TIMM_GET_FUNCTION_ID(funcIdScale,     "p_transform_and_add_layer.ScaleFullsize");
+  GAP_TIMM_GET_FUNCTION_ID(funcIdRotate,    "p_transform_and_add_layer.RotateFullsize");
+  GAP_TIMM_GET_FUNCTION_ID(funcIdClipped,   "p_transform_and_add_layer.Clippedsize");
+  GAP_TIMM_GET_FUNCTION_ID(funcIdClipScale, "p_transform_and_add_layer.ScaleClippedsize");
+  
+  GAP_TIMM_START_FUNCTION(funcId);
+
+
   if(gap_debug)
   {
     printf("p_transform_and_add_layer: called at layer_id: %d, tmp_image_id:%d comp_image_id:%d\n"
@@ -4551,6 +4812,8 @@ p_transform_and_add_layer( gint32 comp_image_id
 
   if(TRUE == p_transform_operate_on_full_layer(calculated, comp_image_id, tmp_image_id, frn_elem))
   {
+    GAP_TIMM_START_FUNCTION(funcIdFull);
+
     /* operate on layer in full calculated size */
     if(gap_debug)
     {
@@ -4571,10 +4834,11 @@ p_transform_and_add_layer( gint32 comp_image_id
                             );
 
       }
+      GAP_TIMM_START_FUNCTION(funcIdScale);
       gimp_layer_scale(layer_id, calculated->width, calculated->height
                       , FALSE  /* FALSE: centered at image TRUE: centered local on layer */
                       );
-
+      GAP_TIMM_STOP_FUNCTION(funcIdScale);
     }
 
 
@@ -4605,8 +4869,13 @@ p_transform_and_add_layer( gint32 comp_image_id
       l_orig_width  = gimp_drawable_width(layer_id);
       l_orig_height  = gimp_drawable_height(layer_id);
 
+      GAP_TIMM_START_FUNCTION(funcIdRotate);
+
       gap_story_transform_rotate_layer(tmp_image_id, layer_id, rotate);
 
+
+      GAP_TIMM_STOP_FUNCTION(funcIdRotate);
+
       /* recalculate offests to compensate size changes caused by rotation */
       calculated->x_offs = calculated->x_offs + (l_orig_width / 2.0) - (gimp_drawable_width(layer_id) / 2.0);
       calculated->y_offs = calculated->y_offs + (l_orig_height / 2.0) - (gimp_drawable_height(layer_id) / 2.0);
@@ -4625,9 +4894,13 @@ p_transform_and_add_layer( gint32 comp_image_id
                         );
 
     gimp_floating_sel_anchor(l_fsel_layer_id);
+
+    GAP_TIMM_STOP_FUNCTION(funcIdFull);
   }
   else
   {
+    GAP_TIMM_START_FUNCTION(funcIdClipped);
+
     /* operate on clipped rectangle size (rotation not handled in this case) */
     if(gap_debug)
     {
@@ -4712,9 +4985,13 @@ p_transform_and_add_layer( gint32 comp_image_id
     if ((gimp_drawable_width(l_fsel_layer_id) != calculated->visible_width)
     ||  (gimp_drawable_height(l_fsel_layer_id) != calculated->visible_height) )
     {
+      GAP_TIMM_START_FUNCTION(funcIdClipScale);
+
       gimp_layer_scale(l_fsel_layer_id, calculated->visible_width, calculated->visible_height
                       , FALSE  /* FALSE: centered at image TRUE: centered local on layer */
                       );
+
+      GAP_TIMM_STOP_FUNCTION(funcIdClipScale);
     }
 
     /* move floating selection according to target offsets
@@ -4729,6 +5006,8 @@ p_transform_and_add_layer( gint32 comp_image_id
     gimp_floating_sel_anchor(l_fsel_layer_id);
 
 
+    GAP_TIMM_STOP_FUNCTION(funcIdClipped);
+
   }
 
 
@@ -4754,6 +5033,8 @@ p_transform_and_add_layer( gint32 comp_image_id
 
   gimp_layer_set_opacity(l_new_layer_id, calculated->opacity);
 
+  GAP_TIMM_STOP_FUNCTION(funcId);
+
   return(l_new_layer_id);
 }   /* end p_transform_and_add_layer */
 
@@ -4861,71 +5142,153 @@ p_prepare_RGB_image(gint32 image_id)
  * different videofile references and would run into memory and other resource problems
  * when all handles are kept open until the end of rendering process.
  * (note that each video handle has its own frame cache)
+ *
+ * TODO check for videohandles in the mask section
  */
 static void
 p_limit_open_videohandles(GapStoryRenderVidHandle *vidhand
                       , gint32 master_frame_nr
                       , gint32 currently_open_videohandles
+                      , gint32 max_open_videohandles
                       )
 {
 #ifdef GAP_ENABLE_VIDEOAPI_SUPPORT
 #define GAP_STB_DEFAULT_MAX_OPEN_VIDEOFILES 12
+  GapStoryRenderSection *section;
   GapStoryRenderFrameRangeElem *frn_elem;
   gint32 l_count_open_videohandles;
-  gint32 l_max_open_videohandles;
 
-
-  l_max_open_videohandles = gap_base_get_gimprc_int_value("video-storyboard-max-open-videofiles"
-                                                        , GAP_STB_DEFAULT_MAX_OPEN_VIDEOFILES
-                                                        , 2
-                                                        , 100
-                                                        );
   l_count_open_videohandles = currently_open_videohandles;
 
-  if (l_count_open_videohandles <= l_max_open_videohandles)
+  if (l_count_open_videohandles < max_open_videohandles)
   {
     /* we are below the limit, nothing left to do in that case */
     return;
   }
 
-  for (frn_elem = vidhand->frn_list; frn_elem != NULL; frn_elem = (GapStoryRenderFrameRangeElem *)frn_elem->next)
+  for(section = vidhand->section_list; section != NULL; section = section->next)
   {
-    if((frn_elem->last_master_frame_access < master_frame_nr)
-    && (frn_elem->gvahand != NULL))
+    for (frn_elem = section->frn_list; frn_elem != NULL; frn_elem = (GapStoryRenderFrameRangeElem *)frn_elem->next)
     {
-       if(gap_debug)
-       {
-         printf("too many open videofiles %d detected (limit:%d) at master_frame_nr:%d\n"
-                " CLOSING GVA handle for video read access %s\n"
-            , (int)l_count_open_videohandles
-            , (int)l_max_open_videohandles
-            , (int)master_frame_nr
-            , frn_elem->basename
-            );
-       }
-       GVA_close(frn_elem->gvahand);
-       frn_elem->gvahand = NULL;
-       l_count_open_videohandles--;
-
-       if (l_count_open_videohandles <= l_max_open_videohandles)
-       {
-         return;
-       }
-
+      if((frn_elem->last_master_frame_access < master_frame_nr)
+      && (frn_elem->gvahand != NULL))
+      {
+         if(gap_debug)
+         {
+           printf("too many open videofiles %d detected (limit:%d) at master_frame_nr:%d\n"
+                  " CLOSING GVA handle for video read access %s\n"
+              , (int)l_count_open_videohandles
+              , (int)max_open_videohandles
+              , (int)master_frame_nr
+              , frn_elem->basename
+              );
+         }
+         p_call_GVA_close(frn_elem->gvahand);
+         frn_elem->gvahand = NULL;
+         l_count_open_videohandles--;
+  
+         if (l_count_open_videohandles < max_open_videohandles)
+         {
+           return;
+         }
+  
+      }
     }
   }
+  
+  /* at this point there are still too many GVA video handles open
+   * (this may occur if videos are used as masks, therefore try to close
+   * mask video handles too.)
+   */
+  if(vidhand->is_mask_handle != TRUE)
+  {
+    GapStoryRenderMaskDefElem *maskdef_elem;
+    
+    maskdef_elem = vidhand->maskdef_elem;
+    if(maskdef_elem != NULL)
+    {
+      if(maskdef_elem->mask_vidhand != NULL)
+      {
+        p_limit_open_videohandles(maskdef_elem->mask_vidhand
+                                 , master_frame_nr
+                                 , l_count_open_videohandles
+                                 , max_open_videohandles
+                                 );
+      }
+    }
+  }
+
 #endif
   return;
 
 } /* end p_limit_open_videohandles */
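The policy above walks all frame range elements of all sections (and, if necessary, the mask handle) and closes handles whose last access lies before the current master frame until the count drops below the configured limit. A reduced standalone sketch of that close-stale-handles policy (the DemoElem type and demo_* names are hypothetical stand-ins for the GAP structures):

#include <stdio.h>
#include <stdlib.h>

/* hypothetical stand-in for a frame range element holding an open video handle */
typedef struct DemoElem {
  int   last_access;          /* last master_frame_nr that used this handle */
  void *handle;               /* non-NULL means "handle is open" */
  struct DemoElem *next;
} DemoElem;

/* close handles that were last used before master_frame_nr until the count
 * drops below max_open (mirrors the close-old-handles-first policy above) */
static void demo_limit_open_handles(DemoElem *list, int master_frame_nr,
                                    int open_count, int max_open)
{
  DemoElem *e;
  if (open_count < max_open) return;
  for (e = list; e != NULL; e = e->next)
  {
    if (e->handle != NULL && e->last_access < master_frame_nr)
    {
      free(e->handle);          /* stands in for the real GVA close call */
      e->handle = NULL;
      if (--open_count < max_open) return;
    }
  }
}

int main(void)
{
  DemoElem c = { 9, malloc(1), NULL };
  DemoElem b = { 3, malloc(1), &c };
  DemoElem a = { 7, malloc(1), &b };
  demo_limit_open_handles(&a, 10, 3, 2);
  printf("a:%s b:%s c:%s\n", a.handle ? "open" : "closed",
         b.handle ? "open" : "closed", c.handle ? "open" : "closed");
  return 0;
}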
 
 
+/* -----------------------------------------
+ * p_calculateGvahandHolderRank
+ * -----------------------------------------
+ * calculate rank for potential reuse of an already open GVA video handle
+ *
+ * return rank
+ *    5 .. videoFrameNrToBeReadNext is available in the fcache and current position >= videoFrameNrToBeReadNext, same track
+ *    4 .. videoFrameNrToBeReadNext is available in the fcache and current position >= videoFrameNrToBeReadNext, other track
+ *    3 .. videoFrameNrToBeReadNext is reachable with a few sequential read operations, same track
+ *    2 .. videoFrameNrToBeReadNext is reachable with a few sequential read operations, other track
+ *    1 .. videoFrameNrToBeReadNext requires a forward seek operation
+ *    0 .. videoFrameNrToBeReadNext requires a backward seek operation
+ */
+static gint32
+p_calculateGvahandHolderRank(GapStoryRenderFrameRangeElem *frn_elem
+                            , gint32 videoFrameNrToBeReadNext
+                            , gint32 track)
+{
+  t_GVA_Handle      *gvahand;
+  t_GVA_RetCode      l_fcr;
+  gint32             rank;
+
+  rank = GVAHAND_HOLDER_RANK_MIN_LEVEL;
+
+  gvahand = frn_elem->gvahand;
+  l_fcr = GVA_search_fcache(gvahand, videoFrameNrToBeReadNext);
+
+  if ((l_fcr == GVA_RET_OK)
+  && (gvahand->current_seek_nr >= videoFrameNrToBeReadNext))
+  {
+    if(frn_elem->track == track)
+    {
+      return(GVAHAND_HOLDER_RANK_MAX_LEVEL);
+    }
+    return(GVAHAND_HOLDER_RANK_4);
+  }
+
+  if(((gvahand->current_seek_nr + NEAR_FRAME_DISTANCE) > videoFrameNrToBeReadNext)
+  &&  (gvahand->current_seek_nr <= videoFrameNrToBeReadNext ) )
+  {
+    if(frn_elem->track == track)
+    {
+      return(GVAHAND_HOLDER_RANK_3);
+    }
+    return(GVAHAND_HOLDER_RANK_2);
+    
+  }
+
+  if (gvahand->current_seek_nr <= videoFrameNrToBeReadNext)
+  {
+    return(GVAHAND_HOLDER_RANK_NO_BENEFIT_LEVEL);
+  }
+
+  return(GVAHAND_HOLDER_RANK_MIN_LEVEL);
+
+
+}  /* end p_calculateGvahandHolderRank */
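For illustration, a reduced standalone version of this ranking (a hedged sketch only: the demo_rank name, the NEAR_DISTANCE value and the plain integer levels 0..5 are assumptions that merely mirror the comment above):

#include <stdio.h>

#define NEAR_DISTANCE 32   /* assumed stand-in for NEAR_FRAME_DISTANCE */

/* reduced rank: higher is better for reusing an already open handle */
static int demo_rank(int in_fcache, int current_seek_nr, int wanted_frame_nr, int same_track)
{
  if (in_fcache && current_seek_nr >= wanted_frame_nr)
    return same_track ? 5 : 4;                  /* frame already cached     */
  if (current_seek_nr <= wanted_frame_nr &&
      current_seek_nr + NEAR_DISTANCE > wanted_frame_nr)
    return same_track ? 3 : 2;                  /* a few sequential reads   */
  if (current_seek_nr <= wanted_frame_nr)
    return 1;                                   /* forward seek required    */
  return 0;                                     /* backward seek required   */
}

int main(void)
{
  /* candidate A: frame cached, other track; candidate B: near read, same track */
  printf("A rank:%d  B rank:%d\n",
         demo_rank(1, 120, 100, 0),
         demo_rank(0,  95, 100, 1));
  return 0;
}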
+
+
 /* ----------------------------------------------------
  * p_try_to_steal_gvahand
  * ----------------------------------------------------
  * try to steal an alread open GVA handle for video read from another
  * element.
- * conditions: must use same videofile, and exact_seek mode
+ * conditions: must use same videofile, seltrack and exact_seek mode
  * but steal only handles that are not in current access
  * (where the last accessed master_frame_nr is lower
  * than the current one)
@@ -4933,42 +5296,127 @@ p_limit_open_videohandles(GapStoryRenderVidHandle *vidhand
 static t_GVA_Handle *
 p_try_to_steal_gvahand(GapStoryRenderVidHandle *vidhand
                       , gint32 master_frame_nr
-                      , char *basename             /* the videofile name */
-                      , gint32 exact_seek
+                      , GapStoryRenderFrameRangeElem *requesting_frn_elem
                       )
 {
 #ifdef GAP_ENABLE_VIDEOAPI_SUPPORT
+  GapStoryRenderSection *section;
   GapStoryRenderFrameRangeElem *frn_elem;
+  GapStoryRenderFrameRangeElem *frn_elem_gvahandHolder;  /* the element that holds the best matching handle */
+  char  *basename;             /* the videofile name */
+  gint32 exact_seek;
+  gint32 seltrack;
+  gint32 track;
+  gint32 gvahandHolderRank;
+  gint32 videoFrameNrToBeReadNext;
   gint32 l_count_open_videohandles;
+  gint32 l_max_open_videohandles;
 
   l_count_open_videohandles = 0;
-  for (frn_elem = vidhand->frn_list; frn_elem != NULL; frn_elem = (GapStoryRenderFrameRangeElem *)frn_elem->next)
+  basename   = requesting_frn_elem->basename;
+  exact_seek = requesting_frn_elem->exact_seek;
+  seltrack   = requesting_frn_elem->seltrack;
+  track      = requesting_frn_elem->track;
+  frn_elem_gvahandHolder = NULL;
+  videoFrameNrToBeReadNext = requesting_frn_elem->frame_from;
+  gvahandHolderRank = -1;  /* initial value lower than the lowest regular rank */
+  l_max_open_videohandles = gap_base_get_gimprc_int_value(GAP_GIMPRC_VIDEO_STORYBOARD_MAX_OPEN_VIDEOFILES
+                                                        , GAP_STB_DEFAULT_MAX_OPEN_VIDEOFILES
+                                                        , 2
+                                                        , 100
+                                                        );
+  
+  for(section = vidhand->section_list; section != NULL; section = section->next)
   {
-    if (frn_elem->gvahand != NULL)
+    for (frn_elem = section->frn_list; frn_elem != NULL; frn_elem = (GapStoryRenderFrameRangeElem *)frn_elem->next)
     {
-      l_count_open_videohandles++;
+      if (frn_elem->gvahand != NULL)
+      {
+        l_count_open_videohandles++;
+      }
+      if((frn_elem->exact_seek == exact_seek)
+      && (frn_elem->last_master_frame_access < master_frame_nr)
+      && (frn_elem->seltrack == seltrack)
+      && (frn_elem->gvahand != NULL))
+      {
+        if(strcmp(frn_elem->basename, basename) == 0)
+        {
+          gint32 rank;
+          rank = p_calculateGvahandHolderRank(frn_elem, videoFrameNrToBeReadNext, track);
+
+          if(rank > gvahandHolderRank)
+          {
+            frn_elem_gvahandHolder = frn_elem;
+            gvahandHolderRank = rank;
+            if(rank >= GVAHAND_HOLDER_RANK_MAX_LEVEL)
+            {
+              /* we can skip further checks because an optimal matching GVA handle was found */
+              break;
+            }
+          }
+          
+         }
+      }
     }
-    if((frn_elem->exact_seek == exact_seek)
-    && (frn_elem->last_master_frame_access < master_frame_nr)
-    && (frn_elem->gvahand != NULL))
+  }
+
+  /* check for open videos in case there are mask definitions
+   * this is not done in case we are already rendering a mask (e.g. vidhand is the mask handle)
+   * In case the rank is above GVAHAND_HOLDER_RANK_NO_BENEFIT_LEVEL
+   * we can skip this check, because an already opened handle will be reused.
+   */
+  if((vidhand->is_mask_handle != TRUE)
+  && (gvahandHolderRank <=  GVAHAND_HOLDER_RANK_NO_BENEFIT_LEVEL))
+  {
+    GapStoryRenderMaskDefElem *maskdef_elem;
+    
+    for(maskdef_elem = vidhand->maskdef_elem; maskdef_elem != NULL;  maskdef_elem = maskdef_elem->next)
     {
-      if(strcmp(frn_elem->basename, basename) == 0)
+      if(maskdef_elem->mask_vidhand)
       {
-         t_GVA_Handle *gvahand;
+        for (frn_elem = maskdef_elem->mask_vidhand->frn_list; frn_elem != NULL; frn_elem = (GapStoryRenderFrameRangeElem *)frn_elem->next)
+        {
+          if (frn_elem->gvahand != NULL)
+          {
+            l_count_open_videohandles++;
+          }
 
-         if(gap_debug)
-         {
-           printf("(RE)using an already open GVA handle for video read access %s\n"
-                  , frn_elem->basename
-                  );
-         }
-         gvahand = frn_elem->gvahand;
-         frn_elem->gvahand = NULL;   /* steal from this element */
-         return(gvahand);
+        }
+      }
+    }
+  }
+
+
+  if (frn_elem_gvahandHolder != NULL)
+  {
+    if ((gvahandHolderRank >  GVAHAND_HOLDER_RANK_NO_BENEFIT_LEVEL)
+    || (l_count_open_videohandles >= l_max_open_videohandles))
+    {
+      t_GVA_Handle *gvahand;
+      if(gap_debug)
+      {
+        printf("(RE)using GVA handle for %s gvahand:%d Rank:%d holder(track:%d from:%d to:%d) requestor(track:%d from:%d to:%d) open videofile: %d (max:%d)\n"
+              , frn_elem_gvahandHolder->basename
+              , (int)frn_elem_gvahandHolder->gvahand
+              , (int)gvahandHolderRank
+              , (int)frn_elem_gvahandHolder->track
+              , (int)frn_elem_gvahandHolder->frame_from
+              , (int)frn_elem_gvahandHolder->frame_to
+              , (int)requesting_frn_elem->track
+              , (int)requesting_frn_elem->frame_from
+              , (int)requesting_frn_elem->frame_to
+              , (int)l_count_open_videohandles
+              , (int)l_max_open_videohandles
+              );
       }
+      gvahand = frn_elem_gvahandHolder->gvahand;
+      frn_elem_gvahandHolder->gvahand = NULL;   /* steal from this element */
+      return(gvahand);
     }
   }
-  p_limit_open_videohandles(vidhand, master_frame_nr, l_count_open_videohandles);
+ 
+
+  p_limit_open_videohandles(vidhand, master_frame_nr, l_count_open_videohandles, l_max_open_videohandles);
 #endif
   return(NULL);  /* nothing found to steal from, return NULL */
 
@@ -5150,10 +5598,153 @@ gap_story_render_fetch_composite_vthumb(GapStoryRenderVidHandle *stb_comp_vidhan
 
 
 
+/* ----------------------------------------------------
+ * p_dump_stb_resources_gvahand
+ * ----------------------------------------------------
+ * dump GVA video handle information for all currently open
+ * videohandles (e.g. all elements in MAIN and all SubSections)
+ */
+void
+p_dump_stb_resources_gvahand(GapStoryRenderVidHandle *vidhand
+                    , gint32 master_frame_nr)
+{
+#ifdef GAP_ENABLE_VIDEOAPI_SUPPORT
+  GapStoryRenderSection *section;
+  GapStoryRenderFrameRangeElem *frn_elem;
+  gint32                        l_max_open_videohandles;
+  gint32                        l_count_open_videohandles;
+
+  l_max_open_videohandles = gap_base_get_gimprc_int_value(GAP_GIMPRC_VIDEO_STORYBOARD_MAX_OPEN_VIDEOFILES
+                                                        , GAP_STB_DEFAULT_MAX_OPEN_VIDEOFILES
+                                                        , 2
+                                                        , 100
+                                                        );
+
+  l_count_open_videohandles = 0;
+  
+  
+  
+  for(section = vidhand->section_list; section != NULL; section = section->next)
+  {
+    char                  *section_name;
+    
+    section_name = section->section_name;
+    if (section_name == NULL)
+    {
+      section_name = "MAIN";
+    }
+    
+    for (frn_elem = section->frn_list; frn_elem != NULL; frn_elem = (GapStoryRenderFrameRangeElem *)frn_elem->next)
+    {
+      if (frn_elem->gvahand != NULL)
+      {
+        t_GVA_Handle *gvahand;
+        
+        l_count_open_videohandles++;
+        gvahand = frn_elem->gvahand;
+        
+        printf("STB Section:%s GVA_handle: %s  master_frame_nr:%d currFrameNr:%d fcache elemSize:%d byteSize:%d\n"
+          , section_name
+          , gvahand->filename
+          , (int) master_frame_nr
+          , (int) gvahand->current_frame_nr
+          , (int)GVA_get_fcache_size_in_elements(gvahand)
+          , (int)GVA_get_fcache_size_in_bytes(gvahand)
+          );
+      }
+    }
+
+  }
+  
+  
+  printf("STB at master_frame_nr:%d currently_open GVA_handles:%d (limit video-storyboard-max-open-videofiles:%d)\n"
+    ,(int)master_frame_nr
+    ,(int)l_count_open_videohandles
+    ,(int)l_max_open_videohandles
+    );
+  
+#endif
+}  /* end p_dump_stb_resources_gvahand */
+
+
+
+/* ----------------------------------------------------
+ * p_story_render_fetch_composite_image_or_buffer
+ * ----------------------------------------------------
+ * fetch composite VIDEO Image at a given master_frame_nr
+ * within a storyboard framerange list.
+ *
+ * the returned image is flattened RGB and scaled to
+ * desired video framesize.
+ *
+ *  it is a merged result of all video tracks,
+ *
+ *  frames at master_frame_nr were loaded
+ *  for all video tracks and added to the composite image
+ *   (track 0 on top, track N on bottom
+ *    of the layerstack)
+ *  opacity, scaling and move (decenter) attributes
+ *  were set according to the current Video Attributes.
+ *
+ * an (optional) filtermacro_file is performed on the
+ * composite image.
+ *
+ * (simple animations without a storyboard file
+ *  are represented by a short storyboard framerange list that has
+ *  just one element entry at track 1).
+ *
+ * return image_id of resulting image and the flattened resulting layer_id
+ */
+static gint32
+p_story_render_fetch_composite_image_or_buffer(GapStoryRenderVidHandle *vidhand
+                    , gint32 master_frame_nr  /* starts at 1 */
+                    , gint32  vid_width       /* desired Video Width in pixels */
+                    , gint32  vid_height      /* desired Video Height in pixels */
+                    , char *filtermacro_file  /* NULL if no filtermacro is used */
+                    , gint32 *layer_id        /* output: Id of the only layer in the composite image */
+                    , gboolean enable_rgb888_flag  /* enable fetch as rgb888 data buffer */
+                    , GapStoryFetchResult      *gapStoryFetchResult
+                 )
+{
+  gint32 image_id;
+  
+  image_id = p_story_render_fetch_composite_image_private(vidhand
+                                                  ,master_frame_nr
+                                                  ,vid_width
+                                                  ,vid_height
+                                                  ,filtermacro_file
+                                                  ,layer_id
+                                                  ,NULL      /* NULL as section name refers to the MAIN section */
+                                                  ,enable_rgb888_flag  /* enable fetch as rgb888 data buffer */
+                                                  ,gapStoryFetchResult
+                                                 );
+
+  if (image_id >= 0)
+  {
+    if(gapStoryFetchResult != NULL)
+    {
+      gapStoryFetchResult->resultEnum = GAP_STORY_FETCH_RESULT_IS_IMAGE;
+      gapStoryFetchResult->layer_id = *layer_id;
+      gapStoryFetchResult->image_id = image_id;
+    }
+  }
+  if(vidhand->isLogResourceUsage)
+  {
+    if((master_frame_nr % vidhand->resourceLogInterval) == 0)
+    {
+      gap_frame_fetch_dump_resources();
+      p_dump_stb_resources_gvahand(vidhand, master_frame_nr);
+    }
+  }
+  
+  
+  return (image_id);
+
+}  /* end p_story_render_fetch_composite_image_or_buffer */
 
 
 /* ----------------------------------------------------
- * gap_story_render_fetch_composite_image
+ * gap_story_render_fetch_composite_image (simple API)
  * ----------------------------------------------------
  * fetch composite VIDEO Image at a given master_frame_nr
  * within a storyboard framerange list.
@@ -5188,17 +5779,23 @@ gap_story_render_fetch_composite_image(GapStoryRenderVidHandle *vidhand
                     , gint32 *layer_id        /* output: Id of the only layer in the composite image */
                  )
 {
-  return (
-    p_story_render_fetch_composite_image_private(vidhand
+  gint32 image_id;
+  
+  image_id = p_story_render_fetch_composite_image_or_buffer(vidhand
                                                   ,master_frame_nr
                                                   ,vid_width
                                                   ,vid_height
                                                   ,filtermacro_file
                                                   ,layer_id
-                                                  ,NULL));
+                                                  ,FALSE
+                                                  ,NULL
+                                                  );
+  return (image_id);
+
 }  /* end gap_story_render_fetch_composite_image */
 
 
+
 /* ------------------------------------------------
  * p_split_delace_value
  * ------------------------------------------------
@@ -5390,39 +5987,88 @@ p_stb_render_image_or_animimage(GapStbFetchData *gfd
 
 }  /* end p_stb_render_image_or_animimage */
 
+/* --------------------------------------------------
+ * p_is_another_clip_playing_the_same_video_backwards
+ * --------------------------------------------------
+ * check if there are other video clips (in any track)
+ * that refer to the same videofile and play the frames in reverse order
+ */
+static gboolean
+p_is_another_clip_playing_the_same_video_backwards(GapStoryRenderFrameRangeElem *frn_elem_ref)
+{
+  GapStoryRenderFrameRangeElem *frn_elem;
+  
+  for (frn_elem = frn_elem_ref->next; frn_elem != NULL; frn_elem = (GapStoryRenderFrameRangeElem *)frn_elem->next)
+  {
+      if(frn_elem->frn_type == GAP_FRN_MOVIE)
+      {
+        if((frn_elem->exact_seek == frn_elem_ref->exact_seek)
+        && (frn_elem->seltrack == frn_elem_ref->seltrack)
+        && (strcmp(frn_elem->basename, frn_elem_ref->basename) == 0)
+        && (frn_elem->frame_from > frn_elem->frame_to))
+        {
+          /* we found a reference to the same video in a future clip where it plays backwards */
+          return (TRUE);
+        }
+      }
+  }
+  return (FALSE);
+}  /* end p_is_another_clip_playing_the_same_video_backwards */
 
-/* -------------------------------------------
- * p_stb_render_movie (GAP_FRN_MOVIE)
- * -------------------------------------------
- * fetch frame from a videofile (gfd->framename contains the videofile name)
+
+/* ----------------------------------------------------
+ * p_check_and_open_video_handle
+ * ----------------------------------------------------
+ * check and make sure that the clip specified with frn_elem has a
+ * usable GVA video handle attached.
+ * further set the frame cache size according to direction and configuration.
+ * Note that typical processing for video encoding with ascending sequential
+ * frame access does not speed up with a big fcache but wastes memory
+ * (especially on HD videos).
+ * On the other hand, backwards playing clips can have remarkably
+ * better performance using the GVA api internal frame cache
+ * (because it saves a lot of slow seek operations).
+ * Therefore the configured fcache size is only set in case the videoclip
+ * is played backwards (in this or in future clips referring to the same videofile);
+ * otherwise a small fcache size is used.
+ * The picked fcache size is set directly on the opened GVA handle.
  */
 static void
-p_stb_render_movie(GapStbFetchData *gfd
-  , GapStoryRenderVidHandle *vidhand
-  , gint32 master_frame_nr
-  , gint32  vid_width, gint32  vid_height)
+p_check_and_open_video_handle(GapStoryRenderFrameRangeElem *frn_elem
+   , GapStoryRenderVidHandle *vidhand
+   , gint32 master_frame_nr
+   , const gchar *videofile
+   )
 {
-  gfd->tmp_image_id = -1;
+  gboolean isPlayingBackwards;
 
+  if(frn_elem->frame_from > frn_elem->frame_to)
+  {
+    isPlayingBackwards = TRUE;
+  }
+  else
+  {
+    isPlayingBackwards = FALSE;
+  }
+  
 #ifdef GAP_ENABLE_VIDEOAPI_SUPPORT
-
-  if(gfd->frn_elem->gvahand == NULL)
+  
+  if(frn_elem->gvahand == NULL)
   {
      /* before we open a new GVA videohandle, lets check
       * if another element has already opened this videofile,
       * and reuse the already open gvahand handle if possible
       */
-     gfd->frn_elem->gvahand = p_try_to_steal_gvahand(vidhand
+     frn_elem->gvahand = p_try_to_steal_gvahand(vidhand
                                                  , master_frame_nr
-                                                 , gfd->frn_elem->basename
-                                                 , gfd->frn_elem->exact_seek
+                                                 , frn_elem
                                                  );
-     if(gfd->frn_elem->gvahand == NULL)
+     if(frn_elem->gvahand == NULL)
      {
        if(vidhand->preferred_decoder)
        {
-         gfd->frn_elem->gvahand = GVA_open_read_pref(gfd->framename
-                                , gfd->frn_elem->seltrack
+         frn_elem->gvahand = GVA_open_read_pref(videofile
+                                , frn_elem->seltrack
                                 , 1 /* aud_track */
                                 , vidhand->preferred_decoder
                                 , FALSE  /* use MMX if available (disable_mmx == FALSE) */
@@ -5430,25 +6076,323 @@ p_stb_render_movie(GapStbFetchData *gfd
        }
        else
        {
-         gfd->frn_elem->gvahand = GVA_open_read(gfd->framename
-                                           ,gfd->frn_elem->seltrack
+         frn_elem->gvahand = GVA_open_read(videofile
+                                           ,frn_elem->seltrack
                                            ,1 /* aud_track */
                                            );
        }
-
-       if(gfd->frn_elem->gvahand)
+       if(frn_elem->gvahand)
        {
-         GVA_set_fcache_size(gfd->frn_elem->gvahand, GAP_STB_RENDER_GVA_FRAMES_TO_KEEP_CACHED);
+         gint32   fcacheSize;
+         
+         fcacheSize = gap_base_get_gimprc_int_value(GAP_GIMPRC_VIDEO_STORYBOARD_FCACHE_SIZE_PER_VIDEOFILE
+                                                     , GAP_STB_RENDER_GVA_FRAMES_TO_KEEP_CACHED  /* default */
+                                                     , 2   /* min */
+                                                     , 250 /* max */
+                                                   );
+                                                     
+         if(!isPlayingBackwards)
+         {
+           if(FALSE == p_is_another_clip_playing_the_same_video_backwards(frn_elem))
+           {
+             /* use small fcache for standard ascending frame access */
+             fcacheSize = SMALL_FCACHE_SIZE_AT_FORWARD_READ;
+           }
+         }
+         GVA_set_fcache_size(frn_elem->gvahand, fcacheSize);
 
-         gfd->frn_elem->gvahand->do_gimp_progress = vidhand->do_gimp_progress;
-         if(gfd->frn_elem->exact_seek == 1)
+         frn_elem->gvahand->do_gimp_progress = vidhand->do_gimp_progress;
+         if(frn_elem->exact_seek == 1)
          {
-           /* configure the GVA Procedures for exact (but slow) seek emulaion */
-           gfd->frn_elem->gvahand->emulate_seek = TRUE;
+           /* configure the GVA Procedures for exact (but very slow) seek emulation */
+           frn_elem->gvahand->emulate_seek = TRUE;
          }
        }
      }
+  }
+#endif  
+}  /* end p_check_and_open_video_handle */
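The fcache sizing decision described in the comment above boils down to: use the configured (large) fcache only when this clip, or a later clip on the same videofile, plays backwards; otherwise fall back to a small cache for plain forward reads. A minimal sketch of just that decision (demo_* names are hypothetical; the small size of 3 is an assumption standing in for SMALL_FCACHE_SIZE_AT_FORWARD_READ):

#include <stdio.h>

#define DEMO_SMALL_FCACHE_SIZE 3   /* assumed stand-in for SMALL_FCACHE_SIZE_AT_FORWARD_READ */

/* pick the fcache size for one clip of a videofile */
static int demo_pick_fcache_size(int frame_from, int frame_to,
                                 int another_clip_plays_backwards,
                                 int configured_size)
{
  int is_playing_backwards = (frame_from > frame_to);

  if (is_playing_backwards || another_clip_plays_backwards)
    return configured_size;          /* big cache pays off: avoids repeated backward seeks */

  return DEMO_SMALL_FCACHE_SIZE;     /* plain ascending reads gain nothing from a big cache */
}

int main(void)
{
  printf("forward clip:  fcache=%d\n", demo_pick_fcache_size(10, 500, 0, 32));
  printf("backward clip: fcache=%d\n", demo_pick_fcache_size(500, 10, 0, 32));
  return 0;
}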
+
+
+
+static GThreadPool *
+p_get_PrefetchThreadPool()
+{
+  if (prefetchThreadPool == NULL)
+  {
+    gint    maxThreads;
+    GError *error = NULL;
+    
+      
+    maxThreads = gap_base_get_gimprc_int_value(GAP_GIMPRC_VIDEO_STORYBOARD_MAX_OPEN_VIDEOFILES
+                                                , GAP_STB_DEFAULT_MAX_OPEN_VIDEOFILES
+                                                , 2
+                                                , 100
+                                                );
+      
+    prefetchThreadPool = g_thread_pool_new((GFunc)p_videoPrefetchWorkerThreadFunction
+                                         ,NULL        /* user data */
+                                         ,maxThreads  /* max_threads */
+                                         ,TRUE        /* exclusive */
+                                         ,&error      /* GError **error */
+                                         );
+    if (prefetchThreadPool == NULL)
+    {
+      printf("** ERROR could not create prefetchThreadPool\n");
+    }
+  }
+  
+  return (prefetchThreadPool);
+}
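The prefetch pool above is a plain GLib thread pool; each push hands one VideoPrefetchData to a worker. A minimal standalone GThreadPool example showing the same create/push pattern (assuming the GLib 2.x threading API used elsewhere in this commit; the demo_* names and the prefetch payload are hypothetical):

#include <glib.h>
#include <stdio.h>

/* worker: runs in a pool thread for every pushed item */
static void demo_worker(gpointer data, gpointer user_data)
{
  gint frame_nr = GPOINTER_TO_INT(data);
  printf("prefetching frame %d\n", frame_nr);
  g_usleep(10000);                       /* stands in for the real frame read */
}

int main(void)
{
  GThreadPool *pool;
  GError      *error = NULL;
  gint         frame_nr;

  g_thread_init(NULL);                   /* required by the GLib 2.x API of that era */

  pool = g_thread_pool_new(demo_worker
                          ,NULL          /* user data */
                          ,4             /* max_threads */
                          ,TRUE          /* exclusive */
                          ,&error);
  if (pool == NULL)
  {
    printf("could not create pool: %s\n", error->message);
    return 1;
  }

  for (frame_nr = 1; frame_nr <= 8; frame_nr++)
  {
    g_thread_pool_push(pool, GINT_TO_POINTER(frame_nr), &error);
  }

  /* immediate == FALSE: finish all queued tasks before the pool is freed */
  g_thread_pool_free(pool, FALSE, TRUE);
  return 0;
}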
+
+
+/* -------------------------------------------
+ * p_initOptionalMulitprocessorSupport 
+ * -------------------------------------------
+ * this procedure creates a thread pool in case 
+ * the gimprc parameters are configured for multiprocessor support.
+ * In addition, vidhand->isMultithreadEnabled is set accordingly.
+ * Note that gimprc configuration is ignored in case
+ * GIMP_GAP was compiled without GAP_ENABLE_VIDEOAPI_SUPPORT.
+ */
+static void
+p_initOptionalMulitprocessorSupport(GapStoryRenderVidHandle *vidhand)
+{
+  vidhand->isMultithreadEnabled = FALSE;
+  
+#ifdef GAP_ENABLE_VIDEOAPI_SUPPORT
+  vidhand->isMultithreadEnabled = gap_story_isMultiprocessorSupportEnabled();
+  if (vidhand->isMultithreadEnabled)
+  {
+    /* check and init thread system */
+    vidhand->isMultithreadEnabled = gap_base_thread_init();
+  }
+  
+  if (vidhand->isMultithreadEnabled)
+  {
+    p_get_PrefetchThreadPool();
+  }
+#endif
+}  /* end p_initOptionalMulitprocessorSupport */
+
+
+/* ---------------------------------------------------
+ * p_call_GVA_close (GAP_FRN_MOVIE)
+ * ---------------------------------------------------
+ * check if the GVA video handle has VideoPrefetchData attached.
+ * this can occur in a multiprocessor environment where
+ * prefetch may be running as a parallel thread.
+ * in case the attached VideoPrefetchData indicates such an active thread,
+ * wait until this thread has finished and free its resources (mutex),
+ * finally close the GVA video handle.
+ */
+#ifdef GAP_ENABLE_VIDEOAPI_SUPPORT
+static void  
+p_call_GVA_close(t_GVA_Handle *gvahand)
+{
+  if(gap_debug)
+  {
+    printf("p_call_GVA_close gvahand:%d\n"
+          ,(int)gvahand
+          );
+  }
+  if(gvahand)
+  {
+    if(gap_debug)
+    {
+      printf("p_call_GVA_close  gvahand:%d (close is pending for %s)\n"
+           ,(int)gvahand
+            ,gvahand->filename
+            );
+    }
+    if(gvahand->user_data)
+    {
+      VideoPrefetchData *vpre;
+      
+      vpre = (VideoPrefetchData *)gvahand->user_data;
+      if(vpre)
+      {
+        GMutex            *mutex;
+        gint retryCount = 0;
+        
+        mutex = vpre->mutex;
+
+RETRY:        
+        GVA_fcache_mutex_lock (vpre->gvahand);
+
+        if((vpre->isPrefetchThreadRunning == TRUE)
+        && (retryCount < 100))
+        {
+          if(gap_debug)
+          {
+            printf("call_GVA_closeWAIT until prefetch worker thread finished (close is pending for %s) retry:%d mutex:%d fcache_mutex:%d\n"
+                  ,gvahand->filename
+                  ,(int)retryCount
+                  ,(int)mutex
+                  ,(int)gvahand->fcache_mutex
+                  );
+          }
+          g_cond_wait (vpre->prefetchDoneCond, vpre->mutex);
+          
+          if(gap_debug)
+          {
+            printf("call_GVA_close WAKE-UP prefetch worker thread finished (closing video %s) mutex:%d\n"
+                  ,gvahand->filename
+                  ,(int)mutex
+                  );
+          }
+          GVA_fcache_mutex_unlock (vpre->gvahand);
+
+          /* thread may need a short time to unlock the mutex after sending the prefetchDoneCond  */
+          g_usleep(150);
+
+          retryCount++;
+          goto RETRY;
+        }
+        else
+        {
+          if(gap_debug)
+          {
+            printf("call_GVA_close PrefetchThread NOT RUNNING, (closing video %s) retry:%d mutex:%d fcache_mutex:%d\n"
+                  ,gvahand->filename
+                  ,retryCount
+                  ,(int)mutex
+                  ,(int)gvahand->fcache_mutex
+                  );
+          }
+        }
+        /* detach the fcache mutex (to prevent API from using the lock again) */
+        gvahand->fcache_mutex = NULL;
+        vpre->mutex = NULL;
+        gvahand->user_data = NULL;
+        g_mutex_unlock(mutex);
+
+  
+        /* dispose the fcache mutex */
+        //g_mutex_free (mutex);               // TODO: g_mutex_free sometimes leads to CRASH
+        p_pooled_g_mutex_free(mutex);         // As workaround keep mutex alive in a pool for reuse...
+        g_cond_free (vpre->prefetchDoneCond);
+        
+        vpre->prefetchDoneCond = NULL;
+
+      }
+    }
+    GVA_close(gvahand);
+  }
+}  /* end  p_call_GVA_close */
+#endif
+
+
+/* --------------------------------------------------------------
+ * p_call_GVA_search_fcache_and_get_frame_as_gimp_layer_or_rgb888
+ * --------------------------------------------------------------
+ * wrapper for debug logging purpose.
+ */
+static void
+p_call_GVA_search_fcache_and_get_frame_as_gimp_layer_or_rgb888(t_GVA_Handle *gvahand
+                 , gint32   framenumber
+                 , gint32   deinterlace
+                 , gdouble  threshold
+                 , gint32   numProcessors
+                 , GVA_fcache_fetch_result *fcacheFetchResult
+                 , const char *caller
+                 )
+{
+  if(gap_debug)
+  {
+    printf("before call GVA_search_fcache: gvahand:%d framenumber:%d deinterlace:%d numProcessors:%d caller:%s\n"
+      ,(int)gvahand
+      ,(int)framenumber
+      ,(int)deinterlace
+      ,(int)numProcessors
+      ,caller
+      );
+  }
+  
+  GVA_search_fcache_and_get_frame_as_gimp_layer_or_rgb888(gvahand
+                 , framenumber
+                 , deinterlace
+                 , threshold
+                 , numProcessors                       /* numProcessors */
+                 , fcacheFetchResult
+                 );
+
+  if(gap_debug)
+  {
+    printf("after call GVA_search_fcache: "
+      "gvahand:%d framenumber:%d isRgb888Result:%d data:%d image_id:%d layer_id:%d isFrameAvailable:%d\n"
+      ,(int)gvahand
+      ,(int)framenumber
+      ,(int)fcacheFetchResult->isRgb888Result
+      ,(int)fcacheFetchResult->rgbBuffer.data
+      ,(int)fcacheFetchResult->image_id
+      ,(int)fcacheFetchResult->layer_id
+      ,(int)fcacheFetchResult->isFrameAvailable
+      );
+  }
+                 
+}  /* end p_call_GVA_search_fcache_and_get_frame_as_gimp_layer_or_rgb888 */
+
+
+
+/* ---------------------------------------------------
+ * p_get_gapStoryFetchResult_from_fcacheFetchResult
+ * ---------------------------------------------------
+ */
+static void
+p_get_gapStoryFetchResult_from_fcacheFetchResult(GapStbFetchData *gfd, GVA_fcache_fetch_result *fcacheFetchResult)
+{
+  /* get id of the newly created image from its layerId */
+  gfd->tmp_image_id = fcacheFetchResult->image_id;
+
+  if(gfd->gapStoryFetchResult != NULL)
+  {
+    gfd->gapStoryFetchResult->layer_id = fcacheFetchResult->layer_id;
+    gfd->gapStoryFetchResult->image_id = fcacheFetchResult->image_id;
+    if (fcacheFetchResult->isRgb888Result == TRUE)
+    {
+      /* result is available as rgb888 in gfd->gapStoryFetchResult->raw_rgb_data */
+      gfd->gapStoryFetchResult->resultEnum = GAP_STORY_FETCH_RESULT_IS_RAW_RGB888;
+
+      if(gfd->gapStoryFetchResult->raw_rgb_data != fcacheFetchResult->rgbBuffer.data)
+      {
+        gfd->gapStoryFetchResult->raw_rgb_data = fcacheFetchResult->rgbBuffer.data;
+      }
+    }
+    else
+    {
+      gfd->gapStoryFetchResult->resultEnum = GAP_STORY_FETCH_RESULT_IS_IMAGE;
+    }
+  }
+}  /* end p_get_gapStoryFetchResult_from_fcacheFetchResult */
+
+
+/* ---------------------------------------------------
+ * p_stb_render_movie_single_processor (GAP_FRN_MOVIE)
+ * ---------------------------------------------------
+ * fetch frame from a videofile (gfd->framename contains the videofile name)
+ * in a single processor environment.
+ */
+#ifdef GAP_ENABLE_VIDEOAPI_SUPPORT
+static void
+p_stb_render_movie_single_processor(GapStbFetchData *gfd
+  , GapStoryRenderVidHandle *vidhand
+  , gint32 master_frame_nr
+  , gint32  vid_width, gint32  vid_height)
+{
+  static gint32 funcId = -1;
+  GVA_fcache_fetch_result fcacheFetchResult;
 
+  GAP_TIMM_GET_FUNCTION_ID(funcId, "p_stb_render_movie_single_processor");
+  GAP_TIMM_START_FUNCTION(funcId);
+
+  fcacheFetchResult.isRgb888Result = FALSE;  /* configure fcache for standard fetch as gimp layer */
+  fcacheFetchResult.rgbBuffer.data = NULL;
+  
+  if(gfd->gapStoryFetchResult != NULL)
+  {
+    fcacheFetchResult.isRgb888Result = gfd->isRgb888Result;
+    fcacheFetchResult.rgbBuffer.data = gfd->gapStoryFetchResult->raw_rgb_data;
   }
 
   if(gfd->frn_elem->gvahand)
@@ -5465,36 +6409,27 @@ p_stb_render_movie(GapStbFetchData *gfd
              );
 
 
-     /* set image and layer in the gvahand structure invalid,
-      * to force creation of a new image in the following call of  GVA_frame_to_gimp_layer
-      */
-     gfd->frn_elem->gvahand->image_id = -1;
-     gfd->frn_elem->gvahand->layer_id = -1;
-
-
      /* attempt to read frame from the GVA API internal framecache */
+     p_call_GVA_search_fcache_and_get_frame_as_gimp_layer_or_rgb888(gfd->frn_elem->gvahand
+                 , gfd->localframe_index   /* framenumber */
+                 , l_deinterlace
+                 , l_threshold
+                 , 1                       /* numProcessors */
+                 , &fcacheFetchResult
+                 , "(single A)"
+                 );
 
-     /* printf("\nST: before  GVA_debug_print_fcache (2) #:%d\n", (int)gfd->localframe_index );
-      * GVA_debug_print_fcache(gfd->frn_elem->gvahand);
-      * printf("ST: before  GVA_frame_to_gimp_layer (2) attempt cache read  #:%d\n", (int)gfd->localframe_index );
-      */
-
-     l_fcr = GVA_frame_to_gimp_layer(gfd->frn_elem->gvahand
-                       , TRUE                 /* delete_mode */
-                       , gfd->localframe_index   /* framenumber */
-                       , l_deinterlace
-                       , l_threshold
-                       );
-
-     if (l_fcr != GVA_RET_OK)
+     if (fcacheFetchResult.isFrameAvailable != TRUE)
      {
        /* if no success, we try explicite read that frame  */
        if(gfd->frn_elem->gvahand->current_seek_nr != gfd->localframe_index)
        {
-         if(((gfd->frn_elem->gvahand->current_seek_nr + GAP_STB_RENDER_GVA_FRAMES_TO_KEEP_CACHED) > gfd->localframe_index)
+         if(((gfd->frn_elem->gvahand->current_seek_nr + NEAR_FRAME_DISTANCE) > gfd->localframe_index)
          &&  (gfd->frn_elem->gvahand->current_seek_nr < gfd->localframe_index ) )
          {
-           /* near forward seek is performed by dummyreads to fill up the framecache
+           /* near forward seek is performed by sequential reads
+            * note that a few sequential reads are typically faster than seek operations
+            * (even if native seek support is available for a videofile)
             */
            while(gfd->frn_elem->gvahand->current_seek_nr < gfd->localframe_index)
            {
@@ -5503,11 +6438,51 @@ p_stb_render_movie(GapStbFetchData *gfd
          }
          else
          {
+           gboolean isPlayingBackwards;
+           
+           if(gfd->frn_elem->frame_from > gfd->frn_elem->frame_to)
+           {
+             isPlayingBackwards = TRUE;
+           }
+           else
+           {
+             isPlayingBackwards = FALSE;
+           }
+           
            if(vidhand->do_gimp_progress)
            {
               gimp_progress_init(_("Seek Inputvideoframe..."));
            }
-           GVA_seek_frame(gfd->frn_elem->gvahand, (gdouble)gfd->localframe_index, GVA_UPOS_FRAMES);
+           
+           /* for a backwards playing clip seek before the wanted position
+            * and read some frames until the wanted position is reached to fill the fcache
+            * (this shall speed up the next few backward reads that can be fetched from the fcache)
+            */
+           if(isPlayingBackwards)
+           {
+             gdouble seekFrameNumber;
+             gdouble delta;
+           
+           
+             delta = GVA_get_fcache_size_in_elements(gfd->frn_elem->gvahand) -1;
+             seekFrameNumber = MAX((gdouble)gfd->localframe_index - delta, 2);
+             GVA_seek_frame(gfd->frn_elem->gvahand, seekFrameNumber, GVA_UPOS_FRAMES);
+             while(gfd->frn_elem->gvahand->current_seek_nr < gfd->localframe_index)
+             {
+               if(gap_debug)
+               {
+                 printf("BACKWARD fcache filling read current_seek_nr:%d target_frame_nr:%d\n"
+                   ,gfd->frn_elem->gvahand->current_seek_nr
+                   ,gfd->localframe_index
+                   );
+               }
+               GVA_get_next_frame(gfd->frn_elem->gvahand);
+             }
+           }
+           else
+           {
+             GVA_seek_frame(gfd->frn_elem->gvahand, (gdouble)gfd->localframe_index, GVA_UPOS_FRAMES);
+           }
            if(vidhand->do_gimp_progress)
            {
               gimp_progress_init(_("Continue Encoding..."));
@@ -5517,23 +6492,600 @@ p_stb_render_movie(GapStbFetchData *gfd
 
        if(GVA_get_next_frame(gfd->frn_elem->gvahand) == GVA_RET_OK)
        {
-         GVA_frame_to_gimp_layer(gfd->frn_elem->gvahand
-                         , TRUE   /* delete_mode */
-                         , gfd->localframe_index   /* framenumber */
-                         , l_deinterlace
-                         , l_threshold
-                         );
+         p_call_GVA_search_fcache_and_get_frame_as_gimp_layer_or_rgb888(gfd->frn_elem->gvahand
+                 , gfd->localframe_index   /* framenumber */
+                 , l_deinterlace
+                 , l_threshold
+                 , 1                       /* numProcessors */
+                 , &fcacheFetchResult
+                 , "(single B)"
+                 );
        }
      }
-     /* take the newly created image from gvahand stucture */
-     gfd->tmp_image_id = gfd->frn_elem->gvahand->image_id;
-     gfd->frn_elem->gvahand->image_id = -1;
-     gfd->frn_elem->gvahand->layer_id = -1;
+     
+     if (fcacheFetchResult.isFrameAvailable == TRUE)
+     {
+       p_get_gapStoryFetchResult_from_fcacheFetchResult(gfd, &fcacheFetchResult);
+     }
+     else
+     {
+       gfd->tmp_image_id = -1;
+     }
+  }
+  
+
+  GAP_TIMM_STOP_FUNCTION(funcId);
+
+}  /* end p_stb_render_movie_single_processor */
+#endif
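The backward-read branch above seeks to roughly one fcache length before the wanted frame and then reads sequentially up to it, so that the following (descending) frame requests become fcache hits. A tiny worked sketch of that seek-position arithmetic (demo_* names are hypothetical; the lower bound of 2 mirrors the code above):

#include <stdio.h>

#define DEMO_MAX(a,b) ((a) > (b) ? (a) : (b))

/* seek target for a backward-playing clip:
 * go (fcache_size - 1) frames before the wanted frame, but never before frame 2 */
static double demo_backward_seek_frame(int wanted_frame_nr, int fcache_size_in_elements)
{
  double delta = fcache_size_in_elements - 1;
  return DEMO_MAX((double)wanted_frame_nr - delta, 2);
}

int main(void)
{
  /* with a 32 element fcache, fetching frame 500 backwards seeks to 469
   * and then reads 469..500 sequentially; frames 469..499 are now cached
   * for the next (descending) requests */
  printf("seek to frame %.0f\n", demo_backward_seek_frame(500, 32));
  /* near the start of the video the lower bound of 2 kicks in */
  printf("seek to frame %.0f\n", demo_backward_seek_frame(10, 32));
  return 0;
}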
+
+
+
+/* -------------------------------------------
+ * p_call_GVA_get_next_frame_andSendReadySignal
+ * -------------------------------------------
+ * sequentially read the next frame from the videofile
+ * and send signal targetFrameReadyCond after the specified targetFrameNumber
+ * was read (and is now available in the fcache)
+ */
+#ifdef GAP_ENABLE_VIDEOAPI_SUPPORT
+static void
+p_call_GVA_get_next_frame_andSendReadySignal(VideoPrefetchData *vpre, gint32 targetFrameNumber)
+{
+  if(gap_debug)
+  {
+    printf("p_call_GVA_get_next_frame TID:%lld gvahand:%d targetFrameNumber:%d seek_nr:%d\n"
+      , gap_base_get_thread_id()
+      , (int)vpre->gvahand
+      , (int)targetFrameNumber
+      , (int)vpre->gvahand->current_seek_nr
+      );
+  }
+  
+  GVA_get_next_frame(vpre->gvahand);
+  GVA_fcache_mutex_lock (vpre->gvahand);
+  if (vpre->gvahand->current_frame_nr == targetFrameNumber)
+  {
+    if(gap_debug)
+    {
+      printf("p_call_GVA_get_next_frame TID:%lld gvahand:%d targetFrameNumber:%d SEND targetFrameReadyCond\n"
+        , gap_base_get_thread_id()
+        , (int)vpre->gvahand
+        , (int)targetFrameNumber
+        );
+    }
+    g_cond_signal  (vpre->targetFrameReadyCond);
+  }
+  
+  GVA_fcache_mutex_unlock (vpre->gvahand);
+  
+}  /* end p_call_GVA_get_next_frame_andSendReadySignal */
+#endif
+
+/* -------------------------------------------
+ * p_videoPrefetchWorkerThreadFunction
+ * -------------------------------------------
+ * this procedure runs as a thread pool function to prefetch the next few
+ * videoframes (targetFrameNumber up to prefetchFrameNumber) into the GVA api framecache.
+ * o) after the target frame was fetched it sends out the signal condition targetFrameReadyCond
+ * o) after all frames up to prefetchFrameNumber are fetched the signal condition prefetchDoneCond is sent.
+ *
+ * In case there is more than one videotrack to be processed,
+ * there may be more than one prefetch worker thread running parallel at the same time
+ * where each of those threads has its own VideoPrefetchData (e.g has its own gva video handle)
+ *
+ * Note: prefetch for backward reads fills up the fcache too, but in this case
+ * the target framenumber is the last one to be read from the videofile
+ * and the main thread has to wait until the whole prefetch cycle is finished.
+ * Therefore there is no performance advantage compared with a single processor on backward reads.
+ */
+#ifdef GAP_ENABLE_VIDEOAPI_SUPPORT
+static void
+p_videoPrefetchWorkerThreadFunction (VideoPrefetchData *vpre)
+{
+  gint32               prefetchFrameNumber;
+  gint32               targetFrameNumber;
+  gboolean             isPlayingBackwards;
+           
+  if(gap_debug)
+  {
+    printf("p_videoPrefetchWorkerThreadFunction START (before mutex lock) TID:%lld gvahand:%d  targetFrameNumber:%d\n"
+         , gap_base_get_thread_id()
+         ,(int)vpre->gvahand
+         ,(int)vpre->targetFrameNumber
+         );
+  }
+  
+  GVA_fcache_mutex_lock (vpre->gvahand);
+
+  prefetchFrameNumber = vpre->prefetchFrameNumber;
+  targetFrameNumber = vpre->targetFrameNumber;
+  if(vpre->isPlayingBackwards)
+  {
+    isPlayingBackwards = TRUE;
+    prefetchFrameNumber = targetFrameNumber;
+  }
+  else
+  {
+    isPlayingBackwards = FALSE;
+  }
+  
+  GVA_fcache_mutex_unlock (vpre->gvahand);
+
+  if(targetFrameNumber >= 0)
+  {
+    if(((vpre->gvahand->current_seek_nr + NEAR_FRAME_DISTANCE) > prefetchFrameNumber)
+    &&  (vpre->gvahand->current_seek_nr <= targetFrameNumber ) )
+    {
+      if(gap_debug)
+      {
+        printf("p_videoPrefetchWorkerThreadFunction TID:%lld NEAR FORWARD READ gvahand:%d targetFrameNumber:%d\n"
+        , gap_base_get_thread_id()
+         ,(int)vpre->gvahand
+         ,(int)targetFrameNumber
+         );
+      }
+      /* near forward seek is performed by sequential reads
+       * note that a few sequential reads are typically faster than seek operations
+       * (even if native seek support is available for a videofile)
+       */
+      while(vpre->gvahand->current_seek_nr <= prefetchFrameNumber)
+      {
+        p_call_GVA_get_next_frame_andSendReadySignal(vpre, targetFrameNumber);
+      }
+    }
+    else
+    {
+      /* for a backwards playing clip seek before the wanted position
+       * and read some frames until the wanted position is reached to fill the fcache
+       * (this shall speed up the next few backward reads that can be fetched from the fcache,
+       * but the main thread must wait until all frames are done because the
+       * wanted frame is the last one to be fetched)
+       */
+      if(isPlayingBackwards)
+      {
+        gdouble seekFrameNumber;
+        gdouble delta;
+        
+        if(gap_debug)
+        {
+          printf("p_videoPrefetchWorkerThreadFunction TID:%lld gvahand:%d BACKWARD READ targetFrameNumber:%d\n"
+             , gap_base_get_thread_id()
+             ,(int)vpre->gvahand
+             ,(int)targetFrameNumber
+             );
+        }
+        
+        delta = GVA_get_fcache_size_in_elements(vpre->gvahand) -1;
+        seekFrameNumber = MAX((gdouble)targetFrameNumber - delta, 2);
+        GVA_seek_frame(vpre->gvahand, seekFrameNumber, GVA_UPOS_FRAMES);
+        while(vpre->gvahand->current_seek_nr <= targetFrameNumber)
+        {
+            if(gap_debug)
+            {
+              printf("BACKWARD TID:%lld prefetch fcache filling read gvahand:%d current_seek_nr:%d target_frame_nr:%d\n"
+                , gap_base_get_thread_id()
+                ,(int)vpre->gvahand
+                ,(int)vpre->gvahand->current_seek_nr
+                ,(int)prefetchFrameNumber
+                );
+            }
+            p_call_GVA_get_next_frame_andSendReadySignal(vpre, targetFrameNumber);
+        }
+      }
+      else
+      {
+        if(gap_debug)
+        {
+          printf("p_videoPrefetchWorkerThreadFunction TID:%lld SEEK / READ gvahand:%d targetFrameNumber:%d\n"
+                , gap_base_get_thread_id()
+                ,(int)vpre->gvahand
+                ,(int)targetFrameNumber
+                );
+        }
+        GVA_seek_frame(vpre->gvahand, (gdouble)targetFrameNumber, GVA_UPOS_FRAMES);
+        while(vpre->gvahand->current_seek_nr <= prefetchFrameNumber)
+        {
+          p_call_GVA_get_next_frame_andSendReadySignal(vpre, targetFrameNumber);
+        }
+      }
+    }
+  }
+  
+
+  GVA_fcache_mutex_lock (vpre->gvahand);
+  vpre->isPrefetchThreadRunning = FALSE;
+  g_cond_signal  (vpre->targetFrameReadyCond);
+  g_cond_signal  (vpre->prefetchDoneCond);
+  GVA_fcache_mutex_unlock (vpre->gvahand);
+
+  if(gap_debug)
+  {
+    printf("p_videoPrefetchWorkerThreadFunction DONE TID:%lld gvahand:%d\n"
+       ,gap_base_get_thread_id()
+       ,(int)vpre->gvahand
+       );
+  }
+
+}  /* end p_videoPrefetchWorkerThreadFunction */
+#endif
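The coordination between this worker and its consumers (p_stb_render_movie_multiprocessor below and p_call_GVA_close above) is a GMutex/GCond handshake: the worker signals targetFrameReadyCond as soon as the wanted frame is cached and prefetchDoneCond when it exits, while the consumer waits on the condition under the shared mutex. A self-contained sketch of just that handshake (using the GLib 2.x era API seen in this commit; the demo_* names are hypothetical and the "prefetch work" is simulated):

#include <glib.h>
#include <stdio.h>

typedef struct
{
  GMutex   *mutex;
  GCond    *target_ready_cond;
  gboolean  target_ready;          /* protected by mutex */
  gboolean  worker_running;        /* protected by mutex */
} DemoPrefetchData;

/* worker: pretends to prefetch frames, then signals that the target frame is ready */
static gpointer demo_worker(gpointer data)
{
  DemoPrefetchData *pre = (DemoPrefetchData *)data;

  g_usleep(50000);                 /* stands in for reading frames into the cache */

  g_mutex_lock(pre->mutex);
  pre->target_ready = TRUE;
  pre->worker_running = FALSE;
  g_cond_signal(pre->target_ready_cond);
  g_mutex_unlock(pre->mutex);
  return NULL;
}

int main(void)
{
  DemoPrefetchData pre;
  GThread *thread;

  g_thread_init(NULL);
  pre.mutex = g_mutex_new();
  pre.target_ready_cond = g_cond_new();
  pre.target_ready = FALSE;
  pre.worker_running = TRUE;

  thread = g_thread_create(demo_worker, &pre, TRUE /* joinable */, NULL);

  /* consumer side: wait until the worker signals that the target frame is ready.
   * g_cond_wait unlocks the mutex while sleeping and re-locks it before returning,
   * so the flag check and the wait are race free. */
  g_mutex_lock(pre.mutex);
  while (!pre.target_ready)
  {
    g_cond_wait(pre.target_ready_cond, pre.mutex);
  }
  g_mutex_unlock(pre.mutex);

  printf("target frame is ready, worker running:%d\n", (int)pre.worker_running);

  g_thread_join(thread);
  g_cond_free(pre.target_ready_cond);
  g_mutex_free(pre.mutex);
  return 0;
}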
+
+
+
+/* -------------------------------------------
+ * p_getPredictedNextFramenr
+ * -------------------------------------------
+ * calculate the predicted frame number for prefetch on forward read.
+ * Frames from targetFrameNr up to the calculated predicted frame number
+ * shall be read in parallel in advance.
+ * prefetch is limited by MULTITHREAD_PREFETCH_AMOUNT, the GVA fcache size and
+ * the highestReferedFrameNr (the current check is limited to the current clip)
+ *
+ * TODO: if the same video is also referred to in the next clip in a continuous sequence,
+ * the highestReferedFrameNr could be increased accordingly.
+ */
+#ifdef GAP_ENABLE_VIDEOAPI_SUPPORT
+static gint32
+p_getPredictedNextFramenr(gint32 targetFrameNr, GapStoryRenderFrameRangeElem *frn_elem)
+{
+  gint32 predictedFrameNr;
+  
+  
+  if (frn_elem->frame_from > frn_elem->frame_to)
+  {
+    /* Backward Read */
+    predictedFrameNr = targetFrameNr;
+    if(gap_debug)
+    {
+      printf("p_getPredictedNextFramenr BACKWARD READ targetFrameNr:%d predictedFrameNr:%d total_frames:%d\n"
+        ,(int)targetFrameNr
+        ,(int)predictedFrameNr
+        ,(int)frn_elem->gvahand->total_frames
+        );
+    }
+  }
+  else
+  {
+    gint32 fcacheSize;
+    gint32 prefetchAmount;
+    gint32 highestReferedFrameNr;
+    
+    fcacheSize = GVA_get_fcache_size_in_elements(frn_elem->gvahand);
+    prefetchAmount = MIN(MULTITHREAD_PREFETCH_AMOUNT, fcacheSize -1);
+    
+    highestReferedFrameNr = frn_elem->frame_to;
+//     if(frn_elem->gvahand->all_frames_counted == TRUE)
+//     {
+//       highestReferedFrameNr = MIN(frn_elem->frame_to, frn_elem->gvahand->total_frames);
+//     }
+
+    predictedFrameNr = MIN((targetFrameNr + prefetchAmount), highestReferedFrameNr);
+
+    if(gap_debug)
+    {
+      printf("p_getPredictedNextFramenr targetFrameNr:%d predictedFrameNr:%d total_frames:%d (all_counted:%d) amount:%d hi:%d fcacheSize:%d\n"
+        ,(int)targetFrameNr
+        ,(int)predictedFrameNr
+        ,(int)frn_elem->gvahand->total_frames
+        ,(int)frn_elem->gvahand->all_frames_counted
+        ,(int)prefetchAmount
+        ,(int)highestReferedFrameNr
+        ,(int)fcacheSize
+        );
+    }
+
+  }
+
+  return (predictedFrameNr);
+  
+}  /* end  p_getPredictedNextFramenr */
+#endif
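For forward reads the prediction above reduces to: predicted = MIN(target + MIN(MULTITHREAD_PREFETCH_AMOUNT, fcacheSize - 1), frame_to). A short worked sketch of that forward case (demo_* names and the prefetch amount of 12 are assumptions for illustration):

#include <stdio.h>

#define DEMO_MIN(a,b) ((a) < (b) ? (a) : (b))
#define DEMO_PREFETCH_AMOUNT 12    /* assumed stand-in for MULTITHREAD_PREFETCH_AMOUNT */

/* forward-read prediction: never prefetch past the fcache capacity or past
 * the highest frame number that this clip will request (frame_to) */
static int demo_predict_next_frame_nr(int target_frame_nr, int frame_to, int fcache_size)
{
  int prefetch_amount = DEMO_MIN(DEMO_PREFETCH_AMOUNT, fcache_size - 1);
  return DEMO_MIN(target_frame_nr + prefetch_amount, frame_to);
}

int main(void)
{
  /* plenty of headroom: prefetch 12 frames ahead of the target */
  printf("predicted:%d\n", demo_predict_next_frame_nr(100, 400, 32));   /* 112 */
  /* a small fcache limits the prefetch window */
  printf("predicted:%d\n", demo_predict_next_frame_nr(100, 400, 6));    /* 105 */
  /* near the end of the clip the prediction is clamped to frame_to */
  printf("predicted:%d\n", demo_predict_next_frame_nr(395, 400, 32));   /* 400 */
  return 0;
}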
+
+
+/* ------------------------------------------------
+ * p_stb_render_movie_multiprocessor (GAP_FRN_MOVIE)
+ * ------------------------------------------------
+ * this procedure runs as the main thread when fetching video frames in
+ * a multithreaded environment.
+ * It reads the required target frame from the GVA api fcache
+ * and triggers a parallel prefetch thread.
+ * The parallel running prefetch thread fills up the GVA api fcache
+ * by reading (more than one) frame from the videofile in advance,
+ * to increase the fcache hit chance at the next call.
+ *
+ * in case the fcache does not (yet) contain the target frame
+ * the main thread has to wait until the target frame is ready.
+ * (This is typical at the first call and whenever the target framenumber
+ * differs significantly from the previous call)
+ */
+#ifdef GAP_ENABLE_VIDEOAPI_SUPPORT
+static void
+p_stb_render_movie_multiprocessor(GapStbFetchData *gfd
+  , GapStoryRenderVidHandle *vidhand
+  , gint32 master_frame_nr
+  , gint32  vid_width, gint32  vid_height)
+{
+  VideoPrefetchData *vpre;
+  gint32             l_deinterlace;
+  gdouble            l_threshold;
+  t_GVA_RetCode      l_fcr;
+  gint32             retryCount;
+  gint32             predictedNextFrameNr;
+  gint32             targetFrameNumber;
+  gint32             numProcessors;
+  
+  GVA_fcache_fetch_result fcacheFetchResult;
+  GError *error;
+
+  static gint32 funcId = -1;
+  static gint32 funcIdWait = -1;
+
+  GAP_TIMM_GET_FUNCTION_ID(funcId, "p_stb_render_movie_multiprocessor");
+  GAP_TIMM_GET_FUNCTION_ID(funcIdWait, "p_stb_render_movie_multiprocessor.Wait");
+
+  GAP_TIMM_START_FUNCTION(funcId);
+
+  error = NULL;
+  targetFrameNumber = gfd->localframe_index; /* this framenumber is required now for processing */
+  numProcessors = gap_base_get_numProcessors();
+  
+  fcacheFetchResult.isRgb888Result = FALSE;  /* configure fcache for standard fetch as gimp layer */
+  fcacheFetchResult.rgbBuffer.data = NULL;
+  
+  if(gfd->gapStoryFetchResult != NULL)
+  {
+    fcacheFetchResult.isRgb888Result = gfd->isRgb888Result;
+    fcacheFetchResult.rgbBuffer.data = gfd->gapStoryFetchResult->raw_rgb_data;
+  }
+  
+ 
+ /* split delace value: integer part is deinterlace mode, rest is threshold */
+  p_split_delace_value(gfd->frn_elem->delace
+             , gfd->localframe_tween_rest
+             , &l_deinterlace
+             , &l_threshold
+             );
+
+  predictedNextFrameNr = p_getPredictedNextFramenr(targetFrameNumber, gfd->frn_elem);
+
+  vpre = (VideoPrefetchData *)gfd->frn_elem->gvahand->user_data;
+  if(vpre == NULL)
+  {
+    /* attach VideoPrefetchData for multithread usage as user_data to the GVA handle */
+    vpre = g_new(VideoPrefetchData, 1);
+    vpre->gvahand = gfd->frn_elem->gvahand;
+    vpre->prefetchFrameNumber = predictedNextFrameNr;
+    vpre->targetFrameNumber = targetFrameNumber;
+    vpre->isPrefetchThreadRunning = FALSE;
+    vpre->mutex = p_pooled_g_mutex_new ();
+    vpre->targetFrameReadyCond = g_cond_new ();
+    vpre->prefetchDoneCond = g_cond_new ();
+    vpre->isPlayingBackwards = FALSE;
+    if (gfd->frn_elem->frame_from > gfd->frn_elem->frame_to)
+    {
+      vpre->isPlayingBackwards = TRUE;
+    }
+    gfd->frn_elem->gvahand->user_data = vpre;
+    
+    /* Let the GVA api know about the mutex.
+     * This triggers g_mutex_lock / g_mutex_unlock calls in API internal functions
+     * dealing with the fcache access.
+     */
+    gfd->frn_elem->gvahand->fcache_mutex = vpre->mutex;
+  }
+
+  for(retryCount=0; retryCount < 200; retryCount++)
+  {
+    /* check if targetFrameNumber is available in the GVA API internal framecache
+     * and get a (optional deinterlaced) copy as gimp layer with positive layerId when TRUE.
+     */
+    p_call_GVA_search_fcache_and_get_frame_as_gimp_layer_or_rgb888(gfd->frn_elem->gvahand
+                 , targetFrameNumber
+                 , l_deinterlace
+                 , l_threshold
+                 , numProcessors
+                 , &fcacheFetchResult
+                 , "(multi)"
+                 );
+    if (fcacheFetchResult.isFrameAvailable == TRUE)
+    {
+      /* fcache hit */
+      gboolean triggerMorePrefetch;
+      gint32   fnr;
+      
+      if(gap_debug)
+      {
+        printf("FCACHE-HIT gvahand:%d framenr:%d predictedFrameNr:%d retryCount:%d\n"
+          , (int)vpre->gvahand
+          , (int)targetFrameNumber
+          , (int)predictedNextFrameNr
+          , (int)retryCount
+          );
+      }
+      triggerMorePrefetch = FALSE;
+      if ((vpre->isPrefetchThreadRunning != TRUE)
+      && (predictedNextFrameNr > targetFrameNumber))
+      {
+        for(fnr = targetFrameNumber +1 ; fnr <= predictedNextFrameNr; fnr++)
+        {
+          l_fcr = GVA_search_fcache(gfd->frn_elem->gvahand, fnr);
+          if (l_fcr != GVA_RET_OK)
+          {
+            triggerMorePrefetch = TRUE;
+            break;
+          }
+        }
+      }
+      if (triggerMorePrefetch == TRUE)
+      {
+        if(TRUE == GVA_fcache_mutex_trylock (vpre->gvahand))
+        {
+          predictedNextFrameNr = p_getPredictedNextFramenr(targetFrameNumber +1, gfd->frn_elem);
+          vpre->prefetchFrameNumber = predictedNextFrameNr;
+          vpre->targetFrameNumber = fnr;
+          if(gap_debug)
+          {
+            printf("MORE-PREFETCH PUSH-1 gvahand:%d framenr:%d predictedFrameNr:%d retryCount:%d\n"
+              , (int)vpre->gvahand
+              , (int)fnr
+              , (int)predictedNextFrameNr
+              , (int)retryCount
+              );
+          }
+
+          vpre->isPrefetchThreadRunning = TRUE;
+          /* (re)activate a worker thread for next prefetch that fills fcache upto prefetchFrameNumber */
+          g_thread_pool_push (p_get_PrefetchThreadPool()
+                           , vpre    /* VideoPrefetchData */
+                           , &error
+                           );
+          GVA_fcache_mutex_unlock (vpre->gvahand);
+        }
+      }
+      break;
+    }
+    else
+    {
+      /* frame is NOT (yet) in fcache */
+      GVA_fcache_mutex_lock (vpre->gvahand);
+      
+      if(vpre->isPrefetchThreadRunning != TRUE)
+      {
+        vpre->prefetchFrameNumber = predictedNextFrameNr;
+        vpre->targetFrameNumber = targetFrameNumber;
+
+        if(gap_debug)
+        {
+            printf("TRIGGER PREFETCH PUSH-2 gvahand:%d targetFrameNumber:%d predictedFrameNr:%d retryCount:%d\n"
+              , (int)vpre->gvahand
+              , (int)targetFrameNumber
+              , (int)predictedNextFrameNr
+              , (int)retryCount
+              );
+        }
+ 
+        vpre->isPrefetchThreadRunning = TRUE;
+        /* (re)activate a worker thread for next prefetch that fills fcache upto prefetchFrameNumber */
+        g_thread_pool_push (p_get_PrefetchThreadPool()
+                         , vpre    /* VideoPrefetchData */
+                         , &error
+                         );
+
+      }
+      
+      if(gap_debug)
+      {
+        printf("WAIT  gvahand:%d until prefetch worker thread has fetched target framenr:%d predictedFrameNr:%d retryCount:%d\n"
+          , (int)vpre->gvahand
+          , (int)targetFrameNumber
+          , (int)predictedNextFrameNr
+          , (int)retryCount
+          );
+      }
+
+      GAP_TIMM_START_FUNCTION(funcIdWait);
+
+      /* wait until the next frame is fetched.
+       * g_cond_wait Waits until this thread is woken up on targetFrameReadyCond. 
+       * The mutex is unlocked before falling asleep and locked again before resuming. 
+       */ 
+      g_cond_wait (vpre->targetFrameReadyCond, vpre->mutex);
+
+
+      GAP_TIMM_STOP_FUNCTION(funcIdWait);
+
+      if(gap_debug)
+      {
+        printf("WAKE-UP gvahand:%d target framenr:%d predictedFrameNr:%d retryCount:%d\n"
+          , (int)vpre->gvahand
+          , (int)targetFrameNumber
+          , (int)predictedNextFrameNr
+          , (int)retryCount
+          );
+      }
+      GVA_fcache_mutex_unlock (vpre->gvahand);
+  
+      /* retry another attempt to get the frame from the fcache (that shall be filled by the worker thread) */
+    }
+  }
+
+  if(gap_debug)
+  {
+    printf("RETRY LOOP gvahand:%d done target framenr:%d predictedFrameNr:%d layerId:%d isRgb888Result:%d retryCount:%d\n"
+      , (int)vpre->gvahand
+      , (int)targetFrameNumber
+      , (int)predictedNextFrameNr
+      , (int)fcacheFetchResult.layer_id
+      , (int)fcacheFetchResult.isRgb888Result
+      , (int)retryCount
+      );
+  }
+
+  if(fcacheFetchResult.isFrameAvailable == TRUE)
+  {
+    p_get_gapStoryFetchResult_from_fcacheFetchResult(gfd, &fcacheFetchResult);
+  }
+  else
+  {
+    if(gfd->gapStoryFetchResult != NULL)
+    {
+      gfd->gapStoryFetchResult->resultEnum = GAP_STORY_FETCH_RESULT_IS_ERROR;
+    }
+    printf("** ERROR Failed to fetch from video:%s gvahand:%d frame:%d predictedFrameNr:%d retryCount:%d\n"
+          , gfd->frn_elem->gvahand->filename
+          , (int)gfd->frn_elem->gvahand
+          , (int)gfd->localframe_index
+          , (int)predictedNextFrameNr
+          , (int)retryCount
+          );
+  }
+
+  GAP_TIMM_STOP_FUNCTION(funcId);
+
+}  /* end p_stb_render_movie_multiprocessor */
+#endif
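
The wait/wake handshake above follows the standard GLib condition-variable pattern. A minimal, self-contained sketch of that pattern (hypothetical names, classic GLib 2.x thread API of that era; not code from this commit):

#include <glib.h>

static GMutex  *fetchMutex     = NULL;   /* create once via g_mutex_new() */
static GCond   *frameReadyCond = NULL;   /* create once via g_cond_new()  */
static gboolean frameReady     = FALSE;

/* consumer side: sleep until the producer signals a fetched frame */
static void
p_wait_for_frame(void)
{
  g_mutex_lock(fetchMutex);
  while (frameReady != TRUE)
  {
    /* unlocks fetchMutex while sleeping, re-locks it before returning */
    g_cond_wait(frameReadyCond, fetchMutex);
  }
  frameReady = FALSE;                    /* consume the event */
  g_mutex_unlock(fetchMutex);
}

/* producer side (prefetch worker): publish the frame and wake a waiting consumer */
static void
p_signal_frame_ready(void)
{
  g_mutex_lock(fetchMutex);
  frameReady = TRUE;
  g_cond_signal(frameReadyCond);
  g_mutex_unlock(fetchMutex);
}

In the code above the retry loop re-checks the fcache after each wake-up, which plays the role of the while() predicate in this sketch.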
+
+
+/* -------------------------------------------
+ * p_stb_render_movie (GAP_FRN_MOVIE) 
+ * -------------------------------------------
+ * fetch frame from a videofile (gfd->framename contains the videofile name)
+ * on success the id of the fetched image is set in gfd->tmp_image_id,
+ * on failure gfd->tmp_image_id is set to -1.
+ */
+static void
+p_stb_render_movie(GapStbFetchData *gfd
+  , GapStoryRenderVidHandle *vidhand
+  , gint32 master_frame_nr
+  , gint32  vid_width, gint32  vid_height)
+{
+  gfd->tmp_image_id = -1;
+
+#ifdef GAP_ENABLE_VIDEOAPI_SUPPORT
+
+  p_check_and_open_video_handle(gfd->frn_elem
+                               , vidhand
+                               , master_frame_nr
+                               , gfd->frn_elem->basename /* videofile name */
+                               );
+  if(gfd->frn_elem->gvahand)
+  {
+    if (vidhand->isMultithreadEnabled == TRUE)
+    {
+      p_stb_render_movie_multiprocessor(gfd, vidhand, master_frame_nr, vid_width, vid_height);
+    }
+    else
+    {
+      p_stb_render_movie_single_processor(gfd, vidhand, master_frame_nr, vid_width, vid_height);
+    }
   }
+
 #endif
+
 }  /* end p_stb_render_movie */
 
 
+
 /* -------------------------------------------
  * p_stb_render_section (GAP_FRN_SECTION)
  * -------------------------------------------
@@ -5569,6 +7121,8 @@ p_stb_render_section(GapStbFetchData *gfd
                                            , NULL /* filtrmacro_file */
                                            , &sub_layer_id
                                            , sub_section_name
+                                           , FALSE            /* enable_rgb888_flag */
+                                           , NULL             /* GapStoryFetchResult */
                                            );
 
   if(gap_debug)
@@ -5707,7 +7261,7 @@ p_stb_render_composite_image_postprocessing(GapStbFetchData *gfd
   if ((gimp_image_width(gfd->comp_image_id) != vid_width)
   ||  (gimp_image_height(gfd->comp_image_id) != vid_height) )
   {
-     if(gap_debug) printf("DEBUG: p_story_render_fetch_composite_image_private: scaling tmp image\n");
+     if(gap_debug) printf("DEBUG: p_stb_render_composite_image_postprocessing: scaling tmp image\n");
 
      gimp_image_scale(gfd->comp_image_id, vid_width, vid_height);
   }
@@ -5724,7 +7278,7 @@ p_stb_render_composite_image_postprocessing(GapStbFetchData *gfd
   if((l_nlayers > 1 )
   || (gimp_drawable_has_alpha(gfd->layer_id)))
   {
-     if(gap_debug) printf("DEBUG: p_story_render_fetch_composite_image_private  FINAL flatten Composite image\n");
+     if(gap_debug) printf("DEBUG: p_stb_render_composite_image_postprocessing  FINAL flatten Composite image\n");
 
       /* flatten current frame image (reduce to single layer) */
       gfd->layer_id = gimp_image_flatten (gfd->comp_image_id);
@@ -5815,20 +7369,20 @@ p_paste_logo_pattern(gint32 drawable_id
 } /* end p_copy_and_paste_replacement_pattern */
 
 
-
 /* -------------------------------------
- * p_do_insert_area_processing
+ * p_get_insert_area_filename
  * -------------------------------------
- * add logo area to video clip
+ * get filename for an image that shall
+ * be used as logo for automatic area insertion
+ * on MOVIE clips.
  */
-static void
-p_do_insert_area_processing(GapStbFetchData *gfd
-  , GapStoryRenderVidHandle *vidhand)
+static char*
+p_get_insert_area_filename(GapStbFetchData *gfd
+                      , GapStoryRenderVidHandle *vidhand)
 {
   char *logo_imagename;
   char *videofilename_without_path;
 
-
   videofilename_without_path = gap_lib_build_basename_without_ext(gfd->framename);
 
   if (vidhand->master_insert_area_format_has_framenumber)
@@ -5866,14 +7420,33 @@ p_do_insert_area_processing(GapStbFetchData *gfd
 
   if(gap_debug)
   {
-    printf("p_do_insert_area_processing: format:%s\n video:%s\n logo_imagename:%s\n"
+    printf("p_get_insert_area_filename: format:%s\n video:%s\n logo_imagename:%s\n"
         , vidhand->master_insert_area_format
         , videofilename_without_path
         , logo_imagename
         );
   }
 
+  g_free(videofilename_without_path);
+
+  return(logo_imagename);
+
+}  /* end p_get_insert_area_filename */
 
+
+/* -------------------------------------
+ * p_do_insert_area_processing
+ * -------------------------------------
+ * add logo area to video clip
+ */
+static void
+p_do_insert_area_processing(GapStbFetchData *gfd
+  , GapStoryRenderVidHandle *vidhand)
+{
+  char *logo_imagename;
+
+  logo_imagename = p_get_insert_area_filename(gfd, vidhand);
+  
   if(g_file_test(logo_imagename, G_FILE_TEST_EXISTS))
   {
     gint32 logo_image_id;
@@ -5895,6 +7468,8 @@ p_do_insert_area_processing(GapStbFetchData *gfd
     if(logo_image_id < 0)
     {
       printf("p_do_insert_area_processing: ERROR could not load logo_imagename:%s\n", logo_imagename);
+      
+      g_free(logo_imagename);
       return;
     }
 
@@ -5914,6 +7489,8 @@ p_do_insert_area_processing(GapStbFetchData *gfd
     }
   }
 
+  g_free(logo_imagename);
+
 }  /* end p_do_insert_area_processing */
 
 
@@ -5974,28 +7551,25 @@ p_prepare_GRAY_image(gint32 image_id)
   }
 
   return(l_layer_id);
+  
 } /* end p_prepare_GRAY_image */
 
 
 
-
 /* -------------------------------------
- * p_do_insert_alpha_processing
+ * p_get_insert_alpha_filename
  * -------------------------------------
- * adds alpha channel for videoframes
- * based on an image or series of frames
- * matching the configured format string.
- * (VID_MASTER_INSERT_ALPHA)
- *
+ * get filename for an image that shall
+ * be used as alpha channel for automatic alpha channel
+ * generation on MOVIE clips.
  */
-static void
-p_do_insert_alpha_processing(GapStbFetchData *gfd
-  , GapStoryRenderVidHandle *vidhand)
+static char*
+p_get_insert_alpha_filename(GapStbFetchData *gfd
+                      , GapStoryRenderVidHandle *vidhand)
 {
   char *alpha_imagename;
   char *videofilename_without_path;
 
-
   videofilename_without_path =   gap_lib_build_basename_without_ext(gfd->framename);
 
   if (vidhand->master_insert_alpha_format_has_framenumber)
@@ -6033,12 +7607,35 @@ p_do_insert_alpha_processing(GapStbFetchData *gfd
 
   if(gap_debug)
   {
-    printf("p_do_insert_alpha_processing: format:%s\n video:%s\n alpha_imagename:%s\n"
+    printf("p_get_insert_alpha_filename: format:%s\n video:%s\n alpha_imagename:%s\n"
         , vidhand->master_insert_alpha_format
         , videofilename_without_path
         , alpha_imagename
         );
   }
+  
+  g_free(videofilename_without_path);
+  
+  return(alpha_imagename);
+  
+}  /* end p_get_insert_alpha_filename */
+
+/* -------------------------------------
+ * p_do_insert_alpha_processing
+ * -------------------------------------
+ * adds alpha channel for videoframes
+ * based on an image or series of frames
+ * matching the configured format string.
+ * (VID_MASTER_INSERT_ALPHA)
+ *
+ */
+static void
+p_do_insert_alpha_processing(GapStbFetchData *gfd
+  , GapStoryRenderVidHandle *vidhand)
+{
+  char *alpha_imagename;
+
+  alpha_imagename = p_get_insert_alpha_filename(gfd, vidhand);
 
 
   if(g_file_test(alpha_imagename, G_FILE_TEST_EXISTS))
@@ -6066,6 +7663,7 @@ p_do_insert_alpha_processing(GapStbFetchData *gfd
       printf("p_do_insert_alpha_processing: ERROR could not load alpha_imagename:%s\n"
             , alpha_imagename
             );
+      g_free(alpha_imagename);
       return;
     }
 
@@ -6108,8 +7706,263 @@ p_do_insert_alpha_processing(GapStbFetchData *gfd
     }
   }
 
+  g_free(alpha_imagename);
+
 }  /* end p_do_insert_alpha_processing */
 
+/* --------------------------------------------
+ * p_isFiltermacroActive  
+ * --------------------------------------------
+ */
+static gboolean
+p_isFiltermacroActive(const char *filtermacro_file)
+{
+  if(filtermacro_file)
+  {
+     if(*filtermacro_file != '\0')
+     {
+       return(TRUE);  
+     }
+  }
+  return(FALSE); 
+  
+}  /* end p_isFiltermacroActive */
+
+
+/* --------------------------------------------
+ * p_story_render_bypass_where_possible          rgb888 handling
+ * --------------------------------------------
+ * this procedure checks if the current frame can be directly fetched as rgb888 buffer
+ * from a videoclip without the need to convert to a gimp drawable and to render transitions.
+ * (this can speed up videoencoders that can handle rgb888 data)
+ * 
+ * if an rgb888 fetch is possible for the current master_frame_nr
+ * the fetch is performed and TRUE will be returned.
+ * otherwise FALSE is returned.
+ */
+static gboolean
+p_story_render_bypass_where_possible(GapStoryRenderVidHandle *vidhand
+                    , gint32 master_frame_nr  /* starts at 1 */
+                    , gint32  vid_width       /* desired Video Width in pixels */
+                    , gint32  vid_height      /* desired Video Height in pixels */
+                    , gboolean enable_rgb888_flag  /* enable fetch as rgb888 data buffer */
+                    , GapStoryFetchResult      *gapStoryFetchResult
+                 )
+{
+  GapStbFetchData gapStbFetchData;
+  GapStbFetchData *gfd;
+  GapStbFetchData *gfdMovie;
+  gchar           *videofileName;
+  gboolean         isByPassRenderEngine;
+  gint32           l_track;
+  gdouble l_red_f;
+  gdouble l_green_f;
+  gdouble l_blue_f;
+  gdouble l_alpha_f;
+
+  isByPassRenderEngine = FALSE;
+  gfd = &gapStbFetchData;
+  gfdMovie = NULL;
+  videofileName = NULL;
+  
+  if(gap_debug)
+  {
+    printf("p_story_render_bypass_where_possible START master_frame_nr:%d minVidTrack:%d maxVidTrack:%d enable_rgb888_flag:%d\n"
+      ,(int)master_frame_nr
+      ,(int)vidhand->minVidTrack
+      ,(int)vidhand->maxVidTrack
+      ,(int)enable_rgb888_flag
+      );
+  }
+
+  /* check clip references in all tracks.
+   * (bypass is possible in case the track on top of the layerstack refers to a fully opaque movie clip
+   * at the same size as the master frame, without any transitions.)
+   * the loop starts at minVidTrack (typically 0) that refers to the top layerstack position (e.g. foreground)
+   */
+  for(l_track = vidhand->minVidTrack; l_track <= vidhand->maxVidTrack; l_track++)
+  {
+    gfd->framename = p_fetch_framename(vidhand->frn_list
+                 , master_frame_nr /* starts at 1 */
+                 , l_track
+                 , &gfd->frn_type
+                 , &gfd->trak_filtermacro_file
+                 , &gfd->localframe_index   /* used only for ANIMIMAGE, SECTION and Videoframe Number, -1 for all other types */
+                 , &gfd->local_stepcount    /* nth frame within this clip */
+                 , &gfd->localframe_tween_rest  /* non integer part of local position (in case stepsize != 1) */
+                 , &gfd->keep_proportions
+                 , &gfd->fit_width
+                 , &gfd->fit_height
+                 , &l_red_f
+                 , &l_green_f
+                 , &l_blue_f
+                 , &l_alpha_f
+                 , &gfd->rotate        /* output rotation in degrees */
+                 , &gfd->opacity       /* output opacity 0.0 up to 1.0 */
+                 , &gfd->scale_x       /* output 0.0 up to 10.0 where 1.0 is 1:1 */
+                 , &gfd->scale_y       /* output 0.0 up to 10.0 where 1.0 is 1:1 */
+                 , &gfd->move_x        /* output -1.0 up to 1.0 where 0.0 is centered */
+                 , &gfd->move_y        /* output -1.0 up to 1.0 where 0.0 is centered */
+                 , &gfd->frn_elem      /* output selected to the relevant framerange element */
+                 );
+
+    if(gap_debug)
+    {
+      printf("BYPASSCHECK: track:%d master_frame_nr:%d framename:%s "
+             " trak_filtermacroAdr:%d maskNameAdr:%d insert_alpha:%d insert_area:%d\n"
+        ,(int)l_track
+        ,(int)master_frame_nr
+        ,gfd->framename
+        ,(int)gfd->trak_filtermacro_file
+        ,(int)gfd->frn_elem->mask_name
+        ,(int)vidhand->master_insert_alpha_format
+        ,(int)vidhand->master_insert_area_format
+        );
+    }
+
+    
+    if(gfd->framename)
+    {
+      if(gfd->frn_type == GAP_FRN_MOVIE)
+      {
+          if((gfd->opacity == 1.0)
+          && (gfd->rotate  == 0.0)
+          && (gfd->scale_x == 1.0)
+          && (gfd->scale_y == 1.0)
+          && (gfd->move_x  == 0.0)
+          && (gfd->move_y  == 0.0)
+          && (gfd->frn_elem->flip_request == GAP_STB_FLIP_NONE)
+          && (gfd->frn_elem->mask_name == NULL)
+          && (p_isFiltermacroActive(gfd->trak_filtermacro_file) != TRUE)
+          // Fit options are not relevant when clip and master size are exactly the same
+          // and no scaling is done.
+          //     && (gfd->fit_width)
+          //     && (gfd->fit_height)
+          //     && (!gfd->keep_proportions)
+          )
+          {
+            gboolean isAutoInsertActive;
+            
+            isAutoInsertActive = FALSE;
+            
+            if(vidhand->master_insert_alpha_format == TRUE)
+            {
+              char *alpha_imagename;
+              alpha_imagename = p_get_insert_alpha_filename(gfd, vidhand);
+
+              if(g_file_test(alpha_imagename, G_FILE_TEST_EXISTS))
+              {
+                isAutoInsertActive = TRUE;
+              }
+              g_free(alpha_imagename);
+            }
+
+            if((vidhand->master_insert_area_format == TRUE)
+            && (isAutoInsertActive != TRUE))
+            {
+              char *logo_imagename;
+              logo_imagename = p_get_insert_area_filename(gfd, vidhand);
+
+              if(g_file_test(logo_imagename, G_FILE_TEST_EXISTS))
+              {
+                isAutoInsertActive = TRUE;
+              }
+              g_free(logo_imagename);
+            }
+            
+            if (isAutoInsertActive != TRUE)
+            {
+              gfdMovie = gfd;
+              videofileName = g_strdup(gfd->framename);
+            }
+          }
+        
+      }
+      g_free(gfd->framename);
+    }
+    
+    if(gfdMovie != NULL)
+    {
+      break;
+    }
+    
+    if(gfd->frn_type != GAP_FRN_SILENCE)
+    {
+      if(gfd->opacity != 0.0)
+      {
+        break;
+      }
+    }
+    
+    /* at this point we detected that the current layer is fully transparent,
+     * therefore we can continue checking the next track (e.g. the next lower layerstack position)
+     */
+     
+  }       /* end for loop over all video tracks */
+  
+  
+
+
+  if((gfdMovie != NULL)
+  && (videofileName != NULL))
+  {
+     gfdMovie->framename = videofileName;
+     p_check_and_open_video_handle(gfdMovie->frn_elem
+                                  , vidhand
+                                  , master_frame_nr
+                                  , videofileName
+                                  );
+     if(gap_debug)
+     {
+       printf("BYPASSCHECK(2): master_frame_nr:%d framename:%s "
+              " gvahand size:(%d x %d) vid size:(%d x %d)\n"
+         ,(int)master_frame_nr
+         ,videofileName
+         ,(int)gfdMovie->frn_elem->gvahand->width
+         ,(int)gfdMovie->frn_elem->gvahand->height
+         ,(int)vid_width
+         ,(int)vid_height
+         );
+     }
+
+     if(gfdMovie->frn_elem->gvahand)
+     {
+       if((gfdMovie->frn_elem->gvahand->width == vid_width)
+       && (gfdMovie->frn_elem->gvahand->height == vid_height))
+       {
+         /* because there are no transformations and the movie clip is the only active track (e.g. layer)
+          * we can bypass the render processing and deliver the frame data as rgb888 buffer
+          */
+         gfdMovie->gapStoryFetchResult = gapStoryFetchResult;
+         gfdMovie->isRgb888Result      = enable_rgb888_flag;
+         p_stb_render_movie(gfdMovie, vidhand, master_frame_nr, vid_width, vid_height);
+
+         if(gap_debug)
+         {
+           printf("BYPASSCHECK(3): master_frame_nr:%d framename:%s resultEnum:%d\n"
+             ,(int)master_frame_nr
+             ,videofileName
+             ,(int)gapStoryFetchResult->resultEnum
+             );
+         }
+
+         if(gapStoryFetchResult->resultEnum == GAP_STORY_FETCH_RESULT_IS_RAW_RGB888)
+         {
+           isByPassRenderEngine = TRUE;
+         }
+       }
+     }
+     
+  }
+  
+  if(videofileName != NULL)
+  {
+    g_free(videofileName);
+  }
+  
+  return(isByPassRenderEngine);
+  
+}  /* end p_story_render_bypass_where_possible  */
 
 
 /* --------------------------------------------
@@ -6120,45 +7973,71 @@ p_do_insert_alpha_processing(GapStbFetchData *gfd
  * are fetched as layers. The track number is the layerstack index.
  * optional filtermacro processing is done for the separate layers (clip specific filtermacre)
  * and for the composite image (global filtermacro)
+ *
+ * This procedure can deliver the image id of the resulting composite image
+ * as return code. Optionally the result can be delivered in a GapStoryFetchResult struct
+ * (in case the caller provided a non-NULL gapStoryFetchResult)
+ *
+ * The GapStoryFetchResult enables the direct fetch of rgb888 frame data.
+ * The caller must also set enable_rgb888_flag = TRUE to activate that feature.
+ * The direct fetch can bypass the render engine for frames in referred video clips
+ * that can be copied 1:1 when no transformations are required to render that frame.
  */
 static gint32
 p_story_render_fetch_composite_image_private(GapStoryRenderVidHandle *vidhand
-                    , gint32 master_frame_nr  /* starts at 1 */
-                    , gint32  vid_width       /* desired Video Width in pixels */
-                    , gint32  vid_height      /* desired Video Height in pixels */
-                    , char *filtermacro_file  /* NULL if no filtermacro is used */
-                    , gint32 *layer_id        /* output: Id of the only layer in the composite image */
-                    , const char *section_name  /* NULL for main section */
+                    , gint32 master_frame_nr       /* starts at 1 */
+                    , gint32  vid_width            /* desired Video Width in pixels */
+                    , gint32  vid_height           /* desired Video Height in pixels */
+                    , char *filtermacro_file       /* NULL if no filtermacro is used */
+                    , gint32 *layer_id             /* output: Id of the only layer in the composite image */
+                    , const char *section_name     /* NULL for main section */
+                    , gboolean enable_rgb888_flag  /* enable fetch as rgb888 data buffer */
+                    , GapStoryFetchResult      *gapStoryFetchResult
                  )
 {
   GapStbFetchData gapStbFetchData;
   GapStbFetchData *gfd;
 
-  gint    l_track;
-  gint32    l_track_min;
-  gint32    l_track_max;
+  gint32  l_track;
 
   gdouble l_red_f;
   gdouble l_green_f;
   gdouble l_blue_f;
   gdouble l_alpha_f;
 
+  static gint32 funcId = -1;
+  static gint32 funcIdDirect = -1;
+  static gint32 funcIdDirectScale = -1;
+  
+  GAP_TIMM_GET_FUNCTION_ID(funcId, "p_story_render_fetch_composite_image_private");
+  GAP_TIMM_GET_FUNCTION_ID(funcIdDirect, "p_story_render_fetch_composite_image_private.Direct");
+  GAP_TIMM_GET_FUNCTION_ID(funcIdDirectScale, "p_story_render_fetch_composite_image_private.Direct.Scale");
+
+  GAP_TIMM_START_FUNCTION(funcId);
+
+
   gfd = &gapStbFetchData;
   gfd->localframe_tween_rest = 0.0;
   gfd->comp_image_id   = -1;
   gfd->tmp_image_id    = -1;
   gfd->layer_id        = -1;
+  gfd->gapStoryFetchResult = NULL;
+  gfd->isRgb888Result      = FALSE;
   *layer_id         = -1;
 
 
 
   if(gap_debug)
   {
-    printf("p_story_render_fetch_composite_image_private START  master_frame_nr:%d  %dx%d vidhand:%d\n"
+    printf("p_story_render_fetch_composite_image_private START  master_frame_nr:%d  %dx%d vidhand:%d"
+           " enable_rgb888_flag:%d gapStoryFetchResultAdr:%d filtermacroAdr:%d\n"
         , (int)master_frame_nr
         , (int)vid_width
         , (int)vid_height
         , (int)vidhand
+        , (int)enable_rgb888_flag
+        , (int)gapStoryFetchResult
+        , (int)filtermacro_file
         );
     if (section_name == NULL)
     {
@@ -6183,13 +8062,43 @@ p_story_render_fetch_composite_image_private(GapStoryRenderVidHandle *vidhand
     }
   }
 
-  p_find_min_max_vid_tracknumbers(vidhand->frn_list, &l_track_min, &l_track_max);
 
+  if((enable_rgb888_flag == TRUE)
+  && (section_name == NULL)
+  && (gapStoryFetchResult != NULL)
+  && (p_isFiltermacroActive(filtermacro_file) != TRUE))
+  {
+    gboolean isByPassRenderEngine;
+    
+    isByPassRenderEngine = p_story_render_bypass_where_possible(vidhand
+                    , master_frame_nr 
+                    , vid_width
+                    , vid_height
+                    , enable_rgb888_flag
+                    , gapStoryFetchResult
+                    );
+    
+    if (isByPassRenderEngine == TRUE)
+    {
+      if(gap_debug)
+      {
+        printf("p_story_render_fetch_composite_image_private: "
+               "isByPassRenderEngine is TRUE (deliver rgb888) enable_rgb888_flag:%d TRUE:%d\n"
+               ,(int)enable_rgb888_flag
+               ,(int)TRUE
+               );
+      }
+      /* the result is available as rgb888 buffer in the gapStoryFetchResult struct
+       * (no layer / image was created, for performance reasons)
+       */
+      return (-1);
+    }
+  }
 
   /* reverse order, has the effect, that track 0 is processed as last track
-   * and will be put on top of the layerstack
+   * and will be put on top of the layerstack (e.g. in the foreground)
    */
-  for(l_track = MIN(GAP_STB_MAX_VID_INTERNAL_TRACKS, l_track_max); l_track >= MAX(0, l_track_min); l_track--)
+  for(l_track = vidhand->maxVidTrack; l_track >= vidhand->minVidTrack; l_track--)
   {
     gfd->framename = p_fetch_framename(vidhand->frn_list
                  , master_frame_nr /* starts at 1 */
@@ -6260,13 +8169,16 @@ p_story_render_fetch_composite_image_private(GapStoryRenderVidHandle *vidhand
            }
            if(gfd->tmp_image_id < 0)
            {
-              printf("\n** ERROR fetching master_frame_nr:%d, from framename:%s Current CLIP was:\n"
+              printf("\n** ERROR fetching master_frame_nr:%d, from framename:%s targetFrameNumber:%d Current CLIP was:\n"
                  , (int)master_frame_nr
                  , gfd->framename
+                 , gfd->localframe_index
                  );
               gap_story_render_debug_print_frame_elem(gfd->frn_elem, -1);
               printf("\n** storyboard render processing failed\n");
               g_free(gfd->framename);
+
+              GAP_TIMM_STOP_FUNCTION(funcId);
               return -1;
            }
            gfd->layer_id = p_prepare_RGB_image(gfd->tmp_image_id);
@@ -6316,6 +8228,7 @@ p_story_render_fetch_composite_image_private(GapStoryRenderVidHandle *vidhand
          && (gfd->frn_type != GAP_FRN_ANIMIMAGE)
          )
          {
+           GAP_TIMM_START_FUNCTION(funcIdDirect);
            /* because there are no transformations in the first handled track,
             * we can save time and directly use the loaded tmp image as base for the composite image
             */
@@ -6326,9 +8239,17 @@ p_story_render_fetch_composite_image_private(GapStoryRenderVidHandle *vidhand
            if ((gimp_image_width(gfd->comp_image_id) != vid_width)
            ||  (gimp_image_height(gfd->comp_image_id) != vid_height) )
            {
-              if(gap_debug) printf("DEBUG: p_story_render_fetch_composite_image_private scaling composite image\n");
+              GAP_TIMM_START_FUNCTION(funcIdDirectScale);
+              
+              if(gap_debug)
+              {
+                printf("DEBUG: p_story_render_fetch_composite_image_private scaling composite image\n");
+              }
               gimp_image_scale(gfd->comp_image_id, vid_width, vid_height);
+
+              GAP_TIMM_STOP_FUNCTION(funcIdDirectScale);
            }
+           GAP_TIMM_STOP_FUNCTION(funcIdDirect);
          }
          else
          {
@@ -6395,6 +8316,8 @@ p_story_render_fetch_composite_image_private(GapStoryRenderVidHandle *vidhand
            , (int)*layer_id );
   }
 
+  GAP_TIMM_STOP_FUNCTION(funcId);
+
   return(gfd->comp_image_id);
 
 } /* end p_story_render_fetch_composite_image_private */
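
The GAP_TIMM_* macros used above wrap the gap_timm runtime recording; the same instrumentation pattern can be applied to any function. A minimal sketch (the measured function name is a hypothetical placeholder):

static void
p_example_render_step(void)
{
  static gint32 funcId = -1;

  /* resolve the funcId once per process, keyed by function name */
  GAP_TIMM_GET_FUNCTION_ID(funcId, "p_example_render_step");
  GAP_TIMM_START_FUNCTION(funcId);

  /* ... the work to be measured ... */

  GAP_TIMM_STOP_FUNCTION(funcId);
}

gap_timm_print_statistics() can then be called once, e.g. at the end of an encoder run, to dump the collected per-function call counts and min/max/avg durations.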
diff --git a/gap/gap_story_render_processor.h b/gap/gap_story_render_processor.h
index aa6f022..f9523a7 100644
--- a/gap/gap_story_render_processor.h
+++ b/gap/gap_story_render_processor.h
@@ -90,6 +90,50 @@ typedef struct GapCodecNameElem {
   void  *next;
 }  GapCodecNameElem;
 
+typedef enum GapStoryFetchResultEnum
+{
+    GAP_STORY_FETCH_RESULT_IS_IMAGE              = 1
+,   GAP_STORY_FETCH_RESULT_IS_RAW_RGB888         = 2
+,   GAP_STORY_FETCH_RESULT_IS_COMPRESSED_CHUNK   = 3
+,   GAP_STORY_FETCH_RESULT_IS_ERROR              = -1
+} GapStoryFetchResultEnum;
+
+
+/* The GapStoryFetchResult represents the fetched composite video frame
+ * Depending on the flags dont_recode_flag and enable_rgb888_flag
+ * the delivered frame can have one of the types
+ * that are defined in GapStoryFetchResultEnum.
+ *
+ * Note that the caller of the fetch procedure can already provide
+ * allocated memory for the buffers  raw_rgb_data and video_frame_chunk_data.
+ * (in this case the caller is responsible to allocate the buffers large enough
+ * to hold one uncompressed frame in rgb888 colormodel representation)
+ *
+ * in case raw_rgb_data or video_frame_chunk_data is NULL the buffer is automatically
+ * allocated in correct size when needed. (this is done by the fetch procedures 
+ * GVA_search_fcache_and_get_frame_as_gimp_layer_or_rgb888
+ * gap_story_render_fetch_composite_image_or_chunk )
+ */
+typedef struct GapStoryFetchResult {
+  GapStoryFetchResultEnum resultEnum;
+  
+  /* GAP_STORY_FETCH_RESULT_IS_IMAGE */
+  gint32    layer_id;        /* Id of the only layer in the composite image */
+  gint32    image_id;        /* output: Id of the composite image */
+  
+  /* GAP_STORY_FETCH_RESULT_IS_RAW_RGB888 */
+  unsigned  char    *raw_rgb_data;          /* raw data RGB888 at full size height * width * 3 */
+  
+  
+  /* GAP_STORY_FETCH_RESULT_IS_COMPRESSED_CHUNK */
+  gboolean  force_keyframe;                 /* the calling encoder should encode an I-Frame when true */
+  gint32    video_frame_chunk_size;         /* total size of frame (may include a videoformat specific frameheader) */
+  gint32    video_frame_chunk_hdr_size;     /* size of videoformat specific frameheader (0 if has no hdr) */
+  unsigned  char *video_frame_chunk_data;   /* copy of the already compressed video frame from the source video */
+
+} GapStoryFetchResult;
+
+
 /* --------------------------*/
 /* PROCEDURE DECLARATIONS    */
 /* --------------------------*/
@@ -109,6 +153,33 @@ void     gap_story_render_drop_audio_cache(void);
 
 
 
+/* ----------------------------------------------------
+ * gap_story_render_fetch_composite_image (simple API)
+ * ----------------------------------------------------
+ * fetch composite VIDEO Image at a given master_frame_nr
+ * within a storyboard framerange list.
+ *
+ * the returned image is flattened RGB and scaled to
+ * desired video framesize.
+ *
+ *  it is a merged result of all video tracks,
+ *
+ *  frames at master_frame_nr are loaded
+ *  for all video tracks and added to the composite image
+ *   (track 0 on top, track N on bottom
+ *    of the layerstack)
+ *  opacity, scaling and move (decenter) attributes
+ *  are set according to the current Video Attributes.
+ *
+ * an (optional) filtermacro_file is performed on the
+ * composite image.
+ *
+ * (simple animations without a storyboard file
+ *  are represented by a short storyboard framerange list that has
+ *  just one element entry at track 1).
+ *
+ * return image_id of resulting image and the flattened resulting layer_id
+ */
 gint32   gap_story_render_fetch_composite_image(GapStoryRenderVidHandle *vidhand
                     , gint32 master_frame_nr  /* starts at 1 */
                     , gint32  vid_width       /* desired Video Width in pixels */
@@ -117,7 +188,98 @@ gint32   gap_story_render_fetch_composite_image(GapStoryRenderVidHandle *vidhand
                     , gint32 *layer_id        /* output: Id of the only layer in the composite image */
                  );
 
-gboolean gap_story_render_fetch_composite_image_or_chunk(GapStoryRenderVidHandle *vidhand
+
+
+/* ------------------------------------------------------------------------
+ * gap_story_render_fetch_composite_image_or_buffer_or_chunk (extended API)
+ * ------------------------------------------------------------------------
+ *
+ * fetch composite VIDEO frame at a given master_frame_nr
+ * within a storyboard framerange list.
+ *
+ * on success the result can be delivered in one of those types:
+ *   GAP_STORY_FETCH_RESULT_IS_IMAGE
+ *   GAP_STORY_FETCH_RESULT_IS_RAW_RGB888
+ *   GAP_STORY_FETCH_RESULT_IS_COMPRESSED_CHUNK
+ *
+ * The delivered data type depends on the flags:
+ *   dont_recode_flag
+ *   enable_rgb888_flag
+ *
+ * In case both flags are FALSE, the caller can always expect
+ * a gimp image (GAP_STORY_FETCH_RESULT_IS_IMAGE) as result on success.
+ *
+ * Encoders that can handle the RGB888 colormodel can set the enable_rgb888_flag.
+ *
+ *   If the enable_rgb888_flag is TRUE and the referred frame can be copied
+ *   without rendering transitions from only one input video clip,
+ *   then the render engine is bypassed, and the result will be of type 
+ *   GAP_STORY_FETCH_RESULT_IS_RAW_RGB888 for this frame.
+ *   (this speeds up encoding of simple 1:1 copied video clip frames
+ *   because the conversion from rgb888 to gimp drawable and back to rgb888
+ *   can be skipped in this special case)
+ *   
+ *
+ * Encoders that support lossless video cut can set the dont_recode_flag.
+ *
+ *   if the dont_recode_flag is TRUE, the render engine is also bypassed where
+ *   a direct fetch of the (already compressed) Frame chunk from an input videofile
+ *   is possible for the master_frame_nr.
+ *   (in case there are any transitions or mix with other input channels
+ *   or in case the input is not an mpeg encoded video file it is not possible to 
+ *   make a lossless copy of the input frame data)
+ *
+ *   Restriction: the current implementation provides lossless cut only for MPEG1 and MPEG2
+ *
+ *
+ * the compressed fetch depends on the following conditions:
+ * - dont_recode_flag == TRUE
+ * - there is only 1 videoinput track at this master_frame_nr
+ * - the videodecoder must support a read_video_chunk procedure
+ *   (libmpeg3 has this support, for libavformat the support is available via the gap video api)
+ *    (TODO: future releases should also check for the same vcodec_name)
+ * - the videoframe must match 1:1 in size
+ * - there are no transformations (opacity, offsets ....)
+ * - there are no filtermacros to perform on the fetched frame
+ *
+ * check_flags:
+ *   force checks if the corresponding bit value is set. Supported bit values are:
+ *      GAP_VID_CHCHK_FLAG_SIZE               check if width and height are equal
+ *      GAP_VID_CHCHK_FLAG_MPEG_INTEGRITY     checks for MPEG P and B frames if the sequence of fetched frames
+ *                                                   also includes the referred I frame (before or after the current
+ *                                                   handled frame)
+ *      GAP_VID_CHCHK_FLAG_JPG                check if the fetched chunk is a jpeg encoded frame.
+ *                                                  (typical for MPEG I frames)
+ *      GAP_VID_CHCHK_FLAG_VCODEC_NAME        check for a compatible vcodec_name
+ *
+ *
+ * The resulting frame is delivered in the GapStoryFetchResult struct.
+ *
+ *   Note that the caller of the fetch procedure can already provide
+ *   allocated memory for the buffers  raw_rgb_data and video_frame_chunk_data.
+ *   (in this case the caller is responsible to allocate the buffers large enough
+ *   to hold one uncompressed frame in rgb888 colormodel representation)
+ *
+ *   in case raw_rgb_data or video_frame_chunk_data is NULL the buffer is automatically
+ *   allocated in correct size when needed.
+ */
+void  gap_story_render_fetch_composite_image_or_buffer_or_chunk(GapStoryRenderVidHandle *vidhand
+                    , gint32 master_frame_nr  /* starts at 1 */
+                    , gint32  vid_width       /* desired Video Width in pixels */
+                    , gint32  vid_height      /* desired Video Height in pixels */
+                    , char *filtermacro_file  /* NULL if no filtermacro is used */
+                    , gboolean dont_recode_flag                /* IN: TRUE try to fetch compressed chunk if possible */
+                    , gboolean enable_rgb888_flag              /* IN: TRUE deliver result already converted to rgb buffer */
+                    , GapCodecNameElem *vcodec_list            /* IN: list of video_codec names that are compatible to the calling encoder program */
+                    , gint32 video_frame_chunk_maxsize         /* IN: sizelimit (larger chunks are not fetched) */
+                    , gdouble master_framerate
+                    , gint32  max_master_frame_nr              /* the number of frames that will be encoded in total */
+                    , gint32  check_flags                      /* IN: combination of GAP_VID_CHCHK_FLAG_* flag values */
+                    , GapStoryFetchResult *gapStoryFetchResult
+                 );
+
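A sketch of how a videoencoder might drive this extended API (the p_example_encode_one_frame wrapper and the comments inside the switch are illustrative assumptions, not part of this header):

#include <string.h>                       /* memset */
#include "gap_story_render_processor.h"

static void
p_example_encode_one_frame(GapStoryRenderVidHandle *vidhand
  , gint32 master_frame_nr, gint32 vid_width, gint32 vid_height
  , gboolean dont_recode_flag, GapCodecNameElem *vcodec_list
  , gint32 video_frame_chunk_maxsize, gdouble master_framerate
  , gint32 max_master_frame_nr, gint32 check_flags)
{
  GapStoryFetchResult fetchResult;

  /* NULL buffer pointers: the fetcher allocates raw_rgb_data / chunk data as needed */
  memset(&fetchResult, 0, sizeof(fetchResult));

  gap_story_render_fetch_composite_image_or_buffer_or_chunk(vidhand
      , master_frame_nr, vid_width, vid_height
      , NULL                 /* filtermacro_file */
      , dont_recode_flag
      , TRUE                 /* enable_rgb888_flag */
      , vcodec_list, video_frame_chunk_maxsize
      , master_framerate, max_master_frame_nr, check_flags
      , &fetchResult);

  switch (fetchResult.resultEnum)
  {
    case GAP_STORY_FETCH_RESULT_IS_RAW_RGB888:
      /* hand fetchResult.raw_rgb_data (vid_width * vid_height * 3 bytes) to the encoder */
      break;
    case GAP_STORY_FETCH_RESULT_IS_COMPRESSED_CHUNK:
      /* write fetchResult.video_frame_chunk_data 1:1 (lossless cut) */
      break;
    case GAP_STORY_FETCH_RESULT_IS_IMAGE:
      /* encode the gimp image fetchResult.image_id / fetchResult.layer_id as in the classic code path */
      break;
    default:
      /* GAP_STORY_FETCH_RESULT_IS_ERROR */
      break;
  }
}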
+/* DEPRECATED: use gap_story_render_fetch_composite_image_or_buffer_or_chunk instead */
+gboolean gap_story_render_fetch_composite_image_or_chunk(GapStoryRenderVidHandle *vidhand  //// DEPRECATED
                     , gint32 master_frame_nr  /* starts at 1 */
                     , gint32  vid_width       /* desired Video Width in pixels */
                     , gint32  vid_height      /* desired Video Height in pixels */
@@ -139,6 +301,8 @@ gboolean gap_story_render_fetch_composite_image_or_chunk(GapStoryRenderVidHandle
                     , gint32 check_flags                       /* IN: combination of GAP_VID_CHCHK_FLAG_* flag values */
                  );
 
+
+
 GapStoryRenderVidHandle *  gap_story_render_open_vid_handle_from_stb(
                            GapStoryBoard *stb_ptr
                           ,gint32 *frame_count   /* output total frame_count , or 0 on failure */
diff --git a/gap/gap_story_render_types.h b/gap/gap_story_render_types.h
old mode 100644
new mode 100755
index f208374..dc0414f
--- a/gap/gap_story_render_types.h
+++ b/gap/gap_story_render_types.h
@@ -332,7 +332,7 @@ typedef struct GapStoryRenderSection
 
 
 
-typedef struct GapStoryRenderVidHandle
+typedef struct GapStoryRenderVidHandle  /* nick: vidhand */
 {
   GapStoryRenderSection           *section_list;
   GapStoryRenderSection           *parsing_section;
@@ -378,6 +378,13 @@ typedef struct GapStoryRenderVidHandle
 
   gint32     ffetch_user_id;
 
+  gint32     minVidTrack;                /* lowest video track number of all elements in the current frn_list */
+  gint32     maxVidTrack;                /* highest video track number of all elements in the current frn_list */
+
+  gboolean      isLogResourceUsage;      /* triggers logging of resources (open videohandles and cached images) */
+  gint32        resourceLogInterval;
+  gboolean      isMultithreadEnabled;    /* triggers prefetch of videoframes via thread pool parallel processing */
+  
 } GapStoryRenderVidHandle;  /* used for storyboard processing */
 
 
diff --git a/libgapbase/Makefile.am b/libgapbase/Makefile.am
index 56509e5..ce21654 100644
--- a/libgapbase/Makefile.am
+++ b/libgapbase/Makefile.am
@@ -20,6 +20,8 @@ libgapbase_a_SOURCES = \
 	gap_base.h		\
 	gap_file_util.c		\
 	gap_file_util.h		\
+	gap_timm.c		\
+	gap_timm.h		\
 	gap_val_file.c		\
 	gap_val_file.h
 
diff --git a/libgapbase/gap_base.c b/libgapbase/gap_base.c
index 10ee720..9378a33 100644
--- a/libgapbase/gap_base.c
+++ b/libgapbase/gap_base.c
@@ -45,6 +45,11 @@
 #include <unistd.h>
 #endif
 
+#ifdef GAP_HAVE_PTHREAD
+#include "pthread.h"
+#endif
+
+
 #include <glib/gstdio.h>
 
 /* GIMP includes */
@@ -573,3 +578,198 @@ gap_base_mix_value_exp_and_round(gdouble factor, gdouble a, gdouble b)
 {
   return (ROUND(gap_base_mix_value_exp(factor, a, b)));
 }
+
+
+
+/* ---------------------------------
+ * gap_base_get_numProcessors
+ * ---------------------------------
+ * get number of available processors.
+ * This implementation uses the gimprc parameter of the gimp core application.
+ * Therefore the returned number does not reflect hardware information, but
+ * reprents the number of processors that are configured to be used by th gimp.
+ */
+gint
+gap_base_get_numProcessors()
+{
+  gint      numProcessors;
+
+  numProcessors = gap_base_get_gimprc_int_value("num-processors"
+                               , 1  /* default */
+                               , 1  /* min */
+                               , 32 /* max */
+                               );
+  return (numProcessors);
+}
+
+
+/* ---------------------------------
+ * gap_base_thread_init
+ * ---------------------------------
+ * check if thread support is available and init the thread system if so.
+ * returns TRUE on successful initialisation of the thread system
+ *         FALSE in case thread support is not available.
+ * Note: multiple calls are tolerated and shall always deliver the same result.
+ */
+gboolean 
+gap_base_thread_init()
+{
+  static gboolean isFirstCall = TRUE;
+  gboolean isThreadSupportOk;
+  
+  /* check if thread system is already initialized */
+  if(isFirstCall == TRUE)
+  {
+    if(gap_debug)
+    {
+      printf("gap_base_thread_init: CALLING g_thread_init\n");
+    }
+    /* try to init thread system */
+    g_thread_init(NULL);
+
+    isFirstCall = FALSE;
+  }
+
+  isThreadSupportOk = g_thread_supported();
+
+  if(gap_debug)
+  {
+    printf("gap_base_thread_init: isThreadSupportOk:%d\n"
+      ,(int)isThreadSupportOk
+      );
+  }
+
+  return(isThreadSupportOk);
+}
+
+
+/* ---------------------------------
+ * gap_base_get_thread_id
+ * ---------------------------------
+ * get id of the current thread.
+ * gthread does not provide that feature.
+ *
+ * therefore use pthread implementation to get the current thread id.
+ * In case pthread is not available at compiletime this procedure
+ * will always return 0 at runtime.
+ */
+gint64
+gap_base_get_thread_id()
+{
+  gint64 threadId = 0;
+  
+//#ifdef HAVE_SYS_TYPES_H
+//  threadId = (gint64)gettid();
+//#endif
+
+#ifdef GAP_HAVE_PTHREAD
+  threadId = pthread_self();
+  if(gap_debug)
+  {
+    printf("pthread_self threadId:%lld\n", threadId);
+  }
+#endif
+
+  return (threadId);
+}
+
+
+/* ---------------------------------
+ * gap_base_get_gimp_mutex
+ * ---------------------------------
+ * get the global mutex that is intended to synchronize calls to gimp core functions
+ * from within gap plug-ins when running in multithreaded environment.
+ * the returned mutex is a singleton, i.e. the same static mutex at each call.
+ * therefore the caller must not free the returned mutex.
+ */
+GStaticMutex *
+gap_base_get_gimp_mutex()
+{
+  static GStaticMutex gimpMutex = G_STATIC_MUTEX_INIT;
+  
+  return (&gimpMutex);
+
+}
+
+
+
+/* ---------------------------
+ * gap_base_gimp_mutex_trylock 
+ * ---------------------------
+ * lock the static gimpMutex singleton if present (e.g. is NOT NULL)
+ *
+ * returns FALSE immediately in case the mutex is locked by another thread
+ * returns TRUE in case the mutex was locked successfully (without waiting for other threads)
+ *        TRUE is also returned immediately in case
+ *        the thread system is not initialized, e.g. g_thread_init was not yet called
+ */
+gboolean
+gap_base_gimp_mutex_trylock(GapTimmRecord  *gimpMutexStats)
+{
+  gboolean isSuccessful;
+
+  GStaticMutex        *gimpMutex;
+
+  gimpMutex = gap_base_get_gimp_mutex();
+  if(gimpMutex)
+  {
+    GAP_TIMM_START_RECORD(gimpMutexStats);
+
+    isSuccessful = g_static_mutex_trylock (gimpMutex);
+
+    GAP_TIMM_STOP_RECORD(gimpMutexStats);
+  }
+  else
+  {
+    isSuccessful = TRUE;
+  }
+
+  return(isSuccessful);
+
+}  /* end gap_base_gimp_mutex_trylock */
+
+
+/* ---------------------------
+ * gap_base_gimp_mutex_lock 
+ * ---------------------------
+ * lock the static gimpMutex singleton if present (e.g. is NOT NULL)
+ */
+void
+gap_base_gimp_mutex_lock(GapTimmRecord  *gimpMutexStats)
+{
+  GStaticMutex        *gimpMutex;
+
+  gimpMutex = gap_base_get_gimp_mutex();
+  if(gimpMutex)
+  {
+    GAP_TIMM_START_RECORD(gimpMutexStats);
+
+    g_static_mutex_lock (gimpMutex);
+
+    GAP_TIMM_STOP_RECORD(gimpMutexStats);
+  }
+
+}  /* end gap_base_gimp_mutex_lock */
+
+
+/* ---------------------------
+ * gap_base_gimp_mutex_unlock 
+ * ---------------------------
+ * unlock the static gimpMutex singleton if present (e.g. is NOT NULL)
+ */
+void
+gap_base_gimp_mutex_unlock(GapTimmRecord  *gimpMutexStats)
+{
+  GStaticMutex        *gimpMutex;
+
+  gimpMutex = gap_base_get_gimp_mutex();
+  if(gimpMutex)
+  {
+    GAP_TIMM_START_RECORD(gimpMutexStats);
+    
+    g_static_mutex_unlock (gimpMutex);
+    
+    GAP_TIMM_STOP_RECORD(gimpMutexStats);
+  }
+
+}  /* end gap_base_gimp_mutex_unlock */
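
A sketch of the intended usage from a worker thread (p_worker_scale_image and its arguments are hypothetical; gimp_image_scale is an existing gimp core call):

static GapTimmRecord gimpMutexStats;   /* zero once via gap_timm_init_record(&gimpMutexStats) */

static void
p_worker_scale_image(gint32 image_id, gint32 width, gint32 height)
{
  /* only one thread at a time may talk to the gimp core */
  gap_base_gimp_mutex_lock(&gimpMutexStats);

  gimp_image_scale(image_id, width, height);

  gap_base_gimp_mutex_unlock(&gimpMutexStats);
}

gap_base_gimp_mutex_trylock can be used instead when the worker has other useful work to do while the mutex is held elsewhere; the GapTimmRecord collects how long the thread spent acquiring the lock.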
diff --git a/libgapbase/gap_base.h b/libgapbase/gap_base.h
index 9788b04..97e0f3c 100644
--- a/libgapbase/gap_base.h
+++ b/libgapbase/gap_base.h
@@ -32,6 +32,7 @@
 #define _GAP_BASE_H
 
 #include "libgimp/gimp.h"
+#include "gap_timm.h"
 
 /* -----------------------------
  * gap_base_shorten_filename
@@ -202,4 +203,86 @@ gdouble
 gap_base_mix_value_exp_and_round(gdouble factor, gdouble a, gdouble b);
 
 
+/* ---------------------------------
+ * gap_base_get_numProcessors
+ * ---------------------------------
+ * get number of available processors.
+ * This implementation uses the gimprc parameter of the gimp core application.
+ * Therefore the returned number does not reflect hardware information, but
+ * represents the number of processors that are configured to be used by the gimp.
+ */
+gint
+gap_base_get_numProcessors();
+
+/* ---------------------------------
+ * gap_base_thread_init
+ * ---------------------------------
+ * check if thread support is available and init the thread system if so.
+ * returns TRUE on successful initialisation of the thread system
+ *         FALSE in case thread support is not available.
+ * Note: multiple calls are tolerated and shall always deliver the same result.
+ */
+gboolean 
+gap_base_thread_init();
+
+
+/* ---------------------------------
+ * gap_base_get_thread_id
+ * ---------------------------------
+ * get id of the current thread.
+ * gthread does not provide that feature.
+ *
+ * therefore use pthread implementation to get the current thread id.
+ * In case pthread is not available at compiletime this procedure
+ * will always return 0 at runtime.
+ */
+gint64
+gap_base_get_thread_id();
+
+/* ---------------------------------
+ * gap_base_get_gimp_mutex
+ * ---------------------------------
+ * get the global mutex that is intended to synchronize calls to gimp core functions
+ * from within gap plug-ins when running in multithreaded environment.
+ * the returned mutex is a singleton, i.e. the same static mutex at each call.
+ * therefore the caller must not free the returned mutex.
+ */
+GStaticMutex *
+gap_base_get_gimp_mutex();
+
+/* ---------------------------
+ * gap_base_gimp_mutex_trylock 
+ * ---------------------------
+ * lock the static gimpMutex singleton if present (e.g. is NOT NULL)
+ *
+ * returns FALSE immediately in case the mutex is locked by another thread
+ * returns TRUE in case the mutex was locked successfully (without waiting for other threads)
+ *        TRUE is also returned immediately in case
+ *        the thread system is not initialized, e.g. g_thread_init was not yet called
+ */
+gboolean
+gap_base_gimp_mutex_trylock(GapTimmRecord  *gimpMutexStats);
+
+
+/* ---------------------------
+ * gap_base_gimp_mutex_lock 
+ * ---------------------------
+ * lock the static gimpMutex singleton if present (e.g. is NOT NULL)
+ */
+void
+gap_base_gimp_mutex_lock(GapTimmRecord  *gimpMutexStats);
+
+
+/* ---------------------------
+ * gap_base_gimp_mutex_unlock 
+ * ---------------------------
+ * unlock the static gimpMutex singleton if present (e.g. is NOT NULL)
+ */
+void
+gap_base_gimp_mutex_unlock(GapTimmRecord  *gimpMutexStats);
+
+
+
+
+
 #endif
diff --git a/libgapbase/gap_libgapbase.h b/libgapbase/gap_libgapbase.h
index 90a818e..6b3d354 100644
--- a/libgapbase/gap_libgapbase.h
+++ b/libgapbase/gap_libgapbase.h
@@ -36,6 +36,7 @@
 #include "gap_val_file.h"
 #include "gap_file_util.h"
 #include "gap_base.h"
+#include "gap_timm.h"
 
 
 /* GAP_BASE_MIX_VALUE  0.0 <= factor <= 1.0
diff --git a/libgapbase/gap_timm.c b/libgapbase/gap_timm.c
new file mode 100755
index 0000000..c3affcc
--- /dev/null
+++ b/libgapbase/gap_timm.c
@@ -0,0 +1,569 @@
+/* gap_timm.c
+ *    by hof (Wolfgang Hofer)
+ *    simple runtime measuring procedures.
+ *    Restrictions: 
+ *     - current implementation does not support measuring of recursive procedure calls
+ *     - measured results in a multithreaded environment may be falsified
+ *       due to wait cycles while locking the mutex.
+ *  2010/10/19
+ *
+ */
+/* The GIMP -- an image manipulation program
+ * Copyright (C) 1995 Spencer Kimball and Peter Mattis
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/* revision history:
+ * version 2.7.0;             hof: created
+ */
+
+/* SYSTEM (UNIX) includes */
+#include "config.h"
+#include "string.h"
+/* GIMP includes */
+/* GAP includes */
+#include "gap_timm.h"
+#include "gap_base.h"
+
+
+#define GAP_TIMM_MAX_ELEMENTS 500
+#define GAP_TIMM_MAX_THREADS  30
+#define GAP_TIMM_MAX_FUNCNAME 65
+
+typedef struct GapTimmElement
+{
+  gint32    funcId;
+  char      funcName[GAP_TIMM_MAX_FUNCNAME];
+  gint32    maxThreadIdx;
+  gint64    funcThreadId[GAP_TIMM_MAX_THREADS];
+  gboolean  isStartTimeRecorded[GAP_TIMM_MAX_THREADS];
+  GTimeVal  startTime[GAP_TIMM_MAX_THREADS];
+  guint32   numberOfCallsStarted;
+  guint32   numberOfCallsFinished;
+  gboolean  errorFlag;
+  guint64   summaryDuration;
+  guint64   minDuration;
+  guint64   maxDuration;
+  GMutex   *funcMutex;
+ 
+} GapTimmElement;
+
+typedef struct GapTimmData
+{
+  GMutex         *mutex;
+  GapTimmElement *tab;
+  gint32          tabSizeInElements;
+  gint32          maxFuncId;
+  gboolean        isMultiThreadSupport;
+} GapTimmData;
+
+
+
+extern      int gap_debug; /* ==0  ... dont print debug infos */
+
+static GapTimmData timmData =
+{
+  NULL   /* GMutex  *mutex; */
+, NULL   /* GapTimmElement *tab;  */
+, 0      /* gint32          tabSizeInElements; */
+, 0      /* gint32          maxFuncId; */
+, FALSE  /* gboolean        isMultiThreadSupport; */
+};
+
+
+static void 
+p_initGapTimmData()
+{
+  if (timmData.tab == NULL)
+  {
+    /* check and init thread system */
+    timmData.isMultiThreadSupport = gap_base_thread_init();
+    
+    if(timmData.isMultiThreadSupport == TRUE)
+    {
+      timmData.mutex = g_mutex_new();
+    }
+    timmData.maxFuncId = -1;
+    timmData.tabSizeInElements = GAP_TIMM_MAX_ELEMENTS;
+    timmData.tab = g_malloc0(timmData.tabSizeInElements * sizeof(GapTimmElement));
+  }
+}
+
+static guint64
+p_timespecDiff(GTimeVal *startTimePtr, GTimeVal *endTimePtr)
+{
+  return ((endTimePtr->tv_sec * G_USEC_PER_SEC) + endTimePtr->tv_usec) -
+           ((startTimePtr->tv_sec * G_USEC_PER_SEC) + startTimePtr->tv_usec);
+}
+
+
+/* ---------------------------------
+ * p_tim_mutex_lock
+ * ---------------------------------
+ * lock the full timm table
+ */
+static void
+p_tim_mutex_lock()
+{
+  if(timmData.mutex)
+  {
+    g_mutex_lock(timmData.mutex);
+  }
+}
+
+/* ---------------------------------
+ * p_tim_mutex_unlock
+ * ---------------------------------
+ * unlock the full timm table
+ */
+static void
+p_tim_mutex_unlock()
+{
+  if(timmData.mutex)
+  {
+    g_mutex_unlock(timmData.mutex);
+  }
+}
+
+/* ---------------------------------
+ * p_get_threadIndex
+ * ---------------------------------
+ * get the index for the current funcId and the current thread.
+ * (reuses a free per-thread slot where no start time is currently recorded, or allocates a new one)
+ */
+static gint
+p_get_threadIndex(gint32 funcId)
+{
+  gint    idx;
+  gint    firstFreeIdx;
+  gint64  threadId;
+  
+  threadId = gap_base_get_thread_id();
+
+  p_tim_mutex_lock();
+  
+  firstFreeIdx = -1;
+  
+  for(idx = 0; idx <= timmData.tab[funcId].maxThreadIdx; idx++)
+  {
+    if(timmData.tab[funcId].funcThreadId[idx] == threadId)
+    {
+      p_tim_mutex_unlock();
+      return(idx);
+    }
+    if ((firstFreeIdx < 0)
+    && (timmData.tab[funcId].isStartTimeRecorded[idx] == FALSE))
+    {
+      firstFreeIdx = idx;
+    }
+  }
+  
+  if (firstFreeIdx >= 0)
+  {
+    timmData.tab[funcId].funcThreadId[firstFreeIdx] = threadId;
+    p_tim_mutex_unlock();
+    return (firstFreeIdx);
+  
+  }
+  
+  firstFreeIdx = timmData.tab[funcId].maxThreadIdx +1;
+  if(firstFreeIdx < GAP_TIMM_MAX_THREADS)
+  {
+    timmData.tab[funcId].maxThreadIdx = firstFreeIdx;
+    timmData.tab[funcId].isStartTimeRecorded[firstFreeIdx] = FALSE;
+    timmData.tab[funcId].funcThreadId[firstFreeIdx] = threadId;
+    p_tim_mutex_unlock();
+    return (firstFreeIdx);
+  }
+  
+  printf("p_get_threadIndex: funcID:%d  ERROR more than %d parallel threads! (measured runtime results will wrong for this funcId..)\n"
+     ,(int)funcId
+     ,(int)GAP_TIMM_MAX_THREADS
+     );
+     
+  p_tim_mutex_unlock();
+  
+  return (0);
+  
+}  /* end p_get_threadIndex */
+
+
+
+
+/* ---------------------------------
+ * gap_timm_get_function_id
+ * ---------------------------------
+ * returns a unique funcId for the specified function name.
+ * note that this id is only valid within the current process
+ * (the returned id may differ for the same functionName 
+ *  when this procedure is called in another session or another process)
+ *
+ */
+gint32
+gap_timm_get_function_id(const char *functionName)
+{
+  gint32 ii;
+
+  
+  p_initGapTimmData();
+
+  if(timmData.mutex)
+  {
+    g_mutex_lock(timmData.mutex);
+  }
+
+  for(ii=0; ii <= timmData.maxFuncId; ii++)
+  {
+    if(strcmp(functionName, timmData.tab[ii].funcName) == 0)
+    {
+      if(timmData.mutex)
+      {
+        g_mutex_unlock(timmData.mutex);
+      }
+      return(timmData.tab[ii].funcId);
+    }
+  }
+  if (timmData.maxFuncId < timmData.tabSizeInElements -1)
+  {
+    /* init element for the new funcId */
+    timmData.tab[ii].funcThreadId[0] = gap_base_get_thread_id();
+    timmData.tab[ii].maxThreadIdx = 0;
+
+    timmData.maxFuncId++;
+    g_snprintf(&timmData.tab[ii].funcName[0], GAP_TIMM_MAX_FUNCNAME -1
+               ,"%s"
+               ,functionName
+            );
+    timmData.tab[ii].isStartTimeRecorded[0] = FALSE;
+    timmData.tab[ii].numberOfCallsStarted = 0;
+    timmData.tab[ii].numberOfCallsFinished = 0;
+    timmData.tab[ii].errorFlag = FALSE;
+    timmData.tab[ii].summaryDuration = 0;
+    timmData.tab[ii].minDuration = 0;
+    timmData.tab[ii].maxDuration = 0;
+    timmData.tab[ii].funcMutex = NULL;
+    if(timmData.isMultiThreadSupport)
+    {
+      timmData.tab[ii].funcMutex = g_mutex_new();
+    }
+  }
+
+  if(timmData.mutex)
+  {
+    g_mutex_unlock(timmData.mutex);
+  }
+  return (timmData.maxFuncId);
+  
+}  /* end gap_timm_get_function_id */
+
+
+/* ---------------------------------
+ * gap_timm_start_function
+ * ---------------------------------
+ * record start time for the function identified by funcId.
+ */
+void
+gap_timm_start_function(gint32 funcId)
+{
+  p_initGapTimmData();
+  if((funcId >= 0) && (funcId<=timmData.maxFuncId))
+  {
+    gint threadIdx;
+    if(timmData.tab[funcId].funcMutex)
+    {
+      g_mutex_lock(timmData.tab[funcId].funcMutex);
+    }
+    
+    
+    threadIdx = p_get_threadIndex(funcId);
+    
+    timmData.tab[funcId].numberOfCallsStarted++;
+    if(timmData.tab[funcId].isStartTimeRecorded[threadIdx])
+    {
+      timmData.tab[funcId].errorFlag = TRUE;
+    }
+    else
+    {
+      timmData.tab[funcId].isStartTimeRecorded[threadIdx] = TRUE;
+    }
+    g_get_current_time(&timmData.tab[funcId].startTime[threadIdx]);
+
+    if(timmData.tab[funcId].funcMutex)
+    {
+      g_mutex_unlock(timmData.tab[funcId].funcMutex);
+    }
+  }
+  else
+  {
+    printf("gap_timm_start_function: ERROR unsupported funcId:%d\n"
+      ,(int)funcId
+      );
+  }
+  
+}  /* end gap_timm_start_function */
+
+
+/* ---------------------------------
+ * gap_timm_stop_function
+ * ---------------------------------
+ * in case a starttime was recorded for the specified funcId
+ * calculate the duration since start time recording
+ * and remove the recorded starttime.
+ *
+ * typically gap_timm_start_function shall be called at the beginning
+ * of a function to be measured and gap_timm_stop_function
+ * is called after the 'to be measured' processing was done.
+ *
+ * in case there is no recorded start time
+ * an error message is printed and no calculation is done.
+ */
+void
+gap_timm_stop_function(gint32 funcId)
+{
+  p_initGapTimmData();
+  if((funcId >= 0) && (funcId<=timmData.maxFuncId))
+  {
+    gint threadIdx;
+    if(timmData.tab[funcId].funcMutex)
+    {
+      g_mutex_lock(timmData.tab[funcId].funcMutex);
+    }
+    
+    
+    threadIdx = p_get_threadIndex(funcId);
+    
+    if(timmData.tab[funcId].isStartTimeRecorded[threadIdx])
+    {
+       GTimeVal  stopTime;
+       guint64   duration;
+       
+       g_get_current_time(&stopTime);
+       duration = p_timespecDiff(&timmData.tab[funcId].startTime[threadIdx]
+                                ,&stopTime
+                                );
+       timmData.tab[funcId].summaryDuration += duration;
+       if(duration > timmData.tab[funcId].maxDuration)
+       {
+         timmData.tab[funcId].maxDuration = duration;
+       }
+       if ((duration < timmData.tab[funcId].minDuration)
+       || (timmData.tab[funcId].numberOfCallsFinished == 0))
+       {
+         timmData.tab[funcId].minDuration = duration;
+       }
+       timmData.tab[funcId].numberOfCallsFinished++;
+       timmData.tab[funcId].isStartTimeRecorded[threadIdx] = FALSE;
+    }
+    else
+    {
+      timmData.tab[funcId].errorFlag = TRUE;
+      if(gap_debug)
+      {
+        printf("gap_timm_stop_function: ERROR no startTime was found for funcId:%d threadId:%d threadIdx:%d(%s)\n"
+          ,(int)funcId
+          ,(int)timmData.tab[funcId].funcThreadId[threadIdx]
+          ,(int)threadIdx
+          ,&timmData.tab[funcId].funcName[0]
+          );
+      }
+    }
+    
+    if(timmData.tab[funcId].funcMutex)
+    {
+      g_mutex_unlock(timmData.tab[funcId].funcMutex);
+    }
+  }
+  else
+  {
+    printf("gap_timm_stop_function: ERROR unsupported funcId:%d\n"
+      ,(int)funcId
+      );
+  }
+  
+}  /* end gap_timm_stop_function */
+
+
+/* ---------------------------------
+ * gap_timm_print_statistics
+ * ---------------------------------
+ * print runtime statistics for all recorded funcId's.
+ */
+void
+gap_timm_print_statistics()
+{
+  gint32 ii;
+  
+  p_initGapTimmData();
+  p_tim_mutex_lock();
+  
+  printf("gap_timm_print_statistics runtime recording has %d entries:\n"
+        ,(int)timmData.maxFuncId +1
+        );
+  
+  for(ii=0; ii <= timmData.maxFuncId; ii++)
+  {
+    guint64 avgDuration;
+    avgDuration = 0;
+    if(timmData.tab[ii].numberOfCallsFinished > 0)
+    {
+      avgDuration = timmData.tab[ii].summaryDuration / timmData.tab[ii].numberOfCallsFinished;
+    }
+    
+    printf("id:%03d %-65.65s calls:%06u sum:%llu min:%llu max:%llu avg:%llu"
+      , (int) ii
+      , &timmData.tab[ii].funcName[0]
+      , (int) timmData.tab[ii].numberOfCallsFinished
+      , timmData.tab[ii].summaryDuration
+      , timmData.tab[ii].minDuration
+      , timmData.tab[ii].maxDuration
+      , avgDuration
+      );
+    if(timmData.tab[ii].errorFlag)
+    {
+      printf("(Err)");
+    }
+    if(timmData.tab[ii].numberOfCallsFinished != timmData.tab[ii].numberOfCallsStarted)
+    {
+      printf("(callsStarted:%d)"
+        , (int)timmData.tab[ii].numberOfCallsStarted
+        );
+    }
+    printf(" usecs\n");
+    
+  }
+  fflush(stdout);
+  p_tim_mutex_unlock();
+
+}  /* end gap_timm_print_statistics */
+
+
+
+
+
+
+/// recording features without function id
+/// these functions do not synchronize (no mutex locking is done)
+
+/* ---------------------------------
+ * gap_timm_init_record
+ * ---------------------------------
+ * reset the specified record
+ */
+void
+gap_timm_init_record(GapTimmRecord *timmp)
+{
+  if(timmp == NULL)
+  {
+    return;
+  }
+  timmp->numberOfCalls         = 0;
+  timmp->isStartTimeRecorded   = FALSE;
+  timmp->summaryDuration       = 0;
+  timmp->minDuration           = 0;
+  timmp->maxDuration           = 0;
+  g_get_current_time(&timmp->startTime);
+
+}  /* end gap_timm_init_record */
+
+/* ---------------------------------
+ * gap_timm_start_record
+ * ---------------------------------
+ * record the start time in the specified record.
+ */
+void
+gap_timm_start_record(GapTimmRecord *timmp)
+{
+  if(timmp == NULL)
+  {
+    return;
+  }
+  timmp->numberOfCalls++;
+  timmp->isStartTimeRecorded = TRUE;
+  g_get_current_time(&timmp->startTime);
+}  /* end gap_timm_start_record */
+
+
+/* ---------------------------------
+ * gap_timm_stop_record
+ * ---------------------------------
+ * calculate the duration since start time recording
+ */
+void
+gap_timm_stop_record(GapTimmRecord *timmp)
+{
+  GTimeVal  stopTime;
+  guint64   duration;
+
+  if(timmp == NULL)
+  {
+    return;
+  }
+  
+  if(timmp->isStartTimeRecorded)
+  {
+    g_get_current_time(&stopTime);
+    duration = p_timespecDiff(&timmp->startTime, &stopTime);
+    timmp->summaryDuration += duration;
+    if(duration > timmp->maxDuration)
+    {
+      timmp->maxDuration = duration;
+    }
+    if ((duration < timmp->minDuration)
+    || (timmp->numberOfCalls == 0))
+    {
+      timmp->minDuration = duration;
+    }
+    timmp->isStartTimeRecorded = FALSE;
+  }
+  
+}  /* end gap_timm_stop_record */
+
+
+/* ---------------------------------
+ * gap_timm_print_record
+ * ---------------------------------
+ * print runtime statistics for the specified record.
+ */
+void
+gap_timm_print_record(GapTimmRecord *timmp, const char *functionName)
+{
+  guint64 avgDuration;
+
+  if(timmp == NULL)
+  {
+    return;
+  }
+
+
+  avgDuration = 0;
+  if(timmp->numberOfCalls > 0)
+  {
+    avgDuration = timmp->summaryDuration / timmp->numberOfCalls;
+  }
+    
+  printf("tim: %-65.65s calls:%06u sum:%llu min:%llu max:%llu avg:%llu usecs\n"
+      , functionName
+      , (int) timmp->numberOfCalls
+      , timmp->summaryDuration
+      , timmp->minDuration
+      , timmp->maxDuration
+      , avgDuration
+      );
+
+}  /* end gap_timm_print_record */
+
+
+
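For illustration, a minimal usage sketch of the caller-owned (no-lock) record API defined above. The sketch is not part of this commit; the function and variable names in it are invented for the example, and it calls the gap_timm_*_record procedures directly instead of the corresponding macros.

/* illustrative sketch (not from this commit): measuring a loop body
 * with a caller-owned GapTimmRecord, using only the gap_timm_*_record
 * procedures introduced above.
 */
#include "gap_timm.h"

static void
example_measure_encode_loop(gint32 numFrames)
{
  GapTimmRecord encodeStats;
  gint32        frameNr;

  gap_timm_init_record(&encodeStats);

  for(frameNr = 0; frameNr < numFrames; frameNr++)
  {
    gap_timm_start_record(&encodeStats);
    /* ... the work to be measured, e.g. encoding one frame ... */
    gap_timm_stop_record(&encodeStats);
  }

  /* prints calls, sum, min, max and average duration in usecs */
  gap_timm_print_record(&encodeStats, "example_measure_encode_loop.encode");
}
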
diff --git a/libgapbase/gap_timm.h b/libgapbase/gap_timm.h
new file mode 100755
index 0000000..7b1e8e1
--- /dev/null
+++ b/libgapbase/gap_timm.h
@@ -0,0 +1,189 @@
+/* gap_timm.h
+ *    by hof (Wolfgang Hofer)
+ *    runtime measuring procedures
+ *    provides MACROS for runtime recording
+ *       the xx_FUNCTION macros capture runtime results in a static table by functionId and threadId
+ *                       (this requires a mutex that may affect the measured results due to synchronisation)
+ *       the xx_RECORD macros capture values in the buffer provided by the caller (and no mutex locking is done)
+ *
+ *    Note that the timm procedures shall be called via the MACROS
+ *
+ *  2010/10/19
+ *
+ */
+/* The GIMP -- an image manipulation program
+ * Copyright (C) 1995 Spencer Kimball and Peter Mattis
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/* revision history:
+ * version 2.7.0;             hof: created
+ */
+
+#ifndef _GAP_TIMM_H
+#define _GAP_TIMM_H
+
+/* SYSTEM (UNIX) includes */
+#include <stdio.h>
+#include <stdlib.h>
+
+/* GIMP includes */
+#include "gtk/gtk.h"
+#include "libgimp/gimp.h"
+
+
+typedef struct GapTimmRecord
+{
+  gboolean  isStartTimeRecorded;
+  GTimeVal  startTime;
+  guint32   numberOfCalls;
+  guint64   summaryDuration;
+  guint64   minDuration;
+  guint64   maxDuration;
+ 
+} GapTimmRecord;
+
+
+/* macros to enable runtime recording function calls at compiletime.
+ * in case GAP_RUNTIME_RECORDING_NOLOCK is not defined at compiletime
+ * the macros expand to nothing, i.e. the runtime recording function calls
+ * are not compiled at all.
+ */
+#ifdef GAP_RUNTIME_RECORDING_NOLOCK
+
+#define GAP_TIMM_INIT_RECORD(timmp)  gap_timm_init_record(timmp)
+#define GAP_TIMM_START_RECORD(timmp) gap_timm_start_record(timmp)
+#define GAP_TIMM_STOP_RECORD(timmp)  gap_timm_stop_record(timmp)
+#define GAP_TIMM_PRINT_RECORD(timmp, functionName)  gap_timm_print_record(timmp, functionName)
+
+#else
+
+#define GAP_TIMM_INIT_RECORD(timmp)
+#define GAP_TIMM_START_RECORD(timmp)
+#define GAP_TIMM_STOP_RECORD(timmp)
+#define GAP_TIMM_PRINT_RECORD(timmp, functionName)
+
+
+#endif
+
+/* macros to enable runtime recording function calls at compiletime.
+ * in case GAP_RUNTIME_RECORDING_LOCK is not defined at compiletime
+ * the macros expand to nothing, i.e. the runtime recording function calls
+ * are not compiled at all.
+ * Note that the xx_FUNCTION macros call g_mutex_lock
+ */
+
+#ifdef GAP_RUNTIME_RECORDING_LOCK
+
+#define GAP_TIMM_GET_FUNCTION_ID(funcId, functionName)  if(funcId < 0) { funcId = gap_timm_get_function_id(functionName); }
+#define GAP_TIMM_START_FUNCTION(funcId)                 gap_timm_start_function(funcId) 
+#define GAP_TIMM_STOP_FUNCTION(funcId)                  gap_timm_stop_function(funcId)
+#define GAP_TIMM_PRINT_FUNCTION_STATISTICS()            gap_timm_print_statistics()
+
+#else
+
+#define GAP_TIMM_GET_FUNCTION_ID(funcId, functionName)
+#define GAP_TIMM_START_FUNCTION(funcId)
+#define GAP_TIMM_STOP_FUNCTION(funcId)
+#define GAP_TIMM_PRINT_FUNCTION_STATISTICS()
+
+#endif
+
+
+
+/* ---------------------------------
+ * gap_timm_get_function_id
+ * ---------------------------------
+ * returns a unique funcId for the specified function name.
+ * note that this id is only valid within the current session/process
+ * (the returned id may differ for the same functionName
+ *  when this procedure is called in another session or another process)
+ *
+ */
+gint32 gap_timm_get_function_id(const char *functionName);
+
+
+/* ---------------------------------
+ * gap_timm_start_function
+ * ---------------------------------
+ * record start time for the function identified by funcId.
+ */
+void   gap_timm_start_function(gint32 funcId);
+
+
+/* ---------------------------------
+ * gap_timm_stop_function
+ * ---------------------------------
+ * in case a starttime was recorded for the specified funcId
+ * calculate the duration since start time recording
+ * and remove the recorded starttime.
+ *
+ * typically gap_timm_start_function shall be called at the beginning
+ * of a function to be measured and gap_timm_stop_function
+ * is called after the 'to be measured' processing was done.
+ *
+ * in case there is no recorded start time
+ * the error flag is set (a message is printed only in debug mode) and no duration is calculated.
+ */
+void   gap_timm_stop_function(gint32 funcId);
+
+
+/* ---------------------------------
+ * gap_timm_print_statistics
+ * ---------------------------------
+ * print runtime statistics for all recorded funcId's.
+ */
+void   gap_timm_print_statistics();
+
+
+
+/* ---------------------------------
+ * gap_timm_init_record
+ * ---------------------------------
+ * reset the specified record
+ */
+void
+gap_timm_init_record(GapTimmRecord *timmp);
+
+
+/* ---------------------------------
+ * gap_timm_start_record
+ * ---------------------------------
+ * record the start time in the specified record.
+ */
+void
+gap_timm_start_record(GapTimmRecord *timmp);
+
+
+/* ---------------------------------
+ * gap_timm_stop_record
+ * ---------------------------------
+ * calculate the duration since start time recording
+ */
+void
+gap_timm_stop_record(GapTimmRecord *timmp);
+
+
+/* ---------------------------------
+ * gap_timm_print_record
+ * ---------------------------------
+ * print runtime statistics for the specified record.
+ */
+void
+gap_timm_print_record(GapTimmRecord *timmp, const char *functionName);
+
+
+#endif
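For reference, a minimal sketch of the macro-based instrumentation pattern declared above (illustrative only, not part of this commit; it assumes the code is built with GAP_RUNTIME_RECORDING_LOCK defined, e.g. via -DGAP_RUNTIME_RECORDING_LOCK, otherwise all macros expand to nothing and add no overhead). The same pattern is used in the libgapvidapi changes below.

/* illustrative sketch (not from this commit): instrumenting a function
 * with the funcId based macros. The funcId is resolved once per process
 * and reused on subsequent calls.
 */
#include "gap_timm.h"

static void
example_instrumented_function(void)
{
  static gint32 funcId = -1;

  GAP_TIMM_GET_FUNCTION_ID(funcId, "example_instrumented_function");
  GAP_TIMM_START_FUNCTION(funcId);

  /* ... processing to be measured ... */

  GAP_TIMM_STOP_FUNCTION(funcId);

  /* typically called once, e.g. at shutdown, to dump all recorded statistics:
   * GAP_TIMM_PRINT_FUNCTION_STATISTICS();
   */
}
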
diff --git a/libgapvidapi/Makefile.am b/libgapvidapi/Makefile.am
index 9b62368..b5c7283 100644
--- a/libgapvidapi/Makefile.am
+++ b/libgapvidapi/Makefile.am
@@ -32,6 +32,7 @@ EXTRA_DIST = \
 	gap_vid_api_mpeg3toc.c	\
 	gap_vid_api_quicktime.c	\
 	gap_vid_api_util.c	\
+	gap_vid_api_mp_util.c	\
 	gap_vid_api_vidindex.c	\
 	gap_vid_api-intl.h	\
 	example.c
diff --git a/libgapvidapi/gap_vid_api.c b/libgapvidapi/gap_vid_api.c
old mode 100644
new mode 100755
index fc0cee9..ecd4a42
--- a/libgapvidapi/gap_vid_api.c
+++ b/libgapvidapi/gap_vid_api.c
@@ -36,6 +36,7 @@
  * ---------------------------------------------
  */
 
+
 /* API access for GIMP-GAP frame sequences needs no external
  * libraries and is always enabled
  * (there is no configuration parameter for this "decoder" implementation)
@@ -45,6 +46,7 @@
 /* ------------------------------------------------
  * revision history
  *
+ * 2010.11.20     (hof)  added multiprocessor support.
  * 2004.04.25     (hof)  integration into gimp-gap, using config.h
  * 2004.02.28     (hof)  added procedures GVA_frame_to_buffer, GVA_delace_frame
  */
@@ -74,8 +76,24 @@
 extern      int gap_debug; /* ==0  ... dont print debug infos */
 
 #include "gap_vid_api.h"
+#include "gap_base.h"
+
+
+static inline void   gva_delace_mix_rows( gint32 width
+                        , gint32 bpp
+                        , gint32 row_bytewidth
+                        , gint32 mix_threshold   /* 0 <= mix_threshold <= 33554432 (256*256*256*2) */
+                        , const guchar *prev_row
+                        , const guchar *next_row
+                        , guchar *mixed_row
+                        );
+static gint32        gva_delace_calculate_mix_threshold(gdouble threshold);
+static gint32        gva_delace_calculate_interpolate_flag(gint32 deinterlace);
+
 #include "gap_vid_api_util.c"
 #include "gap_vid_api_vidindex.c"
+#include "gap_vid_api_mp_util.c"
+#include "gap_libgapbase.h"
 
 t_GVA_DecoderElem  *GVA_global_decoder_list = NULL;
 
@@ -117,6 +135,7 @@ static t_GVA_Handle *            p_gva_worker_open_read(const char *filename, gi
                                     ,gboolean disable_mmx
                                     );
 
+
 /* ---------------------------
  * GVA_percent_2_frame
  * ---------------------------
@@ -468,8 +487,12 @@ GVA_set_fcache_size(t_GVA_Handle *gvahand
   if ((frames_to_keep_cahed > 0)
   &&  (frames_to_keep_cahed <= GVA_MAX_FCACHE_SIZE))
   {
+      GVA_fcache_mutex_lock (gvahand);
+
       /* re-adjust fcache size as desired by calling program */
       p_build_frame_cache(gvahand, frames_to_keep_cahed);
+
+      GVA_fcache_mutex_unlock (gvahand);
   }
   else
   {
@@ -480,6 +503,54 @@ GVA_set_fcache_size(t_GVA_Handle *gvahand
 }  /* end GVA_set_fcache_size */
 
 
+/* -------------------------------
+ * GVA_get_fcache_size_in_elements
+ * -------------------------------
+ * return internal frame cache allocation size in number of elements (i.e. frames)
+ * note that the fcache is already fully allocated at open time, therefore
+ * the number of actually cached frames may be smaller than the returned number.
+ */
+gint32 
+GVA_get_fcache_size_in_elements(t_GVA_Handle *gvahand)
+{
+  t_GVA_Frame_Cache *fcache;
+  gint32 frame_cache_size;
+  
+  fcache = &gvahand->fcache;
+  frame_cache_size = fcache->frame_cache_size;
+ 
+  return (frame_cache_size);
+
+}  /* end GVA_get_fcache_size_in_elements */
+
+
+/* ------------------------------
+ * GVA_get_fcache_size_in_bytes
+ * ------------------------------
+ * return internal frame cache allocation size in bytes
+ * note that the fcache is already fully allocated at open time, therefore
+ * the returned number is an indicator for memory usage but not for actually cached frames.
+ */
+gint32 
+GVA_get_fcache_size_in_bytes(t_GVA_Handle *gvahand)
+{
+  int wwidth, wheight, bpp;
+  gint32 bytesUsedPerElem;
+  gint32 bytesUsedSummary;
+
+  wwidth  = MAX(gvahand->width, 2);
+  wheight = MAX(gvahand->height, 2);
+  bpp = gvahand->frame_bpp;
+
+  bytesUsedPerElem = (wwidth * wheight * bpp + 4);          /* data size per cache element */
+  bytesUsedPerElem += (sizeof(unsigned char*) * wheight);   /* rowpointers per cache element */
+
+
+  bytesUsedSummary = bytesUsedPerElem * GVA_get_fcache_size_in_elements(gvahand);
+  return(bytesUsedSummary);
+
+}  /* end GVA_get_fcache_size_in_bytes */
+
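As a rough worked example of the size reported above (assumed example values, not taken from this commit): for a 720x576 video with frame_bpp = 3 on a 64-bit system, one cache element accounts for 720*576*3 + 4 = 1244164 data bytes plus 576 row pointers of 8 bytes each (4608 bytes), i.e. roughly 1.2 MB per element; with a 32 element fcache, GVA_get_fcache_size_in_bytes would therefore report about 40 MB.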
 
 /* ------------------------------------
  * GVA_search_fcache
@@ -504,14 +575,23 @@ GVA_search_fcache(t_GVA_Handle *gvahand
 {
   t_GVA_Frame_Cache *fcache;
   t_GVA_Frame_Cache_Elem  *fc_ptr;
+  static gint32 funcId = -1;
+  
+  GAP_TIMM_GET_FUNCTION_ID(funcId, "GVA_search_fcache");
 
 
-  if(gap_debug) printf("GVA_search_fcache: search for framenumber: %d\n", (int)framenumber );
+  if(gap_debug)
+  {
+    printf("GVA_search_fcache: search for framenumber: %d\n", (int)framenumber );
+  }
   if(gvahand->fcache.fcache_locked)
   {
     return(GVA_RET_EOF);  /* dont touch the fcache while locked */
   }
 
+  GVA_fcache_mutex_lock (gvahand);
+  GAP_TIMM_START_FUNCTION(funcId);
+
   /* init with framedata of current frame
    * (for the case that framenumber not available in fcache)
    */
@@ -531,6 +611,10 @@ GVA_search_fcache(t_GVA_Handle *gvahand
           gvahand->fc_frame_data = fc_ptr->frame_data;  /* framedata of cached frame */
           gvahand->fc_row_pointers = fc_ptr->row_pointers;
 
+
+          GAP_TIMM_STOP_FUNCTION(funcId);
+          GVA_fcache_mutex_unlock (gvahand);
+
           return(GVA_RET_OK);  /* OK */
         }
       }
@@ -542,15 +626,25 @@ GVA_search_fcache(t_GVA_Handle *gvahand
 
       if(fcache->fc_current == fc_ptr)
       {
+        GAP_TIMM_STOP_FUNCTION(funcId);
+
+        GVA_fcache_mutex_unlock (gvahand);
+
         return (GVA_RET_EOF);  /* STOP, we are back at startpoint of the ringlist */
       }
       if(fc_ptr == NULL)
       {
+        GAP_TIMM_STOP_FUNCTION(funcId);
+        GVA_fcache_mutex_unlock (gvahand);
+
         return (GVA_RET_ERROR);  /* internal error, ringlist is broken */
       }
     }
   }
 
+  GAP_TIMM_STOP_FUNCTION(funcId);
+  GVA_fcache_mutex_unlock (gvahand);
+
   /* ringlist not found */
   return (GVA_RET_ERROR);
 
@@ -592,6 +686,8 @@ GVA_search_fcache_by_index(t_GVA_Handle *gvahand
     return(GVA_RET_EOF);  /* dont touch the fcache while locked */
   }
 
+  GVA_fcache_mutex_lock (gvahand);
+
   /* init with framedata of current frame
    * (for the case that framenumber not available in fcache)
    */
@@ -612,13 +708,24 @@ GVA_search_fcache_by_index(t_GVA_Handle *gvahand
         *framenumber = fc_ptr->framenumber;
         if(fc_ptr->framenumber < 0)
         {
-          if(gap_debug) printf("GVA_search_fcache_by_index: INDEX: %d  NOT FOUND (fnum < 0) ###########\n", (int)index );
+          if(gap_debug)
+          {
+            printf("GVA_search_fcache_by_index: INDEX: %d  NOT FOUND (fnum < 0) ###########\n", (int)index );
+          }
+
+          GVA_fcache_mutex_unlock (gvahand);
+
           return (GVA_RET_EOF);
         }
         gvahand->fc_frame_data = fc_ptr->frame_data;  /* framedata of cached frame */
         gvahand->fc_row_pointers = fc_ptr->row_pointers;
 
-        if(gap_debug) printf("GVA_search_fcache_by_index: fnum; %d INDEX: %d  FOUND ;;;;;;;;;;;;;;;;;;\n", (int)*framenumber, (int)index );
+        if(gap_debug)
+        {
+          printf("GVA_search_fcache_by_index: fnum; %d INDEX: %d  FOUND ;;;;;;;;;;;;;;;;;;\n", (int)*framenumber, (int)index );
+        }
+        GVA_fcache_mutex_unlock (gvahand);
+
         return(GVA_RET_OK);  /* OK */
       }
 
@@ -629,16 +736,25 @@ GVA_search_fcache_by_index(t_GVA_Handle *gvahand
 
       if(fcache->fc_current == fc_ptr)
       {
-        if(gap_debug) printf("GVA_search_fcache_by_index: INDEX: %d  NOT FOUND (ring done) ************\n", (int)index );
+        if(gap_debug)
+        {
+          printf("GVA_search_fcache_by_index: INDEX: %d  NOT FOUND (ring done) ************\n", (int)index );
+        }
+        GVA_fcache_mutex_unlock (gvahand);
+
         return (GVA_RET_EOF);  /* STOP, we are back at startpoint of the ringlist */
       }
       if(fc_ptr == NULL)
       {
+        GVA_fcache_mutex_unlock (gvahand);
+
         return (GVA_RET_ERROR);  /* internal error, ringlist is broken */
       }
     }
   }
 
+  GVA_fcache_mutex_unlock (gvahand);
+
   /* ringlist not found */
   return (GVA_RET_ERROR);
 
@@ -646,6 +762,739 @@ GVA_search_fcache_by_index(t_GVA_Handle *gvahand
 
 
 
+/* ---------------------------------
+ * p_copyRgbBufferToPixelRegion
+ * ---------------------------------
+ * tile based copy from rgbBuffer to PixelRegion 
+ */
+static inline void
+p_copyRgbBufferToPixelRegion (const GimpPixelRgn *dstPR
+                    ,const GVA_RgbPixelBuffer *srcBuff)
+{
+  guint    row;
+  guchar*  src;
+  guchar*  dest;
+   
+  dest  = dstPR->data;
+  src = srcBuff->data 
+       + (dstPR->y * srcBuff->rowstride)
+       + (dstPR->x * srcBuff->bpp);
+
+  for (row = 0; row < dstPR->h; row++)
+  {
+     memcpy(dest, src, dstPR->rowstride);
+     src  += srcBuff->rowstride;
+     dest += dstPR->rowstride;
+  }
+  
+}  /* end p_copyRgbBufferToPixelRegion */
+
+/* -------------------------------------------------------
+ * GVA_search_fcache_and_get_frame_as_gimp_layer_or_rgb888    procedure instrumented for PERFTEST
+ * -------------------------------------------------------
+ * search the frame cache for given framenumber
+ * and deliver result into the specified GVA_fcache_fetch_result struct.
+ * The attribute isRgb888Result in the GVA_fcache_fetch_result struct
+ * controls the type of result where 
+ *    isRgb888Result == TRUE   will deliver a uchar buffer (optional deinterlaced) 
+ *                             that is copy of the fcache in RGB888 colormodel
+ *
+ *    isRgb888Result == FALSE  will deliver a gimp layer
+ *                             that is copy of the fcache (optional deinterlaced)
+ *                             and is the only layer in a newly created gimp image ( without display attached).
+ *
+ *
+ * fetchResult->isFrameAvailable == FALSE  indicates that the required framenumber
+ * was not available in the fcache.
+ *
+ * This procedure also sets the fcache internal rowpointers.
+ *  gvahand->fc_frame_data
+ *  gvahand->fc_row_pointers
+ *
+ *
+ * Notes:
+ * o)  this procedure does not set image aspect for performance reasons.
+ *     in case aspect is required the calling program has to perform
+ *     an additional call like this:
+ *       GVA_search_fcache_and_get_frame_as_gimp_layer(gvahand, ....);
+ *       GVA_image_set_aspect(gvahand, gimp_drawable_get_image(fetchResult->layer_id));
+ */
+void
+GVA_search_fcache_and_get_frame_as_gimp_layer_or_rgb888(t_GVA_Handle *gvahand
+                 , gint32   framenumber
+                 , gint32   deinterlace
+                 , gdouble  threshold
+                 , gint32   numProcessors
+                 , GVA_fcache_fetch_result *fetchResult
+                 )
+{
+  t_GVA_Frame_Cache *fcache;
+  t_GVA_Frame_Cache_Elem  *fc_ptr;
+  gint32                   l_threshold;
+  gint32                   l_mix_threshold;
+
+  static gint32 funcId = -1;
+  static gint32 funcIdMemcpy = -1;
+  static gint32 funcIdToDrawableRect = -1;
+  static gint32 funcIdToDrawableTile = -1;
+  static gint32 funcIdDrawableFlush = -1;
+  static gint32 funcIdDrawableDetach = -1;
+  
+  GAP_TIMM_GET_FUNCTION_ID(funcId, "GVA_search_fcache_and_get_frame");
+  GAP_TIMM_GET_FUNCTION_ID(funcIdMemcpy, "GVA_search_fcache_and_get_frame.memcpy");
+  GAP_TIMM_GET_FUNCTION_ID(funcIdToDrawableRect, "GVA_search_fcache_and_get_frame.toDrawable (rgn_set_rect)");
+  GAP_TIMM_GET_FUNCTION_ID(funcIdToDrawableTile, "GVA_search_fcache_and_get_frame.toDrawable (tilebased)");
+  GAP_TIMM_GET_FUNCTION_ID(funcIdDrawableFlush, "GVA_search_fcache_and_get_frame.gimp_drawable_flush");
+  GAP_TIMM_GET_FUNCTION_ID(funcIdDrawableDetach, "GVA_search_fcache_and_get_frame.gimp_drawable_detach");
+
+  fetchResult->isFrameAvailable = FALSE;
+  fetchResult->layer_id = -1;
+  fetchResult->image_id = -1;
+
+  if(gap_debug)
+  {
+    printf("GVA_search_fcache_and_get_frame_as_gimp_layer: search for framenumber: %d\n", (int)framenumber );
+  }
+  if(gvahand->fcache.fcache_locked)
+  {
+    return;  /* dont touch the fcache while locked */
+  }
+
+  GAP_TIMM_START_FUNCTION(funcId);
+
+
+  /* expand threshold range from 0.0-1.0  to 0 - MIX_MAX_THRESHOLD */
+  threshold = CLAMP(threshold, 0.0, 1.0);
+  l_threshold = (gdouble)MIX_MAX_THRESHOLD * (threshold * threshold * threshold);
+  l_mix_threshold = CLAMP((gint32)l_threshold, 0, MIX_MAX_THRESHOLD);
+
+  GVA_fcache_mutex_lock (gvahand);
+
+
+  /* init with framedata of current frame
+   * (for the case that framenumber not available in fcache)
+   */
+  gvahand->fc_frame_data = gvahand->frame_data;
+  gvahand->fc_row_pointers = gvahand->row_pointers;
+
+  fcache = &gvahand->fcache;
+  if(fcache->fc_current)
+  {
+    fc_ptr = (t_GVA_Frame_Cache_Elem  *)fcache->fc_current;
+    while(1 == 1)
+    {
+      if(framenumber == fc_ptr->framenumber)
+      {
+        if(fc_ptr->framenumber >= 0)
+        {
+          /* FCACHE HIT */
+          static gboolean           isPerftestInitialized = FALSE;          
+          static gboolean           isPerftestApiTilesDefault;
+          static gboolean           isPerftestApiTiles;          /* copy tile-by-tile versus gimp_pixel_rgn_set_rect all at once */
+          static gboolean           isPerftestApiMemcpyMP;       /* memcpy versus multithreaded memcpy in row stripes */
+
+          GVA_RgbPixelBuffer  rgbBufferLocal;
+          GVA_RgbPixelBuffer *rgbBuffer;
+          guchar            *frameData;
+          GimpDrawable      *drawable;
+          GimpPixelRgn       pixel_rgn;
+          gboolean           isEarlyUnlockPossible;
+          
+          
+          gvahand->fc_frame_data = fc_ptr->frame_data;  /* framedata of cached frame */
+          gvahand->fc_row_pointers = fc_ptr->row_pointers;
+          
+          
+          if(gvahand->frame_bpp != 3)
+          {
+            /* force fetch as drawable in case video data is not of type rgb888
+             */
+            fetchResult->isRgb888Result = FALSE;
+          }
+          
+          if (fetchResult->isRgb888Result == TRUE)
+          {
+            rgbBuffer = &fetchResult->rgbBuffer;
+          }
+          else
+          {
+            /* in case fetch result is gimp layer, use a local buffer for delace purpose */
+            rgbBuffer = &rgbBufferLocal;
+            rgbBuffer->data = NULL;
+          }
+          
+          rgbBuffer->width = gvahand->width;
+          rgbBuffer->height = gvahand->height;
+          rgbBuffer->bpp = gvahand->frame_bpp;
+          rgbBuffer->rowstride = gvahand->width * gvahand->frame_bpp;     /* bytes per pixel row */
+          rgbBuffer->deinterlace = deinterlace;
+          rgbBuffer->threshold = threshold;
+          frameData = NULL;
+          isEarlyUnlockPossible = TRUE;
+          
+          /* PERFTEST configuration values to test performance of various strategies on multiprocessor machines */
+          if (isPerftestInitialized != TRUE)
+          {
+            isPerftestInitialized = TRUE;
+
+            if(numProcessors > 1)
+            {
+              /* copying the full size as one big rectangle gives the gimp core the chance to process with more than one thread */
+              isPerftestApiTilesDefault = FALSE;
+            }
+            else
+            {
+              /* tile based copy was a little bit faster in tests where gimp-core used only one CPU */
+              isPerftestApiTilesDefault = TRUE;
+            }
+            isPerftestApiTiles = gap_base_get_gimprc_gboolean_value("isPerftestApiTiles", isPerftestApiTilesDefault);
+            isPerftestApiMemcpyMP = gap_base_get_gimprc_gboolean_value("isPerftestApiMemcpyMP", TRUE);
+          }
+          
+          
+          if (deinterlace != 0)
+          {
+            if(rgbBuffer->data == NULL)
+            {
+              rgbBuffer->data = g_malloc(rgbBuffer->rowstride * rgbBuffer->height);
+              if(fetchResult->isRgb888Result != TRUE)
+              {
+                frameData = rgbBuffer->data;  /* frameData will be freed after convert to drawable */
+              }
+            }
+            GVA_copy_or_deinterlace_fcache_data_to_rgbBuffer(rgbBuffer
+                                                          , gvahand->fc_frame_data
+                                                          , numProcessors
+                                                          );
+          }
+          else
+          {
+            if (fetchResult->isRgb888Result == TRUE)
+            {
+              /* it is required to make a 1:1 copy of the rgb888 fcache data
+               * to the rgbBuffer.
+               * allocate the buffer in case the caller has supplied just a NULL data pointer.
+               * otherwise use the supplied buffer.
+               */
+              if(rgbBuffer->data == NULL)
+              {
+                rgbBuffer->data = g_malloc(rgbBuffer->rowstride * rgbBuffer->height);
+              }
+
+              if(isPerftestApiMemcpyMP)
+              {
+                GVA_copy_or_deinterlace_fcache_data_to_rgbBuffer(rgbBuffer
+                                                          , gvahand->fc_frame_data
+                                                          , numProcessors
+                                                          );
+              }
+              else
+              {
+                GAP_TIMM_START_FUNCTION(funcIdMemcpy);
+                
+                memcpy(rgbBuffer->data, gvahand->fc_frame_data, (rgbBuffer->rowstride * rgbBuffer->height));
+                
+                GAP_TIMM_STOP_FUNCTION(funcIdMemcpy);
+              }
+            }
+            else
+            {
+              /* set up rgbBuffer->data to point directly to the fcache frame data.
+               * No additional frameData buffer is allocated in this case and no extra memcpy is required,
+               * but the fcache mutex must stay in locked state until data is completely transferred
+               * to the drawable.
+               */
+              rgbBuffer->data = gvahand->fc_frame_data;
+              isEarlyUnlockPossible = FALSE;
+            }
+          }
+          
+          
+          
+          if(isEarlyUnlockPossible == TRUE)
+          {
+           /* at this point the frame data is already copied to the
+            * rgbBuffer or there is no need to convert to a drawable at all.
+            * therefore we can unlock the mutex now
+            * so that other threads can continue using the fcache
+            */
+            GVA_fcache_mutex_unlock (gvahand);
+          }
+          
+          if (fetchResult->isRgb888Result == TRUE)
+          {
+            fetchResult->isFrameAvailable = TRUE; /* OK frame available in fcache and was copied to rgbBuffer */
+            GAP_TIMM_STOP_FUNCTION(funcId);
+            return;
+          }
+          
+          fetchResult->image_id = gimp_image_new (rgbBuffer->width, rgbBuffer->height, GIMP_RGB);
+          if (gimp_image_undo_is_enabled(fetchResult->image_id))
+          {
+            gimp_image_undo_disable(fetchResult->image_id);
+          }
+          
+          if(rgbBuffer->bpp == 4)
+          {
+            fetchResult->layer_id = gimp_layer_new (fetchResult->image_id
+                                            , "layername"
+                                            , rgbBuffer->width
+                                            , rgbBuffer->height
+                                            , GIMP_RGBA_IMAGE
+                                            , 100.0, GIMP_NORMAL_MODE);
+          }
+          else
+          {
+            fetchResult->layer_id = gimp_layer_new (fetchResult->image_id
+                                            , "layername"
+                                            , rgbBuffer->width
+                                            , rgbBuffer->height
+                                            , GIMP_RGB_IMAGE
+                                            , 100.0, GIMP_NORMAL_MODE);
+          }
+
+          drawable = gimp_drawable_get (fetchResult->layer_id);
+          
+          
+          if(isPerftestApiTiles)
+          {
+            gpointer pr;
+            GAP_TIMM_START_FUNCTION(funcIdToDrawableTile);
+
+            gimp_pixel_rgn_init (&pixel_rgn, drawable, 0, 0
+                           , drawable->width, drawable->height
+                           , TRUE      /* dirty */
+                           , FALSE     /* shadow */
+                           );
+
+            for (pr = gimp_pixel_rgns_register (1, &pixel_rgn);
+                 pr != NULL;
+                 pr = gimp_pixel_rgns_process (pr))
+            {
+              p_copyRgbBufferToPixelRegion (&pixel_rgn, rgbBuffer);
+            }
+
+            GAP_TIMM_STOP_FUNCTION(funcIdToDrawableTile);
+          }
+          else
+          {
+            GAP_TIMM_START_FUNCTION(funcIdToDrawableRect);
+
+            gimp_pixel_rgn_init (&pixel_rgn, drawable, 0, 0
+                           , drawable->width, drawable->height
+                           , TRUE      /* dirty */
+                           , FALSE     /* shadow */
+                           );
+            gimp_pixel_rgn_set_rect (&pixel_rgn, rgbBuffer->data
+                           , 0
+                           , 0
+                           , drawable->width
+                           , drawable->height
+                           );
+
+            GAP_TIMM_STOP_FUNCTION(funcIdToDrawableRect);
+          }
+          
+          if(isEarlyUnlockPossible != TRUE)
+          {
+            /* isEarlyUnlockPossible == FALSE indicates the case where the image was filled directly from the fcache.
+             * in this scenario the mutex must be unlocked at this later time,
+             * after the fcache data has already been transferred to the drawable
+             */
+             GVA_fcache_mutex_unlock (gvahand);
+          }
+
+
+          GAP_TIMM_START_FUNCTION(funcIdDrawableFlush);
+          gimp_drawable_flush (drawable);
+          GAP_TIMM_STOP_FUNCTION(funcIdDrawableFlush);
+
+          GAP_TIMM_START_FUNCTION(funcIdDrawableDetach);
+          gimp_drawable_detach(drawable);
+          GAP_TIMM_STOP_FUNCTION(funcIdDrawableDetach);
+
+          /*
+           * gimp_drawable_merge_shadow (drawable->id, TRUE);
+           */
+
+          /* add new layer on top of the layerstack */
+          gimp_image_add_layer (fetchResult->image_id, fetchResult->layer_id, 0);
+          gimp_drawable_set_visible(fetchResult->layer_id, TRUE);
+
+          /* clear undo stack */
+          if (gimp_image_undo_is_enabled(fetchResult->image_id))
+          {
+            gimp_image_undo_disable(fetchResult->image_id);
+          }
+
+          if(frameData != NULL)
+          {
+            g_free(frameData);
+            frameData = NULL;
+          }
+
+          fetchResult->isFrameAvailable = TRUE; /* OK  frame available in fcache and was converted to drawable */
+          GAP_TIMM_STOP_FUNCTION(funcId);
+          return;
+        }
+      }
+
+      /* try to get framedata from fcache ringlist,
+       * by stepping backwards through the frames that were read before
+       */
+      fc_ptr = (t_GVA_Frame_Cache_Elem  *)fc_ptr->prev;
+
+      if(fcache->fc_current == fc_ptr)
+      {
+        break;  /* STOP, we are back at startpoint of the ringlist */
+      }
+      if(fc_ptr == NULL)
+      {
+        printf("** ERROR in GVA_search_fcache_and_get_frame_as_gimp_layer ringlist broken \n");
+        break;  /* internal error, ringlist is broken */
+      }
+    }
+  }
+
+  GVA_fcache_mutex_unlock (gvahand);
+  GAP_TIMM_STOP_FUNCTION(funcId);
+
+
+}  /* end GVA_search_fcache_and_get_frame_as_gimp_layer_or_rgb888 */
+
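A minimal caller sketch for the fetch routine above (illustrative only, not part of this commit; the helper name and the assumption that the caller frees a buffer allocated by the fetch are inventions for the example):

/* illustrative sketch (not from this commit): fetching a cached frame
 * either as rgb888 buffer or as gimp layer via the fetchResult struct.
 */
#include "gap_vid_api.h"

static void
example_fetch_from_fcache(t_GVA_Handle *gvahand, gint32 framenumber, gint32 numProcessors)
{
  GVA_fcache_fetch_result fetchResult;

  /* request a plain RGB888 copy; a NULL data pointer lets the fetch allocate the buffer */
  fetchResult.isRgb888Result = TRUE;
  fetchResult.rgbBuffer.data = NULL;

  GVA_search_fcache_and_get_frame_as_gimp_layer_or_rgb888(gvahand
      , framenumber
      , 0      /* deinterlace: off */
      , 0.0    /* threshold */
      , numProcessors
      , &fetchResult
      );

  if(fetchResult.isFrameAvailable != TRUE)
  {
    /* framenumber was not in the fcache; the caller has to seek/decode it instead */
    return;
  }

  if(fetchResult.isRgb888Result)
  {
    /* rgbBuffer.data now holds width * height * bpp bytes of RGB888 frame data
     * (freeing the buffer allocated by the fetch is assumed to be the caller's job)
     */
    g_free(fetchResult.rgbBuffer.data);
  }
  else
  {
    /* video was not rgb888: the frame was delivered as the only layer
     * (fetchResult.layer_id) of a newly created image (fetchResult.image_id);
     * if aspect is required, call GVA_image_set_aspect as noted in the comment above.
     */
  }
}
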
+
+// /* ---------------------------------------------
+//  * GVA_search_fcache_and_get_frame_as_gimp_layer    procedure instrumented for PERFTEST
+//  * ---------------------------------------------
+//  * search the frame cache for given framenumber
+//  * and return a copy (or deinterlaced copy) as gimp layer (in a newly created image)
+//  * in case framenumber was NOT found in the fcache return -1
+//  *
+//  * This procedure also set fcache internal rowpointers.
+//  *  gvahand->fc_frame_data
+//  *  gvahand->fc_row_pointers
+//  *
+//  *
+//  * RETURN: layerId (a positive integer)
+//  *         -1 if framenumber not found in fcache, or errors occured
+//  *
+//  * Notes:
+//  * o)  this procedure does not set image aspect for performance reasons.
+//  *     in case aspect is required the calling programm has to perform
+//  *     the additional call like this:
+//  *       layer_id = GVA_search_fcache_and_get_frame_as_gimp_layer(gvahand, ....);
+//  *       GVA_image_set_aspect(gvahand, gimp_drawable_get_image(layer_id));
+//  */
+// gint32
+// GVA_search_fcache_and_get_frame_as_gimp_layer(t_GVA_Handle *gvahand
+//                  , gint32 framenumber
+//                  , gint32   deinterlace
+//                  , gdouble  threshold
+//                  , gint32   numProcessors
+//                  )
+// {
+//   t_GVA_Frame_Cache *fcache;
+//   t_GVA_Frame_Cache_Elem  *fc_ptr;
+//   gint32                   l_threshold;
+//   gint32                   l_mix_threshold;
+//   gint32                   l_new_layer_id;
+// 
+//   static gint32 funcId = -1;
+//   static gint32 funcIdMemcpy = -1;
+//   static gint32 funcIdToDrawableRect = -1;
+//   static gint32 funcIdToDrawableTile = -1;
+//   static gint32 funcIdDrawableFlush = -1;
+//   static gint32 funcIdDrawableDetach = -1;
+//   
+//   GAP_TIMM_GET_FUNCTION_ID(funcId, "GVA_search_fcache_and_get_frame");
+//   GAP_TIMM_GET_FUNCTION_ID(funcIdMemcpy, "GVA_search_fcache_and_get_frame.memcpy");
+//   GAP_TIMM_GET_FUNCTION_ID(funcIdToDrawableRect, "GVA_search_fcache_and_get_frame.toDrawable (rgn_set_rect)");
+//   GAP_TIMM_GET_FUNCTION_ID(funcIdToDrawableTile, "GVA_search_fcache_and_get_frame.toDrawable (tilebased)");
+//   GAP_TIMM_GET_FUNCTION_ID(funcIdDrawableFlush, "GVA_search_fcache_and_get_frame.gimp_drawable_flush");
+//   GAP_TIMM_GET_FUNCTION_ID(funcIdDrawableDetach, "GVA_search_fcache_and_get_frame.gimp_drawable_detach");
+// 
+//   l_new_layer_id = -1;
+// 
+//   if(gap_debug)
+//   {
+//     printf("GVA_search_fcache_and_get_frame_as_gimp_layer: search for framenumber: %d\n", (int)framenumber );
+//   }
+//   if(gvahand->fcache.fcache_locked)
+//   {
+//     return(l_new_layer_id);  /* dont touch the fcache while locked */
+//   }
+// 
+//   GAP_TIMM_START_FUNCTION(funcId);
+// 
+// 
+//   /* expand threshold range from 0.0-1.0  to 0 - MIX_MAX_THRESHOLD */
+//   threshold = CLAMP(threshold, 0.0, 1.0);
+//   l_threshold = (gdouble)MIX_MAX_THRESHOLD * (threshold * threshold * threshold);
+//   l_mix_threshold = CLAMP((gint32)l_threshold, 0, MIX_MAX_THRESHOLD);
+// 
+//   GVA_fcache_mutex_lock (gvahand);
+// 
+// 
+//   /* init with framedata of current frame
+//    * (for the case that framenumber not available in fcache)
+//    */
+//   gvahand->fc_frame_data = gvahand->frame_data;
+//   gvahand->fc_row_pointers = gvahand->row_pointers;
+// 
+//   fcache = &gvahand->fcache;
+//   if(fcache->fc_current)
+//   {
+//     fc_ptr = (t_GVA_Frame_Cache_Elem  *)fcache->fc_current;
+//     while(1 == 1)
+//     {
+//       if(framenumber == fc_ptr->framenumber)
+//       {
+//         if(fc_ptr->framenumber >= 0)
+//         {
+//           /* FCACHE HIT */
+//           static gboolean           isPerftestInitialized = FALSE;          
+//           static gboolean           isPerftestApiTilesDefault;
+//           static gboolean           isPerftestApiTiles;          /* copy tile-by-tile versus gimp_pixel_rgn_set_rect all at once */
+//           static gboolean           isPerftestApiMemcpyMP;       /* memcpy versus multithreaded memcpy in row stripes */
+//           static gboolean           isPerftestEarlyFcacheUnlock; /* TRUE: 1:1 copy the fcache to frameData buffer even if delace not required 
+//                                                            *       to enable early unlock of the fcache mutex.
+//                                                            *       (prefetch thread in the storyboard processor
+//                                                            *        may benefit from the early unlock
+//                                                            *        and have a chance for better overall performance)
+//                                                            */
+// 
+//           GVA_RgbPixelBuffer rgbBuffer;
+//           guchar            *frameData;
+//           gint32             image_id;
+//           GimpDrawable      *drawable;
+//           GimpPixelRgn       pixel_rgn;
+//           
+//           
+//           gvahand->fc_frame_data = fc_ptr->frame_data;  /* framedata of cached frame */
+//           gvahand->fc_row_pointers = fc_ptr->row_pointers;
+//           
+//           rgbBuffer.width = gvahand->width;
+//           rgbBuffer.height = gvahand->height;
+//           rgbBuffer.bpp = gvahand->frame_bpp;
+//           rgbBuffer.rowstride = gvahand->width * gvahand->frame_bpp;     /* bytes per pixel row */
+//           rgbBuffer.deinterlace = deinterlace;
+//           rgbBuffer.threshold = threshold;
+//           frameData = NULL;
+//           
+//           /* PERFTEST configuration values to test performance of various strategies on multiprocessor machines */
+//           if (isPerftestInitialized != TRUE)
+//           {
+//             isPerftestInitialized = TRUE;
+// 
+//             if(numProcessors > 1)
+//             {
+//               /* copy full size as one big rectangle gives the gimp core the chance to process with more than one thread */
+//               isPerftestApiTilesDefault = FALSE;
+//             }
+//             else
+//             {
+//               /* tile based copy was a little bit faster in tests where gimp-core used only one CPU */
+//               isPerftestApiTilesDefault = TRUE;
+//             }
+//             isPerftestApiTiles = gap_base_get_gimprc_gboolean_value("isPerftestApiTiles", isPerftestApiTilesDefault);
+//             isPerftestApiMemcpyMP = gap_base_get_gimprc_gboolean_value("isPerftestApiMemcpyMP", TRUE);
+//             isPerftestEarlyFcacheUnlock = gap_base_get_gimprc_gboolean_value("isPerftestEarlyFcacheUnlock", FALSE);
+//           }
+//           
+//           if (deinterlace != 0)
+//           {
+//             frameData = g_malloc(rgbBuffer.rowstride * rgbBuffer.height);
+//             rgbBuffer.data = frameData;
+//             GVA_copy_or_deinterlace_fcache_data_to_rgbBuffer(&rgbBuffer
+//                                                           , gvahand->fc_frame_data
+//                                                           , numProcessors
+//                                                           );
+//           }
+//           else
+//           {
+//             if (isPerftestEarlyFcacheUnlock)
+//             {
+//               /* for the early unlock strategy it is required to make a copy of the fcache data */
+// 
+//               frameData = g_malloc(rgbBuffer.rowstride * rgbBuffer.height);
+//               rgbBuffer.data = frameData;
+//               if(isPerftestApiMemcpyMP)
+//               {
+//                 GVA_copy_or_deinterlace_fcache_data_to_rgbBuffer(&rgbBuffer
+//                                                           , gvahand->fc_frame_data
+//                                                           , numProcessors
+//                                                           );
+//               }
+//               else
+//               {
+//                 GAP_TIMM_START_FUNCTION(funcIdMemcpy);
+//                 
+//                 memcpy(rgbBuffer.data, gvahand->fc_frame_data, (rgbBuffer.rowstride * rgbBuffer.height));
+//                 
+//                 GAP_TIMM_STOP_FUNCTION(funcIdMemcpy);
+//               }
+//             }
+//             else
+//             {
+//               /* setup rgbBuffer.data to point direct to the fcache frame data
+//                * No additional frameData buffer is allocated in this case and no extra memcpy is required,
+//                * but the fcache mutex must stay in locked state until data is completely transfered
+//                * to the drawable. This can have performance impact on parallel running prefetch threads
+//                */
+//               rgbBuffer.data = gvahand->fc_frame_data;
+//             }
+//           }
+//           
+//           
+//           
+//           if(frameData != TRUE)
+//           {
+//            /* at this point the frame data is already copied to newly allocated
+//             * frameData buffer (that is owned exclusive by the current thread)
+//             * therefore we can already unlock the mutex
+//             * so that other threads already can continue using the fcache
+//             */
+//             GVA_fcache_mutex_unlock (gvahand);
+//           }
+//           
+//          
+//           image_id = gimp_image_new (rgbBuffer.width, rgbBuffer.height, GIMP_RGB);
+//           if (gimp_image_undo_is_enabled(image_id))
+//           {
+//             gimp_image_undo_disable(image_id);
+//           }
+//           
+//           if(rgbBuffer.bpp == 4)
+//           {
+//             l_new_layer_id = gimp_layer_new (image_id
+//                                               , "layername"
+//                                               , rgbBuffer.width
+//                                               , rgbBuffer.height
+//                                               , GIMP_RGBA_IMAGE
+//                                               , 100.0, GIMP_NORMAL_MODE);
+//           }
+//           else
+//           {
+//             l_new_layer_id = gimp_layer_new (image_id
+//                                               , "layername"
+//                                               , rgbBuffer.width
+//                                               , rgbBuffer.height
+//                                               , GIMP_RGB_IMAGE
+//                                               , 100.0, GIMP_NORMAL_MODE);
+//           }
+// 
+// 
+//           
+//           drawable = gimp_drawable_get (l_new_layer_id);
+//           
+//           
+//           if(isPerftestApiTiles)
+//           {
+//             gpointer pr;
+//             GAP_TIMM_START_FUNCTION(funcIdToDrawableTile);
+// 
+//             gimp_pixel_rgn_init (&pixel_rgn, drawable, 0, 0
+//                              , drawable->width, drawable->height
+//                              , TRUE      /* dirty */
+//                              , FALSE     /* shadow */
+//                              );
+// 
+//             for (pr = gimp_pixel_rgns_register (1, &pixel_rgn);
+//                  pr != NULL;
+//                  pr = gimp_pixel_rgns_process (pr))
+//             {
+//               p_copyRgbBufferToPixelRegion (&pixel_rgn, &rgbBuffer);
+//             }
+// 
+//             GAP_TIMM_STOP_FUNCTION(funcIdToDrawableTile);
+//           }
+//           else
+//           {
+//             GAP_TIMM_START_FUNCTION(funcIdToDrawableRect);
+// 
+//             gimp_pixel_rgn_init (&pixel_rgn, drawable, 0, 0
+//                              , drawable->width, drawable->height
+//                              , TRUE      /* dirty */
+//                              , FALSE     /* shadow */
+//                              );
+//             gimp_pixel_rgn_set_rect (&pixel_rgn, rgbBuffer.data
+//                              , 0
+//                              , 0
+//                              , drawable->width
+//                              , drawable->height
+//                              );
+// 
+//             GAP_TIMM_STOP_FUNCTION(funcIdToDrawableRect);
+//           }
+//           
+//           if(frameData == NULL)
+//           {
+//             /* frameData == NULL indicates the case where the image was directly filled from fcache
+//              * in this scenario the mutex must be unlocked at this later time 
+//              * after the fcache data is already transfered to the drawable
+//              */
+//              GVA_fcache_mutex_unlock (gvahand);
+//           }
+// 
+// 
+//           GAP_TIMM_START_FUNCTION(funcIdDrawableFlush);
+//           gimp_drawable_flush (drawable);
+//           GAP_TIMM_STOP_FUNCTION(funcIdDrawableFlush);
+// 
+//           GAP_TIMM_START_FUNCTION(funcIdDrawableDetach);
+//           gimp_drawable_detach(drawable);
+//           GAP_TIMM_STOP_FUNCTION(funcIdDrawableDetach);
+// 
+//           /*
+//            * gimp_drawable_merge_shadow (drawable->id, TRUE);
+//            */
+// 
+//           /* add new layer on top of the layerstack */
+//           gimp_image_add_layer (image_id, l_new_layer_id, 0);
+//           gimp_drawable_set_visible(l_new_layer_id, TRUE);
+// 
+//           /* clear undo stack */
+//           if (gimp_image_undo_is_enabled(image_id))
+//           {
+//             gimp_image_undo_disable(image_id);
+//           }
+// 
+//           if(frameData != NULL)
+//           {
+//             g_free(frameData);
+//             frameData = NULL;
+//           }
+// 
+//           GAP_TIMM_STOP_FUNCTION(funcId);
+//           return (l_new_layer_id); /* OK  frame available in fcache */
+//         }
+//       }
+// 
+//       /* try to get framedata from fcache ringlist,
+//        * by stepping backwards the frames that were read before
+//        */
+//       fc_ptr = (t_GVA_Frame_Cache_Elem  *)fc_ptr->prev;
+// 
+//       if(fcache->fc_current == fc_ptr)
+//       {
+//         break;  /* STOP, we are back at startpoint of the ringlist */
+//       }
+//       if(fc_ptr == NULL)
+//       {
+//         printf("** ERROR in GVA_search_fcache_and_get_frame_as_gimp_layer ringlist broken \n");
+//         break;  /* internal error, ringlist is broken */
+//       }
+//     }
+//   }
+// 
+//   GVA_fcache_mutex_unlock (gvahand);
+//   GAP_TIMM_STOP_FUNCTION(funcId);
+// 
+//   return (l_new_layer_id);
+// 
+// }  /* end GVA_search_fcache_and_get_frame_as_gimp_layer */
+
+
 /* --------------------
  * p_guess_total_frames
  * --------------------
@@ -857,15 +1706,39 @@ p_gva_worker_close(t_GVA_Handle  *gvahand)
     t_GVA_DecoderElem *dec_elem;
 
 
+
+
     dec_elem = (t_GVA_DecoderElem *)gvahand->dec_elem;
 
     if(dec_elem)
     {
-      if(gap_debug) printf("GVA: p_gva_worker_close: before CLOSE %s with decoder:%s\n", gvahand->filename,  dec_elem->decoder_name);
+      char *nameMutexLockStats;
+      
+      if(gap_debug)
+      {
+        printf("GVA: gvahand:%d p_gva_worker_close: before CLOSE %s with decoder:%s\n"
+           , (int)gvahand
+           , gvahand->filename
+           , dec_elem->decoder_name
+           );
+      }
 
+      /* log mutex wait statistics (only in case compiled with runtime recording) */
+      nameMutexLockStats = g_strdup_printf("... close gvahand:%d fcacheMutexLockStatistic ", (int)gvahand);
+      GVA_copy_or_delace_print_statistics();
+      GAP_TIMM_PRINT_RECORD(&gvahand->fcacheMutexLockStats, nameMutexLockStats);
+      g_free(nameMutexLockStats);
+      
       (*dec_elem->fptr_close)(gvahand);
 
-      if(gap_debug) printf("GVA: p_gva_worker_close: after CLOSE %s with decoder:%s\n", gvahand->filename,  dec_elem->decoder_name);
+      if(gap_debug)
+      {
+        printf("GVA: gvahand:%d p_gva_worker_close: after CLOSE %s with decoder:%s\n"
+           , (int)gvahand
+           , gvahand->filename
+           , dec_elem->decoder_name
+           );
+      }
 
       if(gvahand->filename)
       {
@@ -908,9 +1781,12 @@ p_gva_worker_get_next_frame(t_GVA_Handle  *gvahand)
       }
 
       fcache = &gvahand->fcache;
+      GVA_fcache_mutex_lock (gvahand);
       fcache->fcache_locked = TRUE;
       if(fcache->fc_current)
       {
+        t_GVA_Frame_Cache_Elem *fc_current;
+
         /* if fcache framenumber is negative, we can reuse
          * that EMPTY element without advance
          */
@@ -922,16 +1798,23 @@ p_gva_worker_get_next_frame(t_GVA_Handle  *gvahand)
           gvahand->frame_data = fcache->fc_current->frame_data;
           gvahand->row_pointers = fcache->fc_current->row_pointers;
         }
+        
+        fc_current = fcache->fc_current;
+
+        GVA_fcache_mutex_unlock (gvahand);
 
         /* CALL decoder specific implementation of GET_NEXT_FRAME procedure */
         l_rc = (*dec_elem->fptr_get_next_frame)(gvahand);
 
+        GVA_fcache_mutex_lock (gvahand);
+
         if (l_rc == GVA_RET_OK)
         {
-          fcache->fc_current->framenumber = gvahand->current_frame_nr;
+          fc_current->framenumber = gvahand->current_frame_nr;
         }
       }
       fcache->fcache_locked = FALSE;
+      GVA_fcache_mutex_unlock (gvahand);
     }
   }
   return(l_rc);
@@ -964,6 +1847,9 @@ p_gva_worker_seek_frame(t_GVA_Handle  *gvahand, gdouble pos, t_GVA_PosUnit pos_u
       }
       fcache = &gvahand->fcache;
       fcache->fcache_locked = TRUE;
+
+      GVA_fcache_mutex_lock (gvahand);
+
       if(fcache->fc_current)
       {
         /* if fcache framenumber is negative, we can reuse
@@ -982,6 +1868,8 @@ p_gva_worker_seek_frame(t_GVA_Handle  *gvahand, gdouble pos, t_GVA_PosUnit pos_u
           gvahand->row_pointers = fcache->fc_current->row_pointers;
         }
       }
+
+      GVA_fcache_mutex_unlock (gvahand);
         
       /* CALL decoder specific implementation of SEEK_FRAME procedure */
       l_rc = (*dec_elem->fptr_seek_frame)(gvahand, pos, pos_unit);
@@ -1266,6 +2154,11 @@ p_gva_worker_open_read(const char *filename, gint32 vid_track, gint32 aud_track
   gvahand->percentage_done = 0.0;
   gvahand->frame_counter = 0;
   gvahand->gva_thread_save = TRUE;  /* default for most decoder libs */
+  gvahand->fcache_mutex = NULL;     /* by default do not use g_mutex_lock / g_mutex_unlock at fcache access */
+  gvahand->user_data = NULL;        /* reserved for user data */
+  
+  GAP_TIMM_INIT_RECORD(&gvahand->fcacheMutexLockStats);
+
 
   if(GVA_global_decoder_list == NULL)
   {
@@ -1422,6 +2315,11 @@ GVA_get_next_frame(t_GVA_Handle  *gvahand)
 {
   t_GVA_RetCode l_rc;
 
+  static gint32 funcId = -1;
+  
+  GAP_TIMM_GET_FUNCTION_ID(funcId, "GVA_get_next_frame");
+  GAP_TIMM_START_FUNCTION(funcId);
+
   if(gap_debug)
   {
     printf("GVA_get_next_frame: START handle:%d\n", (int)gvahand);
@@ -1434,6 +2332,9 @@ GVA_get_next_frame(t_GVA_Handle  *gvahand)
     printf("GVA_get_next_frame: END rc:%d\n", (int)l_rc);
   }
 
+
+  GAP_TIMM_STOP_FUNCTION(funcId);
+
   return(l_rc);
 }
 
@@ -1442,6 +2343,11 @@ GVA_seek_frame(t_GVA_Handle  *gvahand, gdouble pos, t_GVA_PosUnit pos_unit)
 {
   t_GVA_RetCode l_rc;
 
+  static gint32 funcId = -1;
+  
+  GAP_TIMM_GET_FUNCTION_ID(funcId, "GVA_seek_frame");
+  GAP_TIMM_START_FUNCTION(funcId);
+
   if(gap_debug)
   {
     printf("GVA_seek_frame: START handle:%d, pos%.4f unit:%d\n"
@@ -1456,6 +2362,8 @@ GVA_seek_frame(t_GVA_Handle  *gvahand, gdouble pos, t_GVA_PosUnit pos_unit)
       , (int)l_rc);
   }
 
+  GAP_TIMM_STOP_FUNCTION(funcId);
+
   return(l_rc);
 }
 
@@ -1740,19 +2648,19 @@ p_check_image_is_alive(gint32 image_id)
 
 
 /* ------------------------------------
- * p_mix_rows
+ * gva_delace_mix_rows
  * ------------------------------------
  * mix 2 input pixelrows (prev_row, next_row)
  * to one resulting pixelrow (mixed_row)
  * All pixelrows must have same width and bpp
  */
 static inline void
-p_mix_rows( gint32 width
+gva_delace_mix_rows( gint32 width
           , gint32 bpp
           , gint32 row_bytewidth
           , gint32 mix_threshold   /* 0 <= mix_threshold <= 33554432 (256*256*256*2) */
-          , guchar *prev_row
-          , guchar *next_row
+          , const guchar *prev_row
+          , const guchar *next_row
           , guchar *mixed_row
           )
 {
@@ -1819,16 +2727,16 @@ p_mix_rows( gint32 width
       mixed_row += bpp;
     }
   }
-}  /* end p_mix_rows */
+}  /* end gva_delace_mix_rows */
 
 
 
 /* ------------------------------------
- * p_calculate_mix_threshold
+ * gva_delace_calculate_mix_threshold
  * ------------------------------------
  */
 static gint32
-p_calculate_mix_threshold(gdouble threshold)
+gva_delace_calculate_mix_threshold(gdouble threshold)
 {
   gint32  l_threshold;
   gint32  l_mix_threshold;
@@ -1842,11 +2750,11 @@ p_calculate_mix_threshold(gdouble threshold)
 
 
 /* ------------------------------------
- * p_calculate_interpolate_flag
+ * gva_delace_calculate_interpolate_flag
  * ------------------------------------
  */
 static gint32
-p_calculate_interpolate_flag(gint32 deinterlace)
+gva_delace_calculate_interpolate_flag(gint32 deinterlace)
 {
   gint32  l_interpolate_flag;
  
@@ -1899,8 +2807,8 @@ GVA_delace_frame(t_GVA_Handle *gvahand
   l_framedata_copy = g_malloc(l_row_bytewidth * gvahand->height);
 
 
-  l_interpolate_flag = p_calculate_interpolate_flag(deinterlace);
-  l_mix_threshold = p_calculate_mix_threshold(threshold);
+  l_interpolate_flag = gva_delace_calculate_interpolate_flag(deinterlace);
+  l_mix_threshold = gva_delace_calculate_mix_threshold(threshold);
 
   l_row_ptr_dest = l_framedata_copy;
   for(l_row = 0; l_row < gvahand->height; l_row++)
@@ -1924,7 +2832,7 @@ GVA_delace_frame(t_GVA_Handle *gvahand
           /* we have both prev and next row within valid range
            * and can calculate an interpolated row
            */
-          p_mix_rows ( gvahand->width
+          gva_delace_mix_rows ( gvahand->width
                        , gvahand->frame_bpp
                        , l_row_bytewidth
                        , l_mix_threshold
@@ -1970,8 +2878,8 @@ p_gva_deinterlace_drawable (GimpDrawable *drawable, gint32 deinterlace, gdouble
   gint32  l_mix_threshold;
 
 
-  l_interpolate_flag = p_calculate_interpolate_flag(deinterlace);
-  l_mix_threshold = p_calculate_mix_threshold(threshold);
+  l_interpolate_flag = gva_delace_calculate_interpolate_flag(deinterlace);
+  l_mix_threshold = gva_delace_calculate_mix_threshold(threshold);
 
   bytes = drawable->bpp;
 
@@ -2005,7 +2913,7 @@ p_gva_deinterlace_drawable (GimpDrawable *drawable, gint32 deinterlace, gdouble
       gimp_pixel_rgn_get_row (&srcPR, upper, x, row - 1, width);
       gimp_pixel_rgn_get_row (&srcPR, lower, x, row + 1, width);
  
-      p_mix_rows ( width
+      gva_delace_mix_rows ( width
                  , drawable->bpp
                  , l_row_bytewidth
                  , l_mix_threshold
@@ -2191,6 +3099,9 @@ GVA_frame_to_gimp_layer_2(t_GVA_Handle *gvahand
      return (-2);
   }
 
+
+  GVA_fcache_mutex_lock (gvahand);
+
   /* expand threshold range from 0.0-1.0  to 0 - MIX_MAX_THRESHOLD */
   threshold = CLAMP(threshold, 0.0, 1.0);
   l_threshold = (gdouble)MIX_MAX_THRESHOLD * (threshold * threshold * threshold);
@@ -2323,6 +3234,8 @@ GVA_frame_to_gimp_layer_2(t_GVA_Handle *gvahand
     }
   }
 
+  GVA_fcache_mutex_unlock (gvahand);
+
   if (gap_debug)
   {
     printf("DEBUG: after copy data rows (NO SHADOW)\n");
@@ -2413,6 +3326,8 @@ GVA_fcache_to_gimp_image(t_GVA_Handle *gvahand
 
   fcache = &gvahand->fcache;
 
+  GVA_fcache_mutex_lock (gvahand);
+
   /* search the fcache element with smallest positve framenumber */
   fc_minframe = NULL;
   if(fcache->fc_current)
@@ -2457,6 +3372,8 @@ GVA_fcache_to_gimp_image(t_GVA_Handle *gvahand
       &&((fc_ptr->framenumber <= max_framenumber) || (max_framenumber < 0))
       && (fc_ptr->framenumber >= 0))
       {
+          GVA_fcache_mutex_unlock (gvahand);
+
           GVA_frame_to_gimp_layer_2(gvahand
                   , &image_id
                   , layer_id
@@ -2466,6 +3383,8 @@ GVA_fcache_to_gimp_image(t_GVA_Handle *gvahand
                   , threshold
                   );
            delete_mode = FALSE;  /* keep old layers */
+
+           GVA_fcache_mutex_lock (gvahand);
       }
 
       /* step from fc_minframe forward in the fcache ringlist,
@@ -2475,15 +3394,19 @@ GVA_fcache_to_gimp_image(t_GVA_Handle *gvahand
 
       if(fc_minframe == fc_ptr)
       {
+        GVA_fcache_mutex_unlock (gvahand);
         return (image_id);  /* STOP, we are back at startpoint of the ringlist */
       }
       if(fc_ptr == NULL)
       {
+        GVA_fcache_mutex_unlock (gvahand);
         return (image_id);  /* internal error, ringlist is broken */
       }
     }
   }
 
+  GVA_fcache_mutex_unlock (gvahand);
+
   return image_id;
 }  /* end GVA_fcache_to_gimp_image */
 
@@ -2586,6 +3509,11 @@ GVA_frame_to_buffer(t_GVA_Handle *gvahand
 {
   gint32  frame_size;
   guchar *frame_data;
+  static gint32 funcIdDoScale = -1;
+  static gint32 funcIdNoScale = -1;
+  
+  GAP_TIMM_GET_FUNCTION_ID(funcIdDoScale, "GVA_frame_to_buffer.do_scale");
+  GAP_TIMM_GET_FUNCTION_ID(funcIdNoScale, "GVA_frame_to_buffer.no_scale");
 
   frame_data = NULL;
   
@@ -2605,6 +3533,9 @@ GVA_frame_to_buffer(t_GVA_Handle *gvahand
     gint32       deinterlace_mask;
     gint         *arr_src_col;
 
+
+    GAP_TIMM_START_FUNCTION(funcIdDoScale);
+
     if(gap_debug) printf("GVA_frame_to_buffer: DO_SCALE\n");
     /* for safety: width and height must be set to useful values
      * (dont accept bigger values than video size or values less than 1 pixel)
@@ -2673,9 +3604,13 @@ GVA_frame_to_buffer(t_GVA_Handle *gvahand
       g_free(arr_src_col);
     }
 
+    GAP_TIMM_STOP_FUNCTION(funcIdDoScale);
+
   }
   else
   {
+    GAP_TIMM_START_FUNCTION(funcIdNoScale);
+
     *bpp = gvahand->frame_bpp;
     *width = gvahand->width;
     *height = gvahand->height;
@@ -2690,6 +3625,7 @@ GVA_frame_to_buffer(t_GVA_Handle *gvahand
       frame_data = g_malloc(frame_size);
       if(frame_data == NULL)
       {
+        GAP_TIMM_STOP_FUNCTION(funcIdNoScale);
         return (NULL);
       }
       memcpy(frame_data, gvahand->fc_frame_data, frame_size);
@@ -2702,6 +3638,7 @@ GVA_frame_to_buffer(t_GVA_Handle *gvahand
                                    );
       if(frame_data == NULL)
       {
+        GAP_TIMM_STOP_FUNCTION(funcIdNoScale);
         return (NULL);
       }
     }
@@ -2722,6 +3659,8 @@ GVA_frame_to_buffer(t_GVA_Handle *gvahand
            frame_data[3+(i*4)] = 255;
         }
     }
+
+    GAP_TIMM_STOP_FUNCTION(funcIdNoScale);
   }
 
   return(frame_data);
@@ -2744,6 +3683,9 @@ GVA_frame_to_buffer(t_GVA_Handle *gvahand
  * IN: do_scale  FALSE: deliver frame at original size (ignore bpp, width and height parameters)
  *               TRUE: deliver frame at size specified by width, height, bpp
  *                     scaling is done fast in low quality 
+ * IN: isBackwards  FALSE: disable prefetch on backwards seek (prefetch typically slows things down when a clip plays forwards).
+ *                  TRUE: enable prefetch of some frames (up to GVA_NEAR_FRAME_DISTANCE_BACK, limited by the fcache size)
+ *                        that fills up the fcache and gives better performance on backwards playing clips
  * IN: framenumber   The wanted framenumber
  *                   return NULL if the wanted framnumber could not be read.
  * IN: deinterlace   0: no deinterlace, 1 pick odd lines, 2 pick even lines
@@ -2761,6 +3703,7 @@ GVA_frame_to_buffer(t_GVA_Handle *gvahand
 guchar *
 GVA_fetch_frame_to_buffer(t_GVA_Handle *gvahand
                 , gboolean do_scale
+                , gboolean isBackwards
                 , gint32 framenumber
                 , gint32 deinterlace
                 , gdouble threshold
@@ -2769,6 +3712,8 @@ GVA_fetch_frame_to_buffer(t_GVA_Handle *gvahand
                 , gint32 *height
                 )
 {
+#define GVA_NEAR_FRAME_DISTANCE 10
+#define GVA_NEAR_FRAME_DISTANCE_BACK 30
   guchar *frame_data;
   t_GVA_RetCode  l_rc;
 
@@ -2792,13 +3737,28 @@ GVA_fetch_frame_to_buffer(t_GVA_Handle *gvahand
     l_delta = framenumber - gvahand->current_frame_nr;
     l_rc = GVA_RET_OK;
 
-    if((l_delta >= 1) && (l_delta <= 10))
+    if((l_delta >= 1) && (l_delta <= GVA_NEAR_FRAME_DISTANCE))
     {
       /* target framenumber is very near to the current_frame_nr
        * in this case positioning via sequential read is faster than seek
        */
       l_readsteps = l_delta;
     }
+    else if ((l_delta < 0) && (isBackwards))
+    {
+      /* backwards seek is done to a position up to GVA_NEAR_FRAME_DISTANCE_BACK
+       * frames before the wanted framenumber, to fill up the fcache
+       */
+      gdouble seekFrameNumber;
+      gdouble prefetchFrames;
+           
+           
+      prefetchFrames = MIN((GVA_get_fcache_size_in_elements(gvahand) -1), GVA_NEAR_FRAME_DISTANCE_BACK) ;
+      seekFrameNumber = MAX((gdouble)framenumber - prefetchFrames, 1);
+      l_readsteps = 1 + (framenumber - seekFrameNumber);
+      GVA_seek_frame(gvahand, seekFrameNumber, GVA_UPOS_FRAMES);
+      
+    }
     else
     {
       l_rc = GVA_seek_frame(gvahand, framenumber, GVA_UPOS_FRAMES);
@@ -2809,11 +3769,13 @@ GVA_fetch_frame_to_buffer(t_GVA_Handle *gvahand
       l_rc = GVA_get_next_frame(gvahand);
       if(gap_debug)
       {
-        printf("GVA_fetch_frame_to_buffer: l_readsteps:%d framenumber;%d curr:%d l_rc:%d\n"
+        printf("GVA_fetch_frame_to_buffer: l_readsteps:%d framenumber;%d curr:%d l_rc:%d delta:%d fcacheSize:%d\n"
           , (int)l_readsteps
           , (int)framenumber
           , (int)gvahand->current_frame_nr
           , (int)l_rc
+          , (int)l_delta
+          , (int)GVA_get_fcache_size_in_elements(gvahand)
           );
       }
       l_readsteps--;
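
A minimal caller sketch for the new isBackwards path (illustrative only; it assumes a handle opened with GVA_open_read, the bpp/width/height out-parameters declared in gap_vid_api.h, and placeholder firstFrame/lastFrame values):

   guchar *buf;
   gint32  bpp, width, height;
   gint32  framenr;

   for(framenr = lastFrame; framenr >= firstFrame; framenr--)
   {
     buf = GVA_fetch_frame_to_buffer(gvahand
               , FALSE      /* do_scale: deliver the frame at original size */
               , TRUE       /* isBackwards: allow prefetch into the fcache */
               , framenr
               , 0          /* deinterlace: off */
               , 1.0        /* threshold */
               , &bpp, &width, &height
               );
     if(buf != NULL)
     {
       /* ... consume the RGB(A) data ... */
       g_free(buf);
     }
   }
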
diff --git a/libgapvidapi/gap_vid_api.h b/libgapvidapi/gap_vid_api.h
old mode 100644
new mode 100755
index 878b79e..60ccf8e
--- a/libgapvidapi/gap_vid_api.h
+++ b/libgapvidapi/gap_vid_api.h
@@ -24,6 +24,7 @@
 #include <libgimp/gimp.h>
 
 #include "gap/gap_image.h"
+#include "gap_base.h"
 
 #define GVA_MPGFRAME_UNKNOWN -1
 #define GVA_MPGFRAME_I_TYPE 1
@@ -126,6 +127,32 @@ typedef struct t_GVA_Frame_Cache
 } t_GVA_Frame_Cache;
 
 
+typedef struct GVA_RgbPixelBuffer
+{
+  guchar       *data;          /* pointer to region data */
+  guint         width;         /* width in pixels */
+  guint         height;        /* height in pixels */
+  guint         bpp;           /* bytes per pixel (always initialized with 3) */
+  guint         rowstride;     /* bytes per pixel row */
+  gint32        deinterlace;
+  gdouble       threshold;     /* threshold value for  deinterlacing */
+} GVA_RgbPixelBuffer;
+
+
+typedef struct GVA_fcache_fetch_result {
+  gboolean   isFrameAvailable;        /* OUT */
+  gboolean   isRgb888Result;          /* IN/OUT The caller shall set isRgb888Result to 
+                                       *        FALSE: in case the result shall be converted
+                                       *               to a newly created gimp layer (in a new image)
+                                       *        TRUE:  in case rgbBuffer is preferred.
+                                       *               (this flag may flip to FALSE
+                                       *               in case the video has an alpha channel, e.g. bpp == 4)
+                                       */
+  gint32     layer_id;                /* OUT the id of the newly created layer */
+  gint32     image_id;                /* OUT the id of the newly created image */
+  GVA_RgbPixelBuffer    rgbBuffer;    /* IN/OUT the buffer */
+} GVA_fcache_fetch_result;
+
 typedef  gboolean       (*t_GVA_progress_callback_fptr)(gdouble progress, gpointer user_data);
 
 
@@ -156,7 +183,7 @@ typedef struct t_GVA_VideoindexHdr
   char     val_mtim[15];
   char     key_flen[5];
   char     val_flen[10];
-  
+
 } t_GVA_VideoindexHdr;
 
 typedef enum
@@ -222,7 +249,7 @@ typedef struct t_GVA_Handle  /* nickname: gvahand */
   gboolean create_vindex;       /* TRUE: allow the fptr_count_frames procedure to create a videoindex file */
   t_GVA_Videoindex *vindex;
   gint32            mtime;
-  
+
   gboolean disable_mmx;
   gboolean do_gimp_progress;    /* WARNING: dont try to set this TRUE if you call the API from a thread !! */
   gboolean all_frames_counted;  /* TRUE: counted all frames, total_frames is an exact value
@@ -263,7 +290,7 @@ typedef struct t_GVA_Handle  /* nickname: gvahand */
   gint32  audio_cannels;        /* number of channel (in the selected aud_track) */
 
   gboolean critical_timecodesteps_found;
-  
+
 
   gdouble percentage_done;      /* 0.0 <= percentage_done <= 1.0 */
 
@@ -290,6 +317,19 @@ typedef struct t_GVA_Handle  /* nickname: gvahand */
   gint32  aud_track;
   char   *filename;
   gboolean gva_thread_save;
+
+  GapTimmRecord  fcacheMutexLockStats;   /* record runtime for locking the fcache mutex */
+  GMutex  *fcache_mutex;      /* NULL for standard singleprocessor usage
+                               * if the gvahand video handle is used in multithread environment
+                               * (see storyboard processor implementation)
+                               * the caller shall supply a mutex created with g_mutex_new
+                               * that will be used to lock the fcache while GVA procedures
+                               * access the fcache. (using g_mutex_lock, g_mutex_unlock)
+                               * Note that the GVA_open_read procedure(s) will init fcache_mutex = NULL
+                               */
+
+  gpointer user_data;         /* is set to NULL at open and is not internally used by GVA procedures */
+
 } t_GVA_Handle;
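
A short opt-in sketch for the new fcache_mutex member (assuming the usual GVA_open_read(filename, vid_track, aud_track) open call; only a caller that shares the handle between threads needs this):

   t_GVA_Handle *gvahand;

   gvahand = GVA_open_read(filename, vid_track, aud_track);
   if(gvahand != NULL)
   {
     /* opt-in: GVA_open_read leaves fcache_mutex NULL (no locking overhead by default) */
     gvahand->fcache_mutex = g_mutex_new();
   }
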
 
 typedef enum
@@ -381,6 +421,9 @@ t_GVA_SeekSupport GVA_check_seek_support(t_GVA_Handle  *gvahand);
 void            GVA_set_fcache_size(t_GVA_Handle *gvahand
                  ,gint32 frames_to_keep_cahed
                  );
+gint32          GVA_get_fcache_size_in_elements(t_GVA_Handle *gvahand);
+gint32          GVA_get_fcache_size_in_bytes(t_GVA_Handle *gvahand);
+
 
 t_GVA_RetCode   GVA_search_fcache(t_GVA_Handle *gvahand
                  ,gint32 framenumber
@@ -390,6 +433,29 @@ t_GVA_RetCode   GVA_search_fcache_by_index(t_GVA_Handle *gvahand
                  ,gint32 index
                  ,gint32 *framenumber
                  );
+
+void           GVA_search_fcache_and_get_frame_as_gimp_layer_or_rgb888(t_GVA_Handle *gvahand
+                 , gint32   framenumber
+                 , gint32   deinterlace
+                 , gdouble  threshold
+                 , gint32   numProcessors
+                 , GVA_fcache_fetch_result *fetchResult
+                 );
+
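
A caller-side sketch of the GVA_fcache_fetch_result protocol described by the struct comment above (the preallocated buffer name and its exact setup requirements are assumptions; the flag handling follows the IN/OUT notes):

   GVA_fcache_fetch_result fetchResult;

   fetchResult.isRgb888Result = TRUE;                    /* prefer a copy into rgbBuffer */
   fetchResult.rgbBuffer.data = prefetchedRgb888Data;    /* hypothetical caller-owned buffer */

   GVA_search_fcache_and_get_frame_as_gimp_layer_or_rgb888(gvahand
                 , framenumber
                 , 0          /* deinterlace: off */
                 , 1.0        /* threshold */
                 , numProcessors
                 , &fetchResult
                 );

   if(fetchResult.isFrameAvailable)
   {
     if(fetchResult.isRgb888Result)
     {
       /* frame data was copied into fetchResult.rgbBuffer */
     }
     else
     {
       /* frame was delivered as layer fetchResult.layer_id in image fetchResult.image_id */
     }
   }
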
+// gint32          GVA_search_fcache_and_get_frame_as_gimp_layer(t_GVA_Handle *gvahand
+//                  , gint32 framenumber
+//                  , gint32   deinterlace
+//                  , gdouble  threshold
+//                  , gint32   numProcessors
+//                  );
+
+
+gboolean        GVA_fcache_mutex_trylock(t_GVA_Handle  *gvahand);
+void            GVA_fcache_mutex_lock(t_GVA_Handle  *gvahand);
+void            GVA_fcache_mutex_unlock(t_GVA_Handle  *gvahand);
+
+
+
 void            GVA_debug_print_fcache(t_GVA_Handle *gvahand);
 void            GVA_image_set_aspect(t_GVA_Handle *gvahand, gint32 image_id);
 
@@ -427,6 +493,7 @@ guchar *       GVA_frame_to_buffer(t_GVA_Handle *gvahand
                 );
 guchar *       GVA_fetch_frame_to_buffer(t_GVA_Handle *gvahand
                 , gboolean do_scale
+                , gboolean isBackwards
                 , gint32 framenumber
                 , gint32 deinterlace
                 , gdouble threshold
@@ -443,7 +510,7 @@ void           GVA_delace_drawable(gint32 drawable_id
                 , gint32 deinterlace
                 , gdouble threshold
                 );
-                
+
 gint32          GVA_percent_2_frame(gint32 total_frames, gdouble percent);
 gdouble         GVA_frame_2_percent(gint32 total_frames, gdouble framenr);
 
diff --git a/libgapvidapi/gap_vid_api_ffmpeg.c b/libgapvidapi/gap_vid_api_ffmpeg.c
old mode 100644
new mode 100755
index 002912d..9ac10a7
--- a/libgapvidapi/gap_vid_api_ffmpeg.c
+++ b/libgapvidapi/gap_vid_api_ffmpeg.c
@@ -942,6 +942,20 @@ p_private_ffmpeg_get_next_frame(t_GVA_Handle *gvahand, gboolean do_copy_raw_chun
   gboolean  l_potential_index_frame;
   gboolean  l_key_frame_detected;
 
+  static gint32 funcId = -1;
+  static gint32 funcIdReadPacket = -1;
+  static gint32 funcIdDecode = -1;
+  static gint32 funcIdSwScale = -1;
+  
+  GAP_TIMM_GET_FUNCTION_ID(funcId,           "p_private_ffmpeg_get_next_frame");
+  GAP_TIMM_GET_FUNCTION_ID(funcIdReadPacket, "p_private_ffmpeg_get_next_frame.readAndDecodePacket");
+  GAP_TIMM_GET_FUNCTION_ID(funcIdDecode,     "p_private_ffmpeg_get_next_frame.decode");
+  GAP_TIMM_GET_FUNCTION_ID(funcIdSwScale,    "p_private_ffmpeg_get_next_frame.swScale");
+
+  GAP_TIMM_START_FUNCTION(funcId);
+  GAP_TIMM_START_FUNCTION(funcIdReadPacket);
+
+
   handle = (t_GVA_ffmpeg *)gvahand->decoder_handle;
 
   /* redirect picture_rgb pointers to current fcache element */
@@ -1117,6 +1131,8 @@ p_private_ffmpeg_get_next_frame(t_GVA_Handle *gvahand, gboolean do_copy_raw_chun
 
     avcodec_get_frame_defaults(&handle->big_picture_yuv);
 
+    GAP_TIMM_START_FUNCTION(funcIdDecode);
+
     /* decode a frame. return -1 on error, otherwise return the number of
      * bytes used. If no frame could be decompressed, *got_picture_ptr is
      * zero. Otherwise, it is non zero.
@@ -1136,6 +1152,8 @@ p_private_ffmpeg_get_next_frame(t_GVA_Handle *gvahand, gboolean do_copy_raw_chun
                                );
 #endif
 
+    GAP_TIMM_STOP_FUNCTION(funcIdDecode);
+
     if (gap_debug) 
     {
       
@@ -1262,8 +1280,14 @@ p_private_ffmpeg_get_next_frame(t_GVA_Handle *gvahand, gboolean do_copy_raw_chun
 
   }  /* end while packet_read and decode frame loop */
 
+  GAP_TIMM_STOP_FUNCTION(funcIdReadPacket);
+
+
+
   if((l_rc == 0)  && (l_got_picture))
   {
+    GAP_TIMM_START_FUNCTION(funcIdSwScale);
+
     if(gvahand->current_seek_nr > 1)
     {
       /* 1.st frame_len may contain headers (size may be too large) */
@@ -1288,7 +1312,7 @@ p_private_ffmpeg_get_next_frame(t_GVA_Handle *gvahand, gboolean do_copy_raw_chun
        *
        */
       handle->img_convert_ctx = sws_getCachedContext(handle->img_convert_ctx
-	                                 , gvahand->width
+                                         , gvahand->width
                                          , gvahand->height
                                          , handle->yuv_buff_pix_fmt    /* src pixelformat */
                                          , gvahand->width
@@ -1452,12 +1476,14 @@ p_private_ffmpeg_get_next_frame(t_GVA_Handle *gvahand, gboolean do_copy_raw_chun
                   , gvahand->current_frame_nr
                   );
     }
-
+    GAP_TIMM_STOP_FUNCTION(funcIdSwScale);
+    GAP_TIMM_STOP_FUNCTION(funcId);
     return(GVA_RET_OK);
 
   }
 
 
+  GAP_TIMM_STOP_FUNCTION(funcId);
 
   if(l_rc == 1)  { return(GVA_RET_EOF); }
 
@@ -3703,9 +3729,17 @@ p_ff_open_input(char *filename, t_GVA_Handle *gvahand, t_GVA_ffmpeg*  handle, gb
   AVInputFormat *iformat;
   int err, ii, ret;
   int rfps, rfps_base;
-
+  int thread_count;
+  
   if(gap_debug) printf("p_ff_open_input: START  vid_open:%d\n", (int)vid_open);
 
+  thread_count = 4;
+  
+//   gap_base_get_gimprc_int_value("num-processors"
+//                                    , DEFAULT_WORKER_THREADS
+//                                    , 1
+//                                    , MAX_WORKER_THREADS
+//                                    );
 
   /* open the input file with generic libav function
    * Opens a media file as input. The codec are not opened.
@@ -3754,6 +3788,7 @@ p_ff_open_input(char *filename, t_GVA_Handle *gvahand, t_GVA_ffmpeg*  handle, gb
   for(ii=0; ii < ic->nb_streams; ii++)
   {
     acc = ic->streams[ii]->codec;
+    //avcodec_thread_init(acc, thread_count);
 
     switch(acc->codec_type)
     {
@@ -3770,6 +3805,7 @@ p_ff_open_input(char *filename, t_GVA_Handle *gvahand, t_GVA_ffmpeg*  handle, gb
                 handle->aud_codec_context = acc;
 
                 handle->aud_stream = ic->streams[ii];
+                //avcodec_thread_init(handle->aud_stream->codec, thread_count);
               }
               gvahand->audio_cannels = acc->channels;
               gvahand->samplerate = acc->sample_rate;
@@ -3790,6 +3826,8 @@ p_ff_open_input(char *filename, t_GVA_Handle *gvahand, t_GVA_ffmpeg*  handle, gb
                 handle->vid_codec_context = acc;
 
                 handle->vid_stream = ic->streams[ii];
+                //avcodec_thread_init(handle->vid_stream->codec, thread_count);
+                
               }
               gvahand->height = acc->height;
               gvahand->width = acc->width;
diff --git a/libgapvidapi/gap_vid_api_mp_util.c b/libgapvidapi/gap_vid_api_mp_util.c
new file mode 100755
index 0000000..0a1ccac
--- /dev/null
+++ b/libgapvidapi/gap_vid_api_mp_util.c
@@ -0,0 +1,560 @@
+/* gap_vid_api_mp_util.c
+ *
+ * GAP Video read API multiprocessor support utility procedures.
+ *
+ * 2010.11.21   hof created
+ *
+ */
+
+
+
+
+#define GVA_MAX_MEMCPD_THREADS 16
+
+
+typedef struct GapMultiPocessorCopyOrDelaceData {  /* memcpd */
+    GVA_RgbPixelBuffer *rgbBuffer;
+    guchar             *src_data;        /* source buffer data at same size and bpp as described by rgbBuffer */
+    gint                startCol;        /* relevant for deinterlacing copy */
+    gint                colWidth;        /* relevant for deinterlacing copy */
+    gint                memRow;          /* relevant for simple memcpy */
+    gint                memHeightInRows; /* relevant for simple memcpy */
+    gint                cpuId;
+  
+    GapTimmRecord       memcpyStats;
+    GapTimmRecord       delaceStats;
+    
+    
+    gint                isFinished;
+    
+} GapMultiPocessorCopyOrDelaceData;
+
+
+
+
+/* ---------------------------
+ * GVA_fcache_mutex_lock
+ * ---------------------------
+ * lock the fcache_mutex if present (i.e. it is NOT NULL)
+ * Note: the fcache_mutex is NULL per default.
+ *       In case an application wants to use the GVA api fcache in a multithread environment,
+ *       it must provide a mutex.
+ *       example how to provide the mutex:
+ *       (e.g gvahand->fcache_mutex = g_mutex_new() )
+ */
+void
+GVA_fcache_mutex_lock(t_GVA_Handle  *gvahand)
+{
+  if(gvahand->fcache_mutex)
+  {
+    GAP_TIMM_START_RECORD(&gvahand->fcacheMutexLockStats);
+
+    g_mutex_lock (gvahand->fcache_mutex);
+
+    GAP_TIMM_STOP_RECORD(&gvahand->fcacheMutexLockStats);
+  }
+
+}  /* end GVA_fcache_mutex_lock */
+
+
+/* ---------------------------
+ * GVA_fcache_mutex_trylock
+ * ---------------------------
+ * try to lock the fcache_mutex if present (i.e. it is NOT NULL)
+ * return FALSE immediately in case the mutex is already locked by another thread
+ * return TRUE in case the mutex was locked successfully (this call never blocks)
+ *        TRUE is also returned immediately in case
+ *        a) the thread system is not initialized, e.g. g_thread_init was not yet called
+ *        b) the gvahand->fcache_mutex is NULL (which is the default after opening a video handle)
+ */
+gboolean
+GVA_fcache_mutex_trylock(t_GVA_Handle  *gvahand)
+{
+  gboolean isSuccessful;
+
+  if(gvahand->fcache_mutex)
+  {
+    GAP_TIMM_START_RECORD(&gvahand->fcacheMutexLockStats);
+
+    isSuccessful = g_mutex_trylock (gvahand->fcache_mutex);
+
+    GAP_TIMM_STOP_RECORD(&gvahand->fcacheMutexLockStats);
+  }
+  else
+  {
+    /* not really locked, because no mutex is available.
+     * but in this case behave same as g_mutex_trylock
+     * when thread system is not initialized, e.g g_thread_init was not yet called
+     */
+    isSuccessful = TRUE;
+  }
+
+  return(isSuccessful);
+  
+}  /* end GVA_fcache_mutex_trylock */
+
+
+/* ---------------------------
+ * GVA_fcache_mutex_unlock
+ * ---------------------------
+ * unlock the fcache_mutex if present (i.e. it is NOT NULL)
+ */
+void
+GVA_fcache_mutex_unlock(t_GVA_Handle  *gvahand)
+{
+  if(gvahand->fcache_mutex)
+  {
+    g_mutex_unlock (gvahand->fcache_mutex);
+  }
+}  /* end GVA_fcache_mutex_unlock */
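
The locking pattern these three helpers are meant for, as a minimal sketch (it mirrors the GVA_frame_to_gimp_layer_2 and GVA_fcache_to_gimp_image changes above; all calls degrade to no-ops while fcache_mutex is NULL):

   GVA_fcache_mutex_lock (gvahand);
   /* ... read or update gvahand->fcache here ... */
   GVA_fcache_mutex_unlock (gvahand);

   /* non-blocking variant */
   if (GVA_fcache_mutex_trylock (gvahand))
   {
     /* ... fcache access ... */
     GVA_fcache_mutex_unlock (gvahand);
   }
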
+
+
+
+/* ------------------------------------------
+ * p_copyAndDeinterlaceRgbBufferToPixelRegion
+ * ------------------------------------------
+ * make a deinterlaced copy of a vertical stripe of the source buffer,
+ * starting at startCol and stripeWidthInCols columns wide.
+ * Note that the source buffer is allocated at the full frame size and must
+ * match the full size described by the destination rgbBuffer.
+ *
+ * RESTRICTION: src_data and rgbBuffer must have the same width, height and bpp.
+ */
+static void
+p_copyAndDeinterlaceRgbBufferToPixelRegion (const guchar *src_data
+                    , GVA_RgbPixelBuffer *rgbBuffer
+                    , gint32 startCol, gint32 stripeWidthInCols)
+{
+  guint          row;
+  const guchar*  src;
+  guchar*        dest;
+  gint32         stripeWidthInBytes;
+  gint32         startOffestInBytes;
+
+  gint32  l_interpolate_flag;
+  gint32  l_mix_threshold;
+
+  l_interpolate_flag = gva_delace_calculate_interpolate_flag(rgbBuffer->deinterlace);
+  l_mix_threshold = gva_delace_calculate_mix_threshold(rgbBuffer->threshold);
+  
+  stripeWidthInBytes = (stripeWidthInCols * rgbBuffer->bpp);
+  startOffestInBytes = startCol * rgbBuffer->bpp;
+  
+  src = src_data + startOffestInBytes;
+  dest = rgbBuffer->data + startOffestInBytes;
+
+
+  for (row = 0; row < rgbBuffer->height; row++)
+  {
+     if ((row & 1) == l_interpolate_flag)
+     {
+       if(row == 0)
+       {
+         /* we have no previous row, so we just copy the next row */
+         memcpy(dest, src + rgbBuffer->rowstride, stripeWidthInBytes);
+       }
+       else if (row == rgbBuffer->height -1 )
+       {
+         /* we have no next row, so we just copy the previous row */
+         memcpy(dest, src - rgbBuffer->rowstride, stripeWidthInBytes);
+       }
+       else
+       {
+         /* we have both prev and next row within valid range
+          * and can calculate an interpolated tile row
+          */
+         gva_delace_mix_rows ( stripeWidthInCols
+                       , rgbBuffer->bpp
+                       , rgbBuffer->rowstride
+                       , l_mix_threshold
+                       , src - rgbBuffer->rowstride   /* prev_row */
+                       , src + rgbBuffer->rowstride   /* next_row */
+                       , dest                       /* mixed_row (to be filled) */
+                       );
+       }
+     }
+     else
+     {
+       /* copy original row */
+       memcpy(dest, src, stripeWidthInBytes);
+     }
+     
+     src  += rgbBuffer->rowstride;
+     dest += rgbBuffer->rowstride;
+  }
+  
+}  /* end p_copyAndDeinterlaceRgbBufferToPixelRegion */
+
+
+
+/* --------------------------------------------
+ * p_memcpy_or_delace_WorkerThreadFunction
+ * --------------------------------------------
+ * this function runs in concurrent parallel worker threads.
+ * each of the parallel running threads processes another portion of the frame memory.
+ * When deinterlacing is required the portions are column stripes starting at startCol and colWidth columns wide.
+ * For the simple memcpy case the portions are memory blocks (row stripes starting at memRow, memHeightInRows rows high)
+ *
+ * this procedure records runtime values using the GAP_TIMM_ macros
+ *  (this debug feature is only available in case runtime recording was configured at compile time)
+ */
+static void
+p_memcpy_or_delace_WorkerThreadFunction(GapMultiPocessorCopyOrDelaceData *memcpd)
+{
+//  if(gap_debug)
+//  {
+//    printf("p_memcpy_or_delace_WorkerThreadFunction: START cpu[%d]\n"
+//      ,(int)memcpd->cpuId
+//      );
+//  }  
+     
+  if (memcpd->rgbBuffer->deinterlace != 0)
+  {
+    GAP_TIMM_START_RECORD(&memcpd->delaceStats);
+    
+    p_copyAndDeinterlaceRgbBufferToPixelRegion (memcpd->src_data
+                        , memcpd->rgbBuffer
+                        , memcpd->startCol
+                        , memcpd->colWidth
+                        );
+    GAP_TIMM_STOP_RECORD(&memcpd->delaceStats);
+  }
+  else
+  {
+    guchar*  src;
+    guchar*  dest;
+    gint32   startOffestInBytes;
+
+    GAP_TIMM_START_RECORD(&memcpd->memcpyStats);
+  
+    startOffestInBytes = memcpd->memRow * memcpd->rgbBuffer->rowstride;
+  
+    src = memcpd->src_data + startOffestInBytes;
+    dest = memcpd->rgbBuffer->data + startOffestInBytes;
+    
+    memcpy(dest, src, memcpd->memHeightInRows * memcpd->rgbBuffer->rowstride);
+
+    GAP_TIMM_STOP_RECORD(&memcpd->memcpyStats);
+  }
+
+//  if(gap_debug)
+//  {
+//    printf("p_memcpy_or_delace_WorkerThreadFunction: DONE cpu[%d]\n"
+//      ,(int)memcpd->cpuId
+//      );
+//  }  
+
+
+  memcpd->isFinished = TRUE;
+  
+   
+}  /* end p_memcpy_or_delace_WorkerThreadFunction */
+
+
+
+
+/* ----------------------------------------------------
+ * GVA_copy_or_deinterlace_fcache_data_to_rgbBuffer
+ * ----------------------------------------------------
+ * copy (or deinterlaced copy) the specified srcFrameData into the specified
+ * rgbBuffer.
+ * in case the specified numProcessors > 1 this procedure uses a thread pool
+ * to spread the work to the given number of processors.
+ *
+ * calling this procedure with rgbBuffer == NULL triggers logging
+ * of captured runtime statistic values per thread
+ * (in case gimp-gap was compiled with the runtime recording configuration)
+ * (this is a debug feature for development and analysis purposes only)
+ */
+void
+GVA_copy_or_deinterlace_fcache_data_to_rgbBuffer(GVA_RgbPixelBuffer *rgbBuffer
+                                                , guchar *srcFrameData
+                                                , gint32 numProcessors)
+{
+  static GapMultiPocessorCopyOrDelaceData  memcpdArray[GVA_MAX_MEMCPD_THREADS];
+
+  static GThreadPool  *threadPool = NULL;
+  static gulong        usleepTime = 10;
+  static gint          numThreadsMax         = 1;
+  gboolean             isMultithreadEnabled;
+  gint                 numThreads;
+  gint                 colsPerCpu;
+  gint                 rowsPerCpu;
+  gint                 retry;
+  gint                 startCol;
+  gint                 colWidth;
+  gint                 startRow;
+  gint                 rowHeight;
+  gint                 ii;
+  GapMultiPocessorCopyOrDelaceData *memcpd;
+  GError *error;
+
+  static gint32 funcId = -1;
+  static gint32 funcIdPush = -1;
+  static gint32 funcIdSingle = -1;
+  static gint32 funcIdMainWait = -1;
+
+  GAP_TIMM_GET_FUNCTION_ID(funcId, "GVA_copy_or_deinterlace_fcache_data_to_rgbBuffer");
+  GAP_TIMM_GET_FUNCTION_ID(funcIdPush, "GVA_copy_or_deinterlace_fcache_data_to_rgbBuffer.pushToReActivateThreads");
+  GAP_TIMM_GET_FUNCTION_ID(funcIdSingle, "GVA_copy_or_deinterlace_fcache_data_to_rgbBuffer.singlecall");
+  GAP_TIMM_GET_FUNCTION_ID(funcIdMainWait, "GVA_copy_or_deinterlace_fcache_data_to_rgbBuffer.main (Wait)");
+
+  error = NULL;
+
+  if(gap_debug)
+  {
+    printf("GVA_copy_or_deinterlace_fcache_data_to_rgbBuffer START numProcessors:%d\n"
+      ,(int)numProcessors
+      );
+  }
+  
+  /* rgbBuffer NULL pointer triggers a debug feature that logs the recorded runtime statistics to stdout.
+   * (but only if gimp-gap was configured for runtime recording at compile time)
+   */
+  if(rgbBuffer == NULL)
+  {
+    for(ii=0; ii < numThreadsMax; ii++)
+    {
+      GapMultiPocessorCopyOrDelaceData *memcpd;
+      memcpd = &memcpdArray[ii];
+      
+      GAP_TIMM_PRINT_RECORD(&memcpd->memcpyStats, "... Thrd GVA_copy_or_deinterlace_fcache_data_to_rgbBuffer.memcpyStats");
+      GAP_TIMM_PRINT_RECORD(&memcpd->delaceStats, "... Thrd GVA_copy_or_deinterlace_fcache_data_to_rgbBuffer.delaceStats");
+    }
+    
+    return;
+  }
+   
+
+  GAP_TIMM_START_FUNCTION(funcId);
+
+  numThreads = MIN(numProcessors, GVA_MAX_MEMCPD_THREADS);
+  numThreadsMax = MAX(numThreadsMax, numThreads);
+  
+  colsPerCpu = (rgbBuffer->width + (numThreads -1)) / numThreads;
+  rowsPerCpu = (rgbBuffer->height + (numThreads -1)) / numThreads;
+
+  /* check and init thread system */
+  if(numThreads > 1)
+  {
+    isMultithreadEnabled = gap_base_thread_init();
+  }
+  else
+  {
+    isMultithreadEnabled = FALSE;
+  }
+  
+  if((isMultithreadEnabled != TRUE)
+  || (colsPerCpu < 16)
+  || (rowsPerCpu < 16))
+  {
+    GAP_TIMM_START_FUNCTION(funcIdSingle);
+    /* the singleprocessor variant calls the worker thread function once, set up
+     * to process the full buffer in one synchronous call
+     * (no locks are set in this case and no condition will be sent)
+     */
+    memcpd = &memcpdArray[0];
+    memcpd->src_data = srcFrameData;
+    memcpd->rgbBuffer = rgbBuffer;
+    
+    memcpd->startCol        = 0;
+    memcpd->colWidth        = rgbBuffer->width;
+    memcpd->memRow          = 0;
+    memcpd->memHeightInRows = rgbBuffer->height;
+    memcpd->cpuId = 0;   /* only one synchronous worker in this variant */
+    memcpd->isFinished = FALSE;
+    
+    p_memcpy_or_delace_WorkerThreadFunction(memcpd);
+    
+    GAP_TIMM_STOP_FUNCTION(funcIdSingle);
+    GAP_TIMM_STOP_FUNCTION(funcId);
+    return;
+  }
+
+
+    
+  if (threadPool == NULL)
+  {
+    usleepTime = gap_base_get_gimprc_int_value("video-api-mp-copy-or-delace-usleep"
+                  , 10
+                  , 1
+                  , 10000);
+    
+    /* init the threadPool at the first multiprocessing call
+     * (and keep the threads until the end of the main process)
+     */
+    threadPool = g_thread_pool_new((GFunc) p_memcpy_or_delace_WorkerThreadFunction
+                                         ,NULL        /* user data */
+                                         ,GVA_MAX_MEMCPD_THREADS          /* max_threads */
+                                         ,TRUE        /* exclusive */
+                                         ,&error      /* GError **error */
+                                         );
+
+    for(ii=0; ii < GVA_MAX_MEMCPD_THREADS; ii++)
+    {
+      GapMultiPocessorCopyOrDelaceData *memcpd;
+      memcpd = &memcpdArray[ii];
+      
+      GAP_TIMM_INIT_RECORD(&memcpd->memcpyStats);
+      GAP_TIMM_INIT_RECORD(&memcpd->delaceStats);
+    }
+  }
+
+  
+  if(gap_debug)
+  {
+    printf("GVA_copy_or_deinterlace_fcache_data_to_rgbBuffer size:%d x %d numThreads:%d colsPerCpu:%d rowsPerCpu:%d\n"
+      ,(int)rgbBuffer->width
+      ,(int)rgbBuffer->height
+      ,(int)numThreads
+      ,(int)colsPerCpu
+      ,(int)rowsPerCpu
+      );
+  }
+
+  startCol = 0;
+  startRow = 0;
+  colWidth = colsPerCpu;
+  rowHeight = rowsPerCpu;
+  
+
+  GAP_TIMM_START_FUNCTION(funcIdPush);
+ 
+  /* build work packet-stripes and re-start one thread per stripe */
+  for(ii=0; ii < numThreads; ii++)
+  {
+    GapMultiPocessorCopyOrDelaceData *memcpd;
+
+    if(gap_debug)
+    {
+      printf("GVA_copy_or_deinterlace.. Cpu[%d] startCol:%d colWidth:%d startRow:%d rowHeight:%d delace:%d\n"
+        ,(int)ii
+        ,(int)startCol
+        ,(int)colWidth
+        ,(int)startRow
+        ,(int)rowHeight
+        ,(int)rgbBuffer->deinterlace
+        );
+    }
+    
+    memcpd = &memcpdArray[ii];
+    memcpd->src_data = srcFrameData;
+    memcpd->rgbBuffer = rgbBuffer;
+    
+    memcpd->startCol        = startCol;
+    memcpd->colWidth        = colWidth;
+    memcpd->memRow          = startRow;
+    memcpd->memHeightInRows = rowHeight;
+    memcpd->cpuId = ii;
+    memcpd->isFinished = FALSE;
+    
+    /* (re)activate next thread */
+    g_thread_pool_push (threadPool
+                       , memcpd    /* user Data for the worker thread*/
+                       , &error
+                       );
+    startCol += colsPerCpu;
+    if((startCol + colsPerCpu) >  rgbBuffer->width)
+    {
+      /* the last thread handles a vertical stripe with the remaining columns
+       */
+      colWidth = rgbBuffer->width - startCol;
+    }
+    startRow += rowsPerCpu;
+    if((startRow + rowsPerCpu) >  rgbBuffer->height)
+    {
+      /* the last thread handles a horizontal stripe with the remaining rows
+       */
+      rowHeight = rgbBuffer->height - startRow;
+    }
+  }
+
+  GAP_TIMM_STOP_FUNCTION(funcIdPush);
+
+
+  /* now wait until all worker threads have finished their tasks */
+  retry = 0;
+  while(TRUE)
+  {
+    gboolean isAllWorkDone;
+
+    if(gap_debug)
+    {
+      printf("GVA_copy_or_deinterlace_fcache_data_to_rgbBuffer: WAIT retry :%d\n"
+        ,(int)retry
+        );
+    }
+  
+    GAP_TIMM_START_FUNCTION(funcIdMainWait);
+
+    /* sleep a very short time before the next check.
+     * Note that a longer sleep leaves more cpu resources for the parallel running threads
+     * but makes the polling raster (and therefore the overall wait time) coarser.
+     * Variants using a mutex and a wakeup condition did not perform better than this,
+     * because the parallel running threads had additional mutex locking overhead
+     * (that is not necessary here where each thread operates on its own memory range
+     * of the frame data)
+     */
+    g_usleep(usleepTime);
+    
+    GAP_TIMM_STOP_FUNCTION(funcIdMainWait);
+
+    if(gap_debug)
+    {
+      printf("GVA_copy_or_deinterlace_fcache_data_to_rgbBuffer: WAKE-UP retry :%d\n"
+        , (int)retry
+        );
+    }
+
+
+    /* assume all threads finished work (this may already be the case after the first retry) */
+    isAllWorkDone = TRUE;
+    
+    for(ii=0; ii < numThreads; ii++)
+    {
+      memcpd = &memcpdArray[ii];
+      if(gap_debug)
+      {
+        printf("GVA_copy_or_deinterlace_fcache_data_to_rgbBuffer: STATUS Cpu[%d] retry :%d  isFinished:%d\n"
+          ,(int)ii
+          ,(int)retry
+          ,(int)memcpd->isFinished
+          );
+      }
+      if(memcpd->isFinished != TRUE)
+      {
+        isAllWorkDone = FALSE;
+        break;
+      }
+    }
+
+    if(isAllWorkDone == TRUE)
+    {
+      break;
+    }
+
+    retry++;
+  }
+
+  GAP_TIMM_STOP_FUNCTION(funcId);
+
+  if(gap_debug)
+  {
+    printf("GVA_copy_or_deinterlace_fcache_data_to_rgbBuffer: DONE retry :%d\n"
+        , (int)retry
+        );
+  }
+  
+}  /* end GVA_copy_or_deinterlace_fcache_data_to_rgbBuffer */
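
A worked example of the stripe partitioning above (illustrative numbers): for a 722 x 480 frame and numThreads = 4 the rounding-up division gives colsPerCpu = (722 + 3) / 4 = 181 and rowsPerCpu = (480 + 3) / 4 = 120. The deinterlacing path then pushes the column stripes 0..180, 181..361, 362..542 and 543..721 (widths 181, 181, 181, 179), because the (startCol + colsPerCpu) > width check shrinks the stripe handed to the last thread; the plain memcpy path splits the frame into the row blocks 0..119, 120..239, 240..359 and 360..479.
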
+
+
+/* ----------------------------------------------------
+ * GVA_copy_or_delace_print_statistics
+ * ----------------------------------------------------
+ */
+void
+GVA_copy_or_delace_print_statistics()
+{
+  GVA_copy_or_deinterlace_fcache_data_to_rgbBuffer(NULL, NULL, 0);
+}
+
diff --git a/libgapvidutil/gap_gve_raw.c b/libgapvidutil/gap_gve_raw.c
old mode 100644
new mode 100755
index bf2a686..955579a
--- a/libgapvidutil/gap_gve_raw.c
+++ b/libgapvidutil/gap_gve_raw.c
@@ -33,6 +33,7 @@
  */
 
 
+#include <config.h>
 
 
 /* SYSTEM (UNIX) includes */
@@ -44,117 +45,168 @@
 #include "libgimp/gimp.h"
 
 /* GAP includes */
+#include "gap_base.h"
 #include "gap_gve_raw.h"
 
 
 /* the raw CODEC needs no extra LIB includes */
 
+typedef struct DrawableToRgbBufferProcessorData {  /* drgb */
+    GimpDrawable       *src_drawable;
+    GimpImageType       drawable_type;
+    GapRgbPixelBuffer  *rgbBuffer;
+    gint                startCol;
+    gint                colWidth;
+    gint                cpuId;
+    gboolean            isFinished;
+    
+} DrawableToRgbBufferProcessorData;
+
+extern int gap_debug;
 
 
 /*************************************************************
  *          TOOL FUNCTIONS                                   *
  *************************************************************/
 
+/* ------------------------------------
+ * gap_gve_init_GapRgbPixelBuffer
+ * ------------------------------------
+ *
+ */
+void
+gap_gve_init_GapRgbPixelBuffer(GapRgbPixelBuffer *rgbBuffer, guint width, guint height)
+{
+  rgbBuffer->bpp = 3;
+  rgbBuffer->width = width;
+  rgbBuffer->height = height;
+  rgbBuffer->rowstride = rgbBuffer->width * rgbBuffer->bpp;
+  
+}  /* end gap_gve_init_GapRgbPixelBuffer */
 
 /* ------------------------------------
- * gap_gve_raw_BGR_drawable_encode
+ * gap_gve_new_GapRgbPixelBuffer
  * ------------------------------------
- * Encode drawable to RAW Buffer (Bytesequence BGR)
  *
  */
-guchar *
-gap_gve_raw_BGR_drawable_encode(GimpDrawable *drawable, gint32 *RAW_size, gboolean vflip
-                        ,guchar *app0_buffer, gint32 app0_length)
+GapRgbPixelBuffer *
+gap_gve_new_GapRgbPixelBuffer(guint width, guint height)
 {
-  GimpPixelRgn pixel_rgn;
-  GimpImageType drawable_type;
-  guchar *RAW_data;
-  guchar *RAW_ptr;
-  guchar *pixelrow_data;
-  guint   l_row;
-  gint32  l_idx;
-  gint32  l_blue;
-  gint32  l_green;
-  gint32  l_red;
-  gint32  l_rowstride;
+  GapRgbPixelBuffer *rgbBuffer;
+  
+  rgbBuffer = g_new(GapRgbPixelBuffer, 1);
 
-  drawable_type = gimp_drawable_type (drawable->drawable_id);
-  gimp_pixel_rgn_init (&pixel_rgn, drawable, 0, 0, drawable->width, drawable->height, FALSE, FALSE);
+  gap_gve_init_GapRgbPixelBuffer(rgbBuffer, width, height);
 
-  l_rowstride = drawable->width * drawable->bpp;
-  pixelrow_data = (guchar *)g_malloc0(l_rowstride);
-  *RAW_size = drawable->width * drawable->height * 3;
+  rgbBuffer->data = (guchar *)g_malloc0((rgbBuffer->height * rgbBuffer->rowstride));
+  
+  return (rgbBuffer);
+  
+}  /* end gap_gve_new_GapRgbPixelBuffer */
 
-  RAW_data = (guchar *)g_malloc0((drawable->width * drawable->height * 3)
-           + app0_length);
-  if(app0_buffer)
-  {
-    memcpy(RAW_data, app0_buffer, app0_length);
-    *RAW_size += app0_length;
-  }
 
-  RAW_ptr = RAW_data + app0_length;
-  l_red   = 0;
-  l_green = 1;
-  l_blue  = 2;
-  if((drawable_type == GIMP_GRAY_IMAGE)
-  || (drawable_type == GIMP_GRAYA_IMAGE))
+
+/* ------------------------------------
+ * gap_gve_free_GapRgbPixelBuffer
+ * ------------------------------------
+ *
+ */
+void
+gap_gve_free_GapRgbPixelBuffer(GapRgbPixelBuffer *rgbBuffer)
+{
+  if(rgbBuffer)
   {
-    l_green = 0;
-    l_blue  = 0;
+    if(rgbBuffer->data)
+    {
+      g_free(rgbBuffer->data);
+    }
+    g_free(rgbBuffer);
   }
+}  /* end gap_gve_free_GapRgbPixelBuffer */
 
-  for(l_row = 0; l_row < drawable->height; l_row++)
+
+
+
+/* ----------------------------------------
+ * gap_gve_convert_GapRgbPixelBuffer_To_BGR
+ * ----------------------------------------
+ */
+void
+gap_gve_convert_GapRgbPixelBuffer_To_BGR(GapRgbPixelBuffer *rgbBuffer)
+{
+  gint32   pixelCount;
+  guchar  *ptrA;
+  guchar  *ptrB;
+  
+  ptrA = &rgbBuffer->data[0];
+  ptrB = &rgbBuffer->data[2];
+  
+  for (pixelCount = rgbBuffer->width * rgbBuffer->height; pixelCount > 0; pixelCount--)
   {
-     gint32 l_src_row;
+    guchar tmp;
+    
+    tmp = *ptrA;
+    *ptrA = *ptrB;
+    *ptrB = tmp;
+    
+    ptrA += rgbBuffer->bpp;
+    ptrB += rgbBuffer->bpp;
+  }
+  
+}  /* end gap_gve_convert_GapRgbPixelBuffer_To_BGR */
 
-     if(vflip)  { l_src_row = (drawable->height - 1) - l_row; }
-     else       { l_src_row = l_row;}
 
-     gimp_pixel_rgn_get_rect (&pixel_rgn, pixelrow_data
-                              , 0
-                              , l_src_row
-                              , drawable->width
-                              , 1);
-     for(l_idx=0;l_idx < l_rowstride; l_idx += drawable->bpp)
-     {
-       *(RAW_ptr++) = pixelrow_data[l_idx + l_blue];
-       *(RAW_ptr++) = pixelrow_data[l_idx + l_green];
-       *(RAW_ptr++) = pixelrow_data[l_idx + l_red];
-     }
+/* ----------------------------------------
+ * gap_gve_vflip_GapRgbPixelBuffer
+ * ----------------------------------------
+ */
+void
+gap_gve_vflip_GapRgbPixelBuffer(GapRgbPixelBuffer *rgbBuffer)
+{
+  gint32  row;
+  
+  for (row = 0; row < rgbBuffer->height / 2; row++)
+  {
+    gint    upperRow;
+    gint32  ii;
+    guchar  *ptrA;
+    guchar  *ptrB;
+    
+    upperRow = (rgbBuffer->height -1) - row;
+    ptrA = &rgbBuffer->data[row * rgbBuffer->rowstride];
+    ptrB = &rgbBuffer->data[upperRow * rgbBuffer->rowstride];
+    
+    for(ii = 0; ii < rgbBuffer->rowstride; ii++)
+    {
+      guchar tmp;
+      
+      tmp = *ptrA;
+      *(ptrA++) = *ptrB;
+      *(ptrB++) = tmp;
+    }
   }
-  g_free(pixelrow_data);
-  return(RAW_data);
-}    /* end gap_gve_raw_BGR_drawable_encode */
+  
+}  /* end gap_gve_vflip_GapRgbPixelBuffer */
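
A minimal lifecycle sketch for the new GapRgbPixelBuffer helpers (the in-place BGR swap and vflip are optional steps; the drawable is assumed to come from the usual encoder callback):

   GapRgbPixelBuffer *rgbBuffer;

   rgbBuffer = gap_gve_new_GapRgbPixelBuffer(drawable->width, drawable->height);
   gap_gve_drawable_to_RgbBuffer(drawable, rgbBuffer);

   gap_gve_convert_GapRgbPixelBuffer_To_BGR(rgbBuffer);  /* swap R and B in place (optional) */
   gap_gve_vflip_GapRgbPixelBuffer(rgbBuffer);           /* mirror vertically in place (optional) */

   /* ... feed rgbBuffer->data (rowstride == width * 3) to the encoder ... */

   gap_gve_free_GapRgbPixelBuffer(rgbBuffer);
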
 
 
-/* ------------------------------------
- * gap_gve_raw_RGB_drawable_encode
- * ------------------------------------
- * Encode drawable to RAW Buffer (Bytesequence RGB)
+
+/* --------------------------------------
+ * gap_gve_raw_RGB_or_BGR_drawable_encode
+ * --------------------------------------
+ * Encode drawable to RAW Buffer (Bytesequence RGB or BGR)
  *
  */
 guchar *
-gap_gve_raw_RGB_drawable_encode(GimpDrawable *drawable, gint32 *RAW_size, gboolean vflip
-                        ,guchar *app0_buffer, gint32 app0_length)
+gap_gve_raw_RGB_or_BGR_drawable_encode(GimpDrawable *drawable, gint32 *RAW_size, gboolean vflip
+                        ,guchar *app0_buffer, gint32 app0_length, gboolean convertToBGR)
 {
-  GimpPixelRgn pixel_rgn;
-  GimpImageType drawable_type;
+  GapRgbPixelBuffer  rgbBufferLocal;
+  GapRgbPixelBuffer *rgbBuffer;
   guchar *RAW_data;
-  guchar *RAW_ptr;
-  guchar *pixelrow_data;
-  guint   l_row;
-  gint32  l_idx;
-  gint32  l_blue;
-  gint32  l_green;
-  gint32  l_red;
-  gint32  l_rowstride;
-
-  drawable_type = gimp_drawable_type (drawable->drawable_id);
-  gimp_pixel_rgn_init (&pixel_rgn, drawable, 0, 0, drawable->width, drawable->height, FALSE, FALSE);
 
-  l_rowstride = drawable->width * drawable->bpp;
-  pixelrow_data = (guchar *)g_malloc0(l_rowstride);
+  rgbBuffer = &rgbBufferLocal;
+  gap_gve_init_GapRgbPixelBuffer(rgbBuffer, drawable->width, drawable->height);
+  
   *RAW_size = drawable->width * drawable->height * 3;
 
   RAW_data = (guchar *)g_malloc0((drawable->width * drawable->height * 3)
@@ -165,42 +217,56 @@ gap_gve_raw_RGB_drawable_encode(GimpDrawable *drawable, gint32 *RAW_size, gboole
     *RAW_size += app0_length;
   }
 
-  RAW_ptr = RAW_data + app0_length;
-  l_red   = 0;
-  l_green = 1;
-  l_blue  = 2;
-  if((drawable_type == GIMP_GRAY_IMAGE)
-  || (drawable_type == GIMP_GRAYA_IMAGE))
+  rgbBuffer->data = RAW_data + app0_length;
+  gap_gve_drawable_to_RgbBuffer(drawable, rgbBuffer);
+  
+  if(convertToBGR)
   {
-    l_green = 0;
-    l_blue  = 0;
+    gap_gve_convert_GapRgbPixelBuffer_To_BGR(rgbBuffer);
   }
-
-  for(l_row = 0; l_row < drawable->height; l_row++)
+  
+  if(vflip == TRUE)
   {
-     gint32 l_src_row;
-
-     if(vflip)  { l_src_row = (drawable->height - 1) - l_row; }
-     else       { l_src_row = l_row;}
-
-     gimp_pixel_rgn_get_rect (&pixel_rgn, pixelrow_data
-                              , 0
-                              , l_src_row
-                              , drawable->width
-                              , 1);
-     for(l_idx=0;l_idx < l_rowstride; l_idx += drawable->bpp)
-     {
-       *(RAW_ptr++) = pixelrow_data[l_idx + l_red];
-       *(RAW_ptr++) = pixelrow_data[l_idx + l_green];
-       *(RAW_ptr++) = pixelrow_data[l_idx + l_blue];
-     }
+    gap_gve_vflip_GapRgbPixelBuffer(rgbBuffer);
   }
-  g_free(pixelrow_data);
+
   return(RAW_data);
+}    /* end gap_gve_raw_RGB_or_BGR_drawable_encode */
+
+
+/* ------------------------------------
+ * gap_gve_raw_BGR_drawable_encode
+ * ------------------------------------
+ * Encode drawable to RAW Buffer (Bytesequence BGR)
+ * the resulting data can optionally be vertically mirrored
+ * and optionally prefixed with header data provided in app0_buffer / app0_length
+ */
+guchar *
+gap_gve_raw_BGR_drawable_encode(GimpDrawable *drawable, gint32 *RAW_size, gboolean vflip
+                        ,guchar *app0_buffer, gint32 app0_length)
+{
+  return (gap_gve_raw_RGB_or_BGR_drawable_encode(drawable, RAW_size, vflip, app0_buffer, app0_length, TRUE));
+}    /* end gap_gve_raw_BGR_drawable_encode */
+
+
+
+/* ------------------------------------
+ * gap_gve_raw_RGB_drawable_encode
+ * ------------------------------------
+ * Encode drawable to RAW Buffer (Bytesequence RGB)
+ * the resulting data can optionally be vertically mirrored
+ * and optionally prefixed with header data provided in app0_buffer / app0_length
+ */
+guchar *
+gap_gve_raw_RGB_drawable_encode(GimpDrawable *drawable, gint32 *RAW_size, gboolean vflip
+                        ,guchar *app0_buffer, gint32 app0_length)
+{
+  return (gap_gve_raw_RGB_or_BGR_drawable_encode(drawable, RAW_size, vflip, app0_buffer, app0_length, FALSE));
 }    /* end gap_gve_raw_RGB_drawable_encode */
 
 
 
+
 /* ------------------------------------
  * gap_gve_raw_YUV444_drawable_encode
  * ------------------------------------
@@ -494,3 +560,469 @@ gap_gve_raw_YUV420_drawable_encode(GimpDrawable *drawable, gint32 *RAW_size, gbo
 
   return(RAW_data);
 }    /* end gap_gve_raw_YUV420_drawable_encode */
+
+
+
+/* ---------------------------------
+ * p_copyPixelRegionToRgbBuffer
+ * ---------------------------------
+ */
+static inline void
+p_copyPixelRegionToRgbBuffer (const GimpPixelRgn *srcPR
+                    ,const GapRgbPixelBuffer *dstBuff
+                    ,GimpImageType drawable_type)
+{
+  guint    row;
+  guchar*  src;
+  guchar*  dest;
+   
+  src  = srcPR->data;
+  dest = dstBuff->data 
+       + (srcPR->y * dstBuff->rowstride)
+       + (srcPR->x * dstBuff->bpp);
+
+  if(srcPR->bpp == dstBuff->bpp)
+  {
+    /* at same bpp size we can use fast memcpy */
+    for (row = 0; row < srcPR->h; row++)
+    {
+       memcpy(dest, src, srcPR->w * srcPR->bpp);
+       src  += srcPR->rowstride;
+       dest += dstBuff->rowstride;
+    }
+    return;
+  
+  }
+
+
+  if((srcPR->bpp != dstBuff->bpp)
+  && (dstBuff->bpp == 3))
+  {
+    guchar       *RAW_ptr;
+    gint32        l_idx;
+    gint32        l_red;
+    gint32        l_green;
+    gint32        l_blue;
+
+    l_red   = 0;
+    l_green = 1;
+    l_blue  = 2;
+    if((drawable_type == GIMP_GRAY_IMAGE)
+    || (drawable_type == GIMP_GRAYA_IMAGE))
+    {
+      l_green = 0;
+      l_blue  = 0;
+    }
+    
+    /* copy gray or rgb channel(s) from src tile to RGB dest buffer */
+    for (row = 0; row < srcPR->h; row++)
+    {
+      RAW_ptr = dest;
+      for(l_idx=0; l_idx < srcPR->rowstride; l_idx += srcPR->bpp)
+      {
+        *(RAW_ptr++) = src[l_idx + l_red];
+        *(RAW_ptr++) = src[l_idx + l_green];
+        *(RAW_ptr++) = src[l_idx + l_blue];
+      }
+
+      src  += srcPR->rowstride;
+      dest += dstBuff->rowstride;
+    }
+    return;
+  }
+
+  
+  printf("** ERROR p_copyPixelRegionToRgbBuffer: unsupported conversion from src bpp:%d to  dest bpp:%d\n"
+    , (int)srcPR->bpp
+    , (int)dstBuff->bpp
+    );
+  
+}  /* end p_copyPixelRegionToRgbBuffer */
+
+
+
+
+/* ------------------------------------
+ * gap_gve_drawable_to_RgbBuffer        for singleprocessor
+ * ------------------------------------
+ * Encode drawable to RGBBuffer (Bytesequence RGB)
+ *
+ * Performance notes:
+ * logging runtime test with 720x480 videoframe was:
+ * id:021 gap_gve_drawable_to_RgbBuffer (singleprocessor tiled)            calls:001951 sum:8182955 min:2944 max:18477 avg:4194 usecs
+ * id:022 gap_gve_drawable_to_RgbBuffer (singleprocessor rect)             calls:001951 sum:9665986 min:3036 max:18264 avg:4954 usecs
+ *
+ * Note that this test used a gimp-2.6 installation from the Suse rpm package
+ * (this binary might have been compiled without multiprocessor support ?)
+ * TODO: repeat the test with a self-compiled gimp-2.6 to make sure that the gimp core
+ *       uses multiple threads (that might result in faster processing when
+ *       the full frame size is processed at once in the gimp core)
+ *       
+ *  
+ */
+void
+gap_gve_drawable_to_RgbBuffer(GimpDrawable *drawable, GapRgbPixelBuffer *rgbBuffer)
+{
+  GimpImageType    drawable_type;
+  GimpPixelRgn     srcPR;
+  gpointer         pr;
+
+  static gboolean           isPerftestInitialized = FALSE;          
+  static gboolean           isPerftestApiTilesDefault;
+  static gboolean           isPerftestApiTiles;          /* copy tile-by-tile versus gimp_pixel_rgn_get_rect all at once */
+
+  static gint32 funcIdTile = -1;
+  static gint32 funcIdRect = -1;
+
+  GAP_TIMM_GET_FUNCTION_ID(funcIdTile, "gap_gve_drawable_to_RgbBuffer (tiled)");
+  GAP_TIMM_GET_FUNCTION_ID(funcIdRect, "gap_gve_drawable_to_RgbBuffer (rect at once)");
+
+  /* PERFTEST configuration values to test performance of various strategies on multiprocessor machines */
+  if (isPerftestInitialized != TRUE)
+  {
+    isPerftestInitialized = TRUE;
+
+    if(gap_base_get_numProcessors() > 1)
+    {
+      /* copying the full size as one big rectangle gives the gimp core the chance to process with more than one thread */
+      isPerftestApiTilesDefault = FALSE;
+    }
+    else
+    {
+      /* tile based copy was a little bit faster in tests where gimp-core used only one CPU */
+      isPerftestApiTilesDefault = TRUE;
+    }
+    isPerftestApiTiles = gap_base_get_gimprc_gboolean_value("isPerftestApiTiles", isPerftestApiTilesDefault);
+  }
+
+  if(isPerftestApiTiles != TRUE)
+  {
+    if((drawable->bpp == rgbBuffer->bpp)
+    && (drawable->width == rgbBuffer->width)
+    && (drawable->height == rgbBuffer->height))
+    {
+      GAP_TIMM_START_FUNCTION(funcIdRect);
+ 
+      gimp_pixel_rgn_init (&srcPR, drawable, 0, 0
+                       , drawable->width, drawable->height
+                       , FALSE     /* dirty */
+                       , FALSE     /* shadow */
+                       );
+      gimp_pixel_rgn_get_rect (&srcPR, rgbBuffer->data
+                       , 0
+                       , 0
+                       , drawable->width
+                       , drawable->height);
+ 
+      GAP_TIMM_STOP_FUNCTION(funcIdRect);
+      return;  
+    }
+  }
+
+
+  GAP_TIMM_START_FUNCTION(funcIdTile);
+  
+     
+  drawable_type = gimp_drawable_type (drawable->drawable_id);
+  gimp_pixel_rgn_init (&srcPR, drawable, 0, 0
+                        , drawable->width, drawable->height
+                        , FALSE     /* dirty */
+                        , FALSE     /* shadow */
+                         );
+  for (pr = gimp_pixel_rgns_register (1, &srcPR);
+       pr != NULL;
+       pr = gimp_pixel_rgns_process (pr))
+  {
+      p_copyPixelRegionToRgbBuffer (&srcPR, rgbBuffer, drawable_type);
+  }
+  
+  GAP_TIMM_STOP_FUNCTION(funcIdTile);
+
+
+
+}    /* end gap_gve_drawable_to_RgbBuffer */
+
+
+
+
+
+// /* --------------------------------------------
+//  * p_drawable_to_RgbBuffer_WorkerThreadFunction
+//  * --------------------------------------------
+//  * this function runs in concurrent parallel worker threads in a multiprocessor environment.
+//  * each one of the parallel running threads processes another portion of the drawable
+//  * (e.g. vertical stripes starting at startCol and colWidth columns wide; the processed stripe differs for each thread)
+//  */
+// static void
+// p_drawable_to_RgbBuffer_WorkerThreadFunction(DrawableToRgbBufferProcessorData *drgb)
+// {
+//   GimpPixelRgn     srcPR;
+//   gpointer  pr;
+//   GStaticMutex    *gimpMutex;
+// 
+//   if(gap_debug)
+//   {
+//     printf("RgbBuffer_WorkerThreadFunction: START cpu[%d] drawableID:%d (before mutex_lock)\n"
+//       ,(int)drgb->cpuId
+//       ,(int)drgb->src_drawable->drawable_id
+//       );
+//   }  
+//   gimpMutex = gap_base_get_gimp_mutex();
+//    
+//   g_static_mutex_lock(gimpMutex);
+//  
+//   gimp_pixel_rgn_init (&srcPR, drgb->src_drawable
+//                       , drgb->startCol, 0
+//                       , drgb->colWidth, drgb->src_drawable->height
+//                       , FALSE     /* dirty */
+//                       , FALSE     /* shadow */
+//                        );
+//   for (pr = gimp_pixel_rgns_register (1, &srcPR);
+//        pr != NULL;
+//        pr = gimp_pixel_rgns_process (pr))
+//   {
+//       g_static_mutex_unlock(gimpMutex);
+//       p_copyPixelRegionToRgbBuffer (&srcPR, drgb->rgbBuffer, drgb->drawable_type);
+//       if(gap_debug)
+//       {
+//         printf("RgbBuffer_WorkerThreadFunction: PR-LOOP cpu[%d] drawableID:%d (before mutex_lock)\n"
+//           ,(int)drgb->cpuId
+//           ,(int)drgb->src_drawable->drawable_id
+//           );
+//       }  
+//       g_static_mutex_lock(gimpMutex);
+//   }
+// 
+//   if(gap_debug)
+//   {
+//     printf("RgbBuffer_WorkerThreadFunction: DONE cpu[%d] drawableID:%d\n"
+//       ,(int)drgb->cpuId
+//       ,(int)drgb->src_drawable->drawable_id
+//       );
+//   }  
+// 
+//   drgb->isFinished = TRUE;
+//   
+//   g_static_mutex_unlock(gimpMutex);
+//    
+// }  /* end p_drawable_to_RgbBuffer_WorkerThreadFunction */
+// 
+// 
+// 
+// /* -----------------------------------------
+//  * gap_gve_drawable_to_RgbBuffer_multithread
+//  * -----------------------------------------
+//  * Encode drawable to RGBBuffer (Bytesequence RGB) on multiprocessor machines.
+//  * This implementation uses threads to spread the work to the configured
+//  * number of processors by using a thread pool.
+//  *
+//  * ==> tests on my 4 CPU system showed that this runs slower than the singleprocessor implementation
+//  *     the main reason is:
+//  *       a lot of time is wasted waiting for the mutex that must be used to synchronize
+//  *       access to the gimp plug-in communication
+//  */
+// void
+// gap_gve_drawable_to_RgbBuffer_multithread(GimpDrawable *drawable, GapRgbPixelBuffer *rgbBuffer)
+// {
+// #define GAP_GVE_MAX_THREADS 16
+//   static GThreadPool  *threadPool = NULL;
+//   GStaticMutex        *gimpMutex;
+//   gboolean             isMultithreadEnabled;
+//   gint                 numThreads;
+//   gint                 tilesPerRow;
+//   gint                 tilesPerCpu;
+//   gint                 colsPerCpu;
+//   gint                 retry;
+//   gint                 startCol;
+//   gint                 colWidth;
+//   gint                 ii;
+//   GimpImageType        drawable_type;
+//   DrawableToRgbBufferProcessorData  drgbArray[GAP_GVE_MAX_THREADS];
+//   DrawableToRgbBufferProcessorData *drgb;
+//   GError *error;
+// 
+//   static gint32 funcId = -1;
+// 
+//   GAP_TIMM_GET_FUNCTION_ID(funcId, "gap_gve_drawable_to_RgbBuffer_multithread");
+//   GAP_TIMM_START_FUNCTION(funcId);
+// 
+// 
+//   error = NULL;
+// 
+//    
+//   numThreads = MIN(gap_base_get_numProcessors(), GAP_GVE_MAX_THREADS);
+// 
+//   /* check and init thread system */
+//   isMultithreadEnabled = gap_base_thread_init();
+//   
+//   if((isMultithreadEnabled != TRUE)
+//   || (drawable->height < numThreads)
+//   || (numThreads < 2))
+//   {
+//     /* use the singleprocessor implementation where the multithread variant is not
+//      * available or would be slower than singleprocessor implementation
+//      */
+//     gap_gve_drawable_to_RgbBuffer(drawable, rgbBuffer);
+//     return;
+//   }
+//     
+//   if (threadPool == NULL)
+//   {
+//     GError *error = NULL;
+// 
+// 
+//     /* init the threadPool at the first multiprocessing call
+//      * (and keep the threads until the end of the main process)
+//      */
+//     threadPool = g_thread_pool_new((GFunc) p_drawable_to_RgbBuffer_WorkerThreadFunction
+//                                          ,NULL        /* user data */
+//                                          ,GAP_GVE_MAX_THREADS          /* max_threads */
+//                                          ,TRUE        /* exclusive */
+//                                          ,&error      /* GError **error */
+//                                          );
+//   }
+// 
+//   gimpMutex = gap_base_get_gimp_mutex();
+//   drawable_type = gimp_drawable_type (drawable->drawable_id);
+// 
+//   tilesPerRow = drawable->width / gimp_tile_width();
+//   tilesPerCpu = tilesPerRow / numThreads;
+//   colsPerCpu = tilesPerCpu * gimp_tile_width();
+//   if(colsPerCpu < drawable->width)
+//   {
+//     tilesPerCpu++;
+//     colsPerCpu = tilesPerCpu * gimp_tile_width();
+//   }
+//   
+//   if(gap_debug)
+//   {
+//     printf("gap_gve_drawable_to_RgbBuffer_multithread drawableId :%d size:%d x %d tileWidth:%d numThreads:%d tilesPerCpu:%d colsPerCpu:%d\n"
+//       ,(int)drawable->drawable_id
+//       ,(int)drawable->width
+//       ,(int)drawable->height
+//       ,(int)gimp_tile_width()
+//       ,(int)numThreads
+//       ,(int)tilesPerCpu
+//       ,(int)colsPerCpu
+//       );
+//   }
+// 
+//   startCol = 0;
+//   colWidth = colsPerCpu;
+//   
+//   if(gap_debug)
+//   {
+//     printf("gap_gve_drawable_to_RgbBuffer_multithread: before LOCK gimpMutex INITIAL drawableId :%d\n"
+//         ,(int)drawable->drawable_id
+//       );
+//   }
+//   g_static_mutex_lock(gimpMutex);
+// 
+//   for(ii=0; ii < numThreads; ii++)
+//   {
+//     DrawableToRgbBufferProcessorData *drgb;
+// 
+//     if(gap_debug)
+//     {
+//       printf("gap_gve_drawable_to_RgbBuffer_multithread drawableId :%d Cpu[%d] startCol:%d colWidth:%d\n"
+//         ,(int)drawable->drawable_id
+//         ,(int)ii
+//         ,(int)startCol
+//         ,(int)colWidth
+//         );
+//     }
+//     
+//     drgb = &drgbArray[ii];
+//     drgb->drawable_type = drawable_type;
+//     drgb->src_drawable = drawable;
+//     drgb->rgbBuffer = rgbBuffer;
+//     
+//     drgb->startCol = startCol;
+//     drgb->colWidth = colWidth;
+//     drgb->cpuId = ii;
+//     drgb->isFinished = FALSE;
+//     
+//     /* (re)activate thread */
+//     g_thread_pool_push (threadPool
+//                        , drgb    /* user Data for the worker thread*/
+//                        , &error
+//                        );
+//     startCol += colsPerCpu;
+//     if((startCol + colsPerCpu) >  drawable->width)
+//     {
+//       /* the last thread handles a stripe with the remaining columns
+//        * (note that the partitioning into stripes is done in tile-width portions
+//        * to avoid concurrent thread access to the same tile.)
+//        */
+//       colWidth = drawable->width - startCol;
+//     }
+//   }
+// 
+//   g_static_mutex_unlock(gimpMutex);
+// 
+//   for(retry = 0; retry < 2000; retry++)
+//   {
+//     gboolean isAllWorkDone;
+// 
+//     /* assume all threads finished work (this may already be the case after the first retry) */
+//     isAllWorkDone = TRUE;
+//     
+//     // g_static_mutex_lock(gimpMutex);
+//     
+//     for(ii=0; ii < numThreads; ii++)
+//     {
+//       drgb = &drgbArray[ii];
+//       if(gap_debug)
+//       {
+//         printf("gap_gve_drawable_to_RgbBuffer_multithread: STATUS Cpu[%d] retry :%d  isFinished:%d\n"
+//           ,(int)ii
+//           ,(int)retry
+//           ,(int)drgb->isFinished
+//           );
+//       }
+//       if(drgb->isFinished != TRUE)
+//       {
+//         isAllWorkDone = FALSE;
+//         break;
+//       }
+//     }
+// 
+//     // g_static_mutex_unlock(gimpMutex);
+//      
+//     if(isAllWorkDone == TRUE)
+//     {
+//       break;
+//     }
+//     
+//     if(gap_debug)
+//     {
+//       printf("gap_gve_drawable_to_RgbBuffer_multithread: SLEEP retry :%d\n"
+//         ,(int)retry
+//         );
+//     }
+//   
+//     g_usleep (500);
+// 
+//     if(gap_debug)
+//     {
+//       printf("gap_gve_drawable_to_RgbBuffer_multithread: WAKE-UP drawableId:%d retry :%d\n"
+//         , (int)drawable->drawable_id
+//         , (int)retry
+//         );
+//     }
+// 
+//     
+//   }
+// 
+// 
+//   GAP_TIMM_STOP_FUNCTION(funcId);
+// 
+//   if(gap_debug)
+//   {
+//     printf("gap_gve_drawable_to_RgbBuffer_multithread: DONE drawableId:%d retry :%d\n"
+//         , (int)drawable->drawable_id
+//         , (int)retry
+//         );
+//   }
+//   
+// }    /* end gap_gve_drawable_to_RgbBuffer_multithread */
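Note on the disabled code above: it waits for its worker threads by polling drgb->isFinished with g_usleep(500) in a retry loop. If this variant is ever revived, a condition-variable based completion wait (the same GCond pattern the EncoderQueue code further down uses for frameEncodedCond) would avoid the busy waiting. A minimal sketch, not part of this patch; workDoneMutex, workDoneCond and finishedCount are hypothetical names that would have to be shared with the worker function:

    /* hypothetical sketch: GCond based completion wait instead of the g_usleep polling loop */
    static GMutex *workDoneMutex = NULL;   /* assumed created once via g_mutex_new()       */
    static GCond  *workDoneCond  = NULL;   /* assumed created once via g_cond_new()        */
    static gint    finishedCount = 0;      /* reset to 0 before the stripes are dispatched */

    /* worker thread: at the end of p_drawable_to_RgbBuffer_WorkerThreadFunction */
    g_mutex_lock (workDoneMutex);
    finishedCount++;
    g_cond_signal (workDoneCond);
    g_mutex_unlock (workDoneMutex);

    /* main thread: replaces the retry / g_usleep loop */
    g_mutex_lock (workDoneMutex);
    while (finishedCount < numThreads)
    {
      g_cond_wait (workDoneCond, workDoneMutex);   /* releases the mutex while waiting */
    }
    g_mutex_unlock (workDoneMutex);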
diff --git a/libgapvidutil/gap_gve_raw.h b/libgapvidutil/gap_gve_raw.h
index 000f3bb..0dbeba4 100644
--- a/libgapvidutil/gap_gve_raw.h
+++ b/libgapvidutil/gap_gve_raw.h
@@ -37,6 +37,14 @@
 #include "gtk/gtk.h"
 #include "libgimp/gimp.h"
 
+typedef struct GapRgbPixelBuffer
+{
+  guchar       *data;          /* pointer to region data */
+  guint         width;         /* width in pixels */
+  guint         height;        /* height in pixels */
+  guint         bpp;           /* bytes per pixel (always initialized with 3) */
+  guint         rowstride;     /* bytes per pixel row */
+} GapRgbPixelBuffer;
 
 /* ------------------------------------
  *  gap_gve_raw_BGR_drawable_encode
@@ -53,6 +61,15 @@ guchar *
 gap_gve_raw_BGR_drawable_encode(GimpDrawable *drawable, gint32 *RAW_size, gboolean vflip
                         ,guchar *app0_buffer, gint32 app0_length);
 
+
+/* ------------------------------------
+ * gap_gve_raw_RGB_drawable_encode
+ * ------------------------------------
+ * Encode drawable to RAW Buffer (Bytesequence RGB)
+ * NOTE: this implementation should be used only when the app0 buffer or the vflip feature is required,
+ *       because it is rather slow due to its row by row processing.
+ *       (for a faster implementation of RGB conversions see procedure gap_gve_drawable_to_RgbBuffer)
+ */
 guchar *
 gap_gve_raw_RGB_drawable_encode(GimpDrawable *drawable, gint32 *RAW_size, gboolean vflip
                         ,guchar *app0_buffer, gint32 app0_length);
@@ -114,4 +131,53 @@ guchar *
 gap_gve_raw_YUV420_drawable_encode(GimpDrawable *drawable, gint32 *RAW_size, gboolean vflip
                         ,guchar *app0_buffer, gint32 app0_length);
 
+
+
+/* ------------------------------------
+ * gap_gve_init_GapRgbPixelBuffer
+ * ------------------------------------
+ *
+ */
+void
+gap_gve_init_GapRgbPixelBuffer(GapRgbPixelBuffer *rgbBuffer, guint width, guint height);
+
+/* ------------------------------------
+ * gap_gve_new_GapRgbPixelBuffer
+ * ------------------------------------
+ *
+ */
+GapRgbPixelBuffer *
+gap_gve_new_GapRgbPixelBuffer(guint width, guint height);
+
+
+/* ------------------------------------
+ * gap_gve_free_GapRgbPixelBuffer
+ * ------------------------------------
+ *
+ */
+void
+gap_gve_free_GapRgbPixelBuffer(GapRgbPixelBuffer *rgbBuffer);
+
+
+/* ------------------------------------
+ * gap_gve_drawable_to_RgbBuffer
+ * ------------------------------------
+ * Encode drawable to RGBBuffer (Bytesequence RGB)
+ *
+ */
+void
+gap_gve_drawable_to_RgbBuffer(GimpDrawable *drawable, GapRgbPixelBuffer *rgbBuffer);
+
+/* -----------------------------------------
+ * gap_gve_drawable_to_RgbBuffer_multithread
+ * -----------------------------------------
+ * Encode drawable to RGBBuffer (Bytesequence RGB) on multiprocessor machines.
+ * This implementation uses threads to spread the work to the configured
+ * number of processors by using a thread pool.
+ *
+ */
+void
+gap_gve_drawable_to_RgbBuffer_multithread(GimpDrawable *drawable, GapRgbPixelBuffer *rgbBuffer);
+
 #endif
+
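For reference, a minimal usage sketch of the GapRgbPixelBuffer API declared above (not part of this patch). It assumes that gap_gve_new_GapRgbPixelBuffer also allocates the data buffer and that drawable_id refers to an RGB layer:

    GimpDrawable      *drawable;
    GapRgbPixelBuffer *rgbBuffer;

    drawable  = gimp_drawable_get (drawable_id);
    rgbBuffer = gap_gve_new_GapRgbPixelBuffer (drawable->width, drawable->height);

    /* fill rgbBuffer->data with packed RGB (bpp == 3, rowstride == width * bpp) */
    gap_gve_drawable_to_RgbBuffer (drawable, rgbBuffer);

    /* ... hand rgbBuffer->data to the videoencoder ... */

    gap_gve_free_GapRgbPixelBuffer (rgbBuffer);
    gimp_drawable_detach (drawable);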
diff --git a/vid_common/gap_cme_gui.c b/vid_common/gap_cme_gui.c
index 7336ce5..70fbc5c 100644
--- a/vid_common/gap_cme_gui.c
+++ b/vid_common/gap_cme_gui.c
@@ -4339,7 +4339,8 @@ gap_cme_gui_master_encoder_dialog(GapCmeGlobalParams *gpp)
   if(gap_debug) printf("gap_cme_gui_master_encoder_dialog: Start\n");
 
 #ifdef GAP_USE_GTHREAD
-  g_thread_init (NULL);
+  /* check and init thread system */
+  gap_base_thread_init();
   gdk_threads_init ();
   gdk_threads_enter ();
 #endif
diff --git a/vid_enc_avi/gap_enc_avi_gui.c b/vid_enc_avi/gap_enc_avi_gui.c
index a87e27b..4e74f11 100644
--- a/vid_enc_avi/gap_enc_avi_gui.c
+++ b/vid_enc_avi/gap_enc_avi_gui.c
@@ -193,6 +193,8 @@ p_init_widget_values(GapGveAviGlobalParams *gpp)
                                , epp->APP0_marker);
   gtk_toggle_button_set_active (GTK_TOGGLE_BUTTON (gpp->raw_vflip_checkbutton)
                                , epp->raw_vflip);
+  gtk_toggle_button_set_active (GTK_TOGGLE_BUTTON (gpp->raw_bgr_checkbutton)
+                               , epp->raw_bgr);
 
   /* set initial value according to codec */
   {
@@ -303,6 +305,8 @@ p_set_codec_dependent_wgt_senistive(GapGveAviGlobalParams *gpp, gint32 idx)
 
   if(gpp->raw_vflip_checkbutton)
     gtk_widget_set_sensitive(gpp->raw_vflip_checkbutton, raw_sensitive);
+  if(gpp->raw_bgr_checkbutton)
+    gtk_widget_set_sensitive(gpp->raw_bgr_checkbutton, raw_sensitive);
 
 }  /* end p_set_codec_dependent_wgt_senistive  */
 
@@ -556,7 +560,9 @@ p_create_shell_window (GapGveAviGlobalParams *gpp)
                    , &epp->APP0_marker);
   gimp_help_set_help_data (checkbutton
                    , _("Write APP0 Marker for each encoded frame."
-                       " (The APP0 marker is evaluated by some windows programs for AVIs)")
+                       " The APP0 marker is evaluated by some Windows programs for AVIs,"
+                       " but can cause playback with wrong colors on some players."
+                       " (In most cases you should NOT write the APP0 marker.)")
                    , NULL);
 
   master_row++;
@@ -884,6 +890,35 @@ p_create_shell_window (GapGveAviGlobalParams *gpp)
                        "or as is (suitable for gmplayer on linux)")
                    , NULL);
 
+  raw_row++;
+
+  /* the BGR label */
+  label = gtk_label_new (_("BGR (rgb):"));
+  gtk_widget_show (label);
+  gtk_misc_set_alignment (GTK_MISC (label), 0.0, 0.5);
+  gtk_table_attach (GTK_TABLE (table_raw), label, 0, 1, raw_row, raw_row+1,
+                    (GtkAttachOptions) (GTK_FILL),
+                    (GtkAttachOptions) (0), 0, 0);
+  gtk_label_set_justify (GTK_LABEL (label), GTK_JUSTIFY_RIGHT);
+
+
+  /* the bgr checkbutton */
+  checkbutton = gtk_check_button_new_with_label (" ");
+  gpp->raw_bgr_checkbutton = checkbutton;
+  gtk_widget_show (checkbutton);
+  gtk_table_attach (GTK_TABLE (table_raw), checkbutton, 1, 2, raw_row, raw_row+1,
+                    (GtkAttachOptions) (GTK_FILL),
+                    (GtkAttachOptions) (0), 0, 0);
+        g_object_set_data (G_OBJECT (checkbutton), "gpp"
+                          , (gpointer)gpp);
+        g_signal_connect (G_OBJECT (checkbutton), "toggled"
+                   , G_CALLBACK (on_checkbutton_toggled)
+                   , &epp->raw_bgr);
+  gimp_help_set_help_data (checkbutton
+                   , _("Check if you want to encode frames in the BGR (preferred) or RGB colormodel. "
+                       "Most players (like WinDVD and the VLC player) want the BGR colormodel for RAW data; "
+                       "other players want the RGB colormodel for RAW avi data.")
+                   , NULL);
 
 #ifdef ENABLE_LIBXVIDCORE
 
@@ -1199,6 +1234,7 @@ gap_enc_avi_gui_dialog(GapGveAviGlobalParams *gpp)
   gpp->xvid_max_key_interval_spinbutton = NULL;
   gpp->xvid_quality_spinbutton = NULL;
   gpp->raw_vflip_checkbutton = NULL;
+  gpp->raw_bgr_checkbutton = NULL;
 
 
   gpp->shell_window = p_create_shell_window (gpp);
diff --git a/vid_enc_avi/gap_enc_avi_main.c b/vid_enc_avi/gap_enc_avi_main.c
index fee831b..256009d 100644
--- a/vid_enc_avi/gap_enc_avi_main.c
+++ b/vid_enc_avi/gap_enc_avi_main.c
@@ -178,7 +178,8 @@ query ()
     {GIMP_PDB_INT32, "png_dont_recode_frames", "=1: store the frames _directly_ into the AVI where possible. "
                                  "(works only for codec_name PNG )"},
     {GIMP_PDB_INT32, "png_interlaced", "=1: interlaced png frames, 0= no interlace"},
-    {GIMP_PDB_INT32, "png_compression", "the compression of the coded pngs (0 - 9) where 9 is best and 0 is fast "}
+    {GIMP_PDB_INT32, "png_compression", "the compression of the coded pngs (0 - 9) where 9 is best and 0 is fast "},
+    {GIMP_PDB_INT32, "raw_bgr", "=1: bgr else rgb colormodel (only for codec_name RAW and RGB )"}
   };
   static int nargs_avi_enc_par = sizeof(args_avi_enc_par) / sizeof(args_avi_enc_par[0]);
 
@@ -390,6 +391,7 @@ run (const gchar *name,          /* name of plugin */
            epp->png_dont_recode_frames = param[l_ii++].data.d_int32;
            epp->png_interlaced    = param[l_ii++].data.d_int32;
            epp->png_compression   = param[l_ii++].data.d_int32;
+           epp->raw_bgr           = param[l_ii++].data.d_int32;
 
         }
       }
@@ -569,6 +571,7 @@ gap_enc_avi_main_init_default_params(GapGveAviValues *epp)
   epp->APP0_marker = TRUE;
 
   epp->raw_vflip    = 1;
+  epp->raw_bgr    = 1;
 }  /* end gap_enc_avi_main_init_default_params */
 
 
@@ -1013,18 +1016,30 @@ p_avi_encode(GapGveAviGlobalParams *gpp)
           ||  (strcmp(epp->codec_name, GAP_AVI_CODEC_RGB) == 0))
           {
             gboolean l_vflip;
+            gboolean l_convertToBGR;
 
-            /* fill buffer with raw 24bit data, optional flipped.
-             * it seems that some AVI players (for instance the WinDVD player)
-             *  require the inverse row order than gimp,
-             *  and other players (like gmplayer on unix) does not need vflipped images.
+            /* fill buffer with raw 24bit data, optionally flipped or converted to BGR.
+             * it seems that some AVI players (for instance the WinDVD and VLC players)
+             *  require the inverse row order compared to gimp and the BGR colormodel,
+             *  while other players (like gmplayer on unix) do not want vflipped images.
              */
             l_vflip = FALSE;
             if(epp->raw_vflip != 0)
             {
               l_vflip = TRUE;
             }
-            buffer = gap_gve_raw_BGR_drawable_encode(l_drawable, &l_FRAME_size, l_vflip, l_app0_buffer, l_app0_len);
+            l_convertToBGR = FALSE;
+            if(epp->raw_bgr != 0)
+            {
+              l_convertToBGR = TRUE;
+            }
+            buffer = gap_gve_raw_RGB_or_BGR_drawable_encode(l_drawable
+                     , &l_FRAME_size
+                     , l_vflip
+                     , l_app0_buffer
+                     , l_app0_len
+                     , l_convertToBGR
+                     );
           }
 #ifdef ENABLE_LIBXVIDCORE
           else
diff --git a/vid_enc_avi/gap_enc_avi_main.h b/vid_enc_avi/gap_enc_avi_main.h
index 27d8d82..7cadd78 100644
--- a/vid_enc_avi/gap_enc_avi_main.h
+++ b/vid_enc_avi/gap_enc_avi_main.h
@@ -71,6 +71,7 @@ typedef struct {
 
   /* for the "RGB " (== raw) CODEC */
   gint32 raw_vflip;
+  gint32 raw_bgr;    /* TRUE: BGR (default) FALSE: RGB */
 
   /* for the "PNG " CODEC */
   gint32 png_dont_recode_frames;
@@ -115,6 +116,7 @@ typedef struct GapGveAviGlobalParams {   /* nick: gpp */
   GtkObject *xvid_quality_spinbutton_adj;
   
   GtkWidget *raw_vflip_checkbutton;
+  GtkWidget *raw_bgr_checkbutton;
 
   GtkWidget *png_dont_recode_checkbutton;
   GtkWidget *png_interlace_checkbutton;
diff --git a/vid_enc_ffmpeg/gap_enc_ffmpeg_callbacks.c b/vid_enc_ffmpeg/gap_enc_ffmpeg_callbacks.c
index 930e699..c909509 100644
--- a/vid_enc_ffmpeg/gap_enc_ffmpeg_callbacks.c
+++ b/vid_enc_ffmpeg/gap_enc_ffmpeg_callbacks.c
@@ -491,12 +491,11 @@ on_ff_presets_combo  (GtkWidget     *widget,
     printf("CB: on_ff_presets_combo index: %d\n", (int)l_idx);
   }
 
-  if((l_idx <= GAP_GVE_FFMPEG_PRESET_MAX_ELEMENTS) && (l_idx > 0))
+  if(l_idx > 0)
   {
     /* index 0 is used for OOPS do not use preset menu entry and is not a PRESET
-     * menu_index 1  does access presets[0]
      */
-    l_idx--;
+    l_idx;
 
     if(gap_debug)
     {
@@ -505,16 +504,19 @@ on_ff_presets_combo  (GtkWidget     *widget,
     gap_enc_ffmpeg_main_init_preset_params(&gpp->evl, l_idx);
     gap_enc_ffgui_init_main_dialog_widgets(gpp);                /* update all wdgets */
 
+
     /* switch back to index 0 (OOPS, do not change presets)
      * after presets were loaded.
      */
     gimp_int_combo_box_set_active (GIMP_INT_COMBO_BOX (widget), GAP_GVE_FFMPEG_PRESET_00_NONE);
   }
+
 }  /* end on_ff_presets_combo */
 
 
 
 
+
 /* --------------------------------
  * on_ff_aspect_combo
  * --------------------------------
diff --git a/vid_enc_ffmpeg/gap_enc_ffmpeg_gui.c b/vid_enc_ffmpeg/gap_enc_ffmpeg_gui.c
index c298271..29eacd1 100644
--- a/vid_enc_ffmpeg/gap_enc_ffmpeg_gui.c
+++ b/vid_enc_ffmpeg/gap_enc_ffmpeg_gui.c
@@ -687,6 +687,12 @@ p_init_combo_vals(GapGveFFMpegGlobalParams *gpp)
 {
   char *name;
 
+
+  if(gap_debug)
+  {
+    printf("p_init_combo_vals START\n");
+  }
+
   p_init_gint_combo_active(gpp, gpp->ff_motion_estimation_combo
                               , &gtab_motion_est[0]
                               , gpp->evl.motion_estimation
@@ -767,6 +773,11 @@ p_init_combo_vals(GapGveFFMpegGlobalParams *gpp)
   name = p_init_combo_actual_nameidx(gpp, gpp->ff_vid_codec_combo,  glist_vid_codec,  gpp->evl.vcodec_name);
   name = p_init_combo_actual_nameidx(gpp, gpp->ff_aud_codec_combo,  glist_aud_codec,  gpp->evl.acodec_name);
 
+  if(gap_debug)
+  {
+    printf("p_init_combo_vals DONE\n");
+  }
+
 }  /* end p_init_combo_vals */
 
 
@@ -3936,16 +3947,27 @@ p_create_ffmpeg_dialog_shell (GapGveFFMpegGlobalParams *gpp)
   /* the presets combo */
   combo = gimp_int_combo_box_new (
      _("** OOPS do not change any parameter **"),   GAP_GVE_FFMPEG_PRESET_00_NONE,
-     _("use DivX default presets"),                 GAP_GVE_FFMPEG_PRESET_01_DIVX_DEFAULT,
-     _("use DivX high quality presets"),            GAP_GVE_FFMPEG_PRESET_02_DIVX_BEST,
-     _("use DivX low quality presets"),             GAP_GVE_FFMPEG_PRESET_03_DIVX_LOW,
-     _("use DivX WINDOWS presets"),                 GAP_GVE_FFMPEG_PRESET_04_DIVX_MS,
-     _("use MPEG1 (VCD) presets"),                  GAP_GVE_FFMPEG_PRESET_05_MPEG1_VCD,
-     _("use MPEG1 high quality presets"),           GAP_GVE_FFMPEG_PRESET_06_MPEG1_BEST,
-     _("use MPEG2 (SVCD) presets"),                 GAP_GVE_FFMPEG_PRESET_07_MPEG2_SVCD,
-     _("use MPEG2 (DVD) presets"),                  GAP_GVE_FFMPEG_PRESET_08_MPEG2_DVD,
-     _("use REAL video presets"),                   GAP_GVE_FFMPEG_PRESET_09_REAL,
+     _("DivX default preset"),                 GAP_GVE_FFMPEG_PRESET_01_DIVX_DEFAULT,
+     _("DivX high quality preset"),            GAP_GVE_FFMPEG_PRESET_02_DIVX_BEST,
+     _("DivX low quality preset"),             GAP_GVE_FFMPEG_PRESET_03_DIVX_LOW,
+     _("DivX WINDOWS preset"),                 GAP_GVE_FFMPEG_PRESET_04_DIVX_MS,
+     _("MPEG1 (VCD) preset"),                  GAP_GVE_FFMPEG_PRESET_05_MPEG1_VCD,
+     _("MPEG1 high quality preset"),           GAP_GVE_FFMPEG_PRESET_06_MPEG1_BEST,
+     _("MPEG2 (SVCD) preset"),                 GAP_GVE_FFMPEG_PRESET_07_MPEG2_SVCD,
+     _("MPEG2 (DVD) preset"),                  GAP_GVE_FFMPEG_PRESET_08_MPEG2_DVD,
+     _("REAL video preset"),                   GAP_GVE_FFMPEG_PRESET_09_REAL,
      NULL);
+  {
+    GapGveFFMpegValues *epp;
+    
+    for(epp = gap_ffpar_getPresetList(); epp != NULL; epp = epp->next)
+    {
+      gimp_int_combo_box_append (GIMP_INT_COMBO_BOX (combo),
+                                 GIMP_INT_STORE_VALUE, epp->presetId,
+                                 GIMP_INT_STORE_LABEL, &epp->presetName[0],
+                                 -1);
+    }
+  }
   gpp->ff_presets_combo                   = combo;
 
   gimp_int_combo_box_connect (GIMP_INT_COMBO_BOX (combo),
diff --git a/vid_enc_ffmpeg/gap_enc_ffmpeg_main.c b/vid_enc_ffmpeg/gap_enc_ffmpeg_main.c
old mode 100644
new mode 100755
index 2fea114..5fe8693
--- a/vid_enc_ffmpeg/gap_enc_ffmpeg_main.c
+++ b/vid_enc_ffmpeg/gap_enc_ffmpeg_main.c
@@ -88,6 +88,8 @@
 #include "gap_enc_ffmpeg_par.h"
 
 #include "gap_audio_wav.h"
+#include "gap_base.h"
+
 
 /* FFMPEG defaults */
 #define DEFAULT_PASS_LOGFILENAME "ffmpeg2pass"
@@ -122,6 +124,10 @@
 #define MAX_VIDEO_STREAMS 1
 #define MAX_AUDIO_STREAMS 16
 
+#define ENCODER_QUEUE_RINGBUFFER_SIZE 4
+ 
+
+
 typedef struct t_audio_work
 {
   FILE *fp_inwav;
@@ -157,8 +163,6 @@ typedef struct t_ffmpeg_video
  AVCodecContext  *vid_codec_context;
  AVCodec         *vid_codec;
  AVFrame         *big_picture_codec;
- uint8_t         *yuv420_buffer;
- int              yuv420_buffer_size;
  uint8_t         *video_buffer;
  int              video_buffer_size;
  uint8_t         *video_dummy_buffer;
@@ -181,7 +185,7 @@ typedef struct t_ffmpeg_audio
 
 
 
-typedef struct t_ffmpeg_handle
+typedef struct t_ffmpeg_handle         /* ffh */
 {
  AVFormatContext *output_context;
  AVOutputFormat  *file_oformat;
@@ -209,9 +213,66 @@ typedef struct t_ffmpeg_handle
  int countVideoFramesWritten;
  uint8_t  *convert_buffer;
  gint32    validEncodeFrameNr;
+ 
+ gboolean      isMultithreadEnabled;
+
 } t_ffmpeg_handle;
 
 
+typedef enum
+{
+   EQELEM_STATUS_FREE
+  ,EQELEM_STATUS_READY
+  ,EQELEM_STATUS_LOCK        
+} EncoderQueueElemStatusEnum;
+
+
+typedef struct EncoderQueueElem  /* eq_elem */
+{
+  AVFrame                      *eq_big_picture_codec[MAX_VIDEO_STREAMS];   /* picture data to feed the encoder codec */
+  gint32                        encode_frame_nr;
+  gint                          vid_track;
+  gboolean                      force_keyframe;
+  
+  EncoderQueueElemStatusEnum    status;
+  GMutex                       *elemMutex;
+  void                         *next;
+
+} EncoderQueueElem;
+
+
+typedef struct EncoderQueue    /* eque */
+{
+  gint                numberOfElements;
+  EncoderQueueElem   *eq_root;           /* 1st element in the ringbuffer */
+  EncoderQueueElem   *eq_write_ptr;      /* reserved for the frame fetcher thread to write into the ringbuffer */
+  EncoderQueueElem   *eq_read_ptr;       /* reserved for the encoder thread to read frames from the ringbuffer */
+
+  t_ffmpeg_handle     *ffh;
+  t_awk_array         *awp;
+  gint                 runningThreadsCounter;
+  
+  GCond               *frameEncodedCond;  /* sent each time the encoder finished one frame */
+  GMutex              *poolMutex;
+
+
+  /* debug attributes reserved for runtime measuring in the main thread */
+  GapTimmRecord       mainElemMutexWaits;
+  GapTimmRecord       mainPoolMutexWaits;
+  GapTimmRecord       mainEnqueueWaits;
+  GapTimmRecord       mainPush2;
+  GapTimmRecord       mainWriteFrame;
+  GapTimmRecord       mainDrawableToRgb;
+  GapTimmRecord       mainReadFrame;
+
+
+  /* debug attributes reserved for runtime measuring in the encoder thread */
+  GapTimmRecord       ethreadElemMutexWaits;
+  GapTimmRecord       ethreadPoolMutexWaits;
+  GapTimmRecord       ethreadEncodeFrame;
+
+} EncoderQueue;
+
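The EncoderQueue above is shared between the main (frame fetching) thread and the encoder worker thread. A condensed, simplified sketch of the intended handshake around one ringbuffer element follows; the real control flow (the EQELEM_STATUS_LOCK state, trylock fallbacks and runningThreadsCounter bookkeeping) is implemented in the functions introduced further down, so this is illustrative only:

    /* main thread (producer): fill the element at eq_write_ptr and mark it READY */
    g_mutex_lock   (eque->eq_write_ptr->elemMutex);
    p_fillQueueElem (eque, gapStoryFetchResult, force_keyframe, vid_track);
    eque->eq_write_ptr->status = EQELEM_STATUS_READY;
    g_mutex_unlock (eque->eq_write_ptr->elemMutex);
    g_thread_pool_push (encoderThreadPool, eque, NULL);     /* (re)activate the encoder worker */

    /* encoder thread (consumer): encode the element at eq_read_ptr and mark it FREE again */
    g_mutex_lock   (eque->eq_read_ptr->elemMutex);
    p_encodeCurrentQueueElem (eque);
    eque->eq_read_ptr->status = EQELEM_STATUS_FREE;
    g_mutex_unlock (eque->eq_read_ptr->elemMutex);

    g_mutex_lock   (eque->poolMutex);
    g_cond_signal  (eque->frameEncodedCond);                /* wake up a waiting main thread */
    g_mutex_unlock (eque->poolMutex);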
 
 /* ------------------------
  * global gap DEBUG switch
@@ -223,6 +284,8 @@ typedef struct t_ffmpeg_handle
 
 int gap_debug = 0;
 
+static GThreadPool         *encoderThreadPool = NULL;
+
 GapGveFFMpegGlobalParams global_params;
 int global_nargs_ffmpeg_enc_par;
 
@@ -233,7 +296,7 @@ static void run(const gchar *name
               , gint *nreturn_vals
               , GimpParam **return_vals);
 
-
+static int    p_base_get_thread_id_as_int();
 static void   p_debug_print_dump_AVCodecContext(AVCodecContext *codecContext);
 static int    p_av_metadata_set(AVMetadata **pm, const char *key, const char *value, int flags);
 static void   p_set_flag(gint32 value_bool32, int *flag_ptr, int maskbit);
@@ -285,7 +348,31 @@ static t_ffmpeg_handle * p_ffmpeg_open(GapGveFFMpegGlobalParams *gpp
                                       );
 static int    p_ffmpeg_write_frame_chunk(t_ffmpeg_handle *ffh, gint32 encoded_size, gint vid_track);
 static void   p_convert_colormodel(t_ffmpeg_handle *ffh, AVPicture *picture_codec, guchar *rgb_buffer, gint vid_track);
-static int    p_ffmpeg_write_frame(t_ffmpeg_handle *ffh, GimpDrawable *drawable, gboolean force_keyframe, gint vid_track, gboolean useYUV420P);
+
+static int    p_ffmpeg_encodeAndWriteVideoFrame(t_ffmpeg_handle *ffh, AVFrame *picture_codec
+                     , gboolean force_keyframe, gint vid_track, gint32 encode_frame_nr);
+
+static void   p_ffmpeg_convert_GapStoryFetchResult_to_AVFrame(t_ffmpeg_handle *ffh
+                     , AVFrame *picture_codec
+                     , GapStoryFetchResult *gapStoryFetchResult
+                     , gint vid_track
+                     );
+
+static void   p_create_EncoderQueueRingbuffer(EncoderQueue *eque);
+static EncoderQueue * p_init_EncoderQueueResources(t_ffmpeg_handle *ffh, t_awk_array *awp);
+static void           p_debug_print_RingbufferStatus(EncoderQueue *eque);
+static void           p_waitUntilEncoderQueIsProcessed(EncoderQueue *eque);
+static void           p_free_EncoderQueueResources(EncoderQueue     *eque);
+
+static void   p_fillQueueElem(EncoderQueue *eque, GapStoryFetchResult *gapStoryFetchResult, gboolean force_keyframe, gint vid_track);
+static void   p_encodeCurrentQueueElem(EncoderQueue *eque);
+static void   p_encoderWorkerThreadFunction (EncoderQueue *eque);
+static int    p_ffmpeg_write_frame_and_audio_multithread(EncoderQueue *eque, GapStoryFetchResult *gapStoryFetchResult, gboolean force_keyframe, gint vid_track);
+
+
+
+
+static int    p_ffmpeg_write_frame(t_ffmpeg_handle *ffh, GapStoryFetchResult *gapStoryFetchResult, gboolean force_keyframe, gint vid_track);
 static int    p_ffmpeg_write_audioframe(t_ffmpeg_handle *ffh, guchar *audio_buf, int frame_bytes, gint aud_track);
 static void   p_ffmpeg_close(t_ffmpeg_handle *ffh);
 static gint   p_ffmpeg_encode(GapGveFFMpegGlobalParams *gpp);
@@ -696,6 +783,21 @@ run (const gchar      *name,
 
 }       /* end run */
 
+static guint64
+p_timespecDiff(GTimeVal *startTimePtr, GTimeVal *endTimePtr)
+{
+  return ((endTimePtr->tv_sec * G_USEC_PER_SEC) + endTimePtr->tv_usec) -
+           ((startTimePtr->tv_sec * G_USEC_PER_SEC) + startTimePtr->tv_usec);
+}
+
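p_timespecDiff returns the elapsed time between two GTimeVal samples in microseconds; a minimal usage sketch (not part of this patch):

    GTimeVal startTime;
    GTimeVal endTime;
    guint64  elapsedUsec;

    g_get_current_time (&startTime);
    /* ... work to be measured ... */
    g_get_current_time (&endTime);

    elapsedUsec = p_timespecDiff (&startTime, &endTime);
    printf ("elapsed: %llu usec\n", (unsigned long long)elapsedUsec);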
+static int
+p_base_get_thread_id_as_int()
+{
+  int tid;
+  
+  tid = gap_base_get_thread_id();
+  return (tid);
+}
 
 /* ---------------------------------------
  * p_debug_print_dump_AVCodecContext
@@ -1014,9 +1116,38 @@ p_gimp_get_data(const char *key, void *buffer, gint expected_size)
 
 
 /* ----------------------------------
- * gap_enc_ffmpeg_main_init_preset_params
+ * p_initPresetFromPresetIndex
  * ----------------------------------
+ * copy the preset specified by presetId
+ * to the preset buffer provided by epp.
+ * Note: in case the presetId is not found, epp is left unchanged.
+ */
+static void
+p_initPresetFromPresetIndex(GapGveFFMpegValues *epp, gint presetId)
+{
+  GapGveFFMpegValues *eppAtId;
+
+  for(eppAtId = gap_ffpar_getPresetList(); eppAtId != NULL; eppAtId = eppAtId->next)
+  {
+    if (eppAtId->presetId == presetId)
+    {
+      break;
+    }
+  }
+  
+  if(eppAtId != NULL)
+  {
+    memcpy(epp, eppAtId, sizeof(GapGveFFMpegValues));
+  }
+
+}  /* end p_initPresetFromPresetIndex */
+
+
+/* --------------------------------------
+ * gap_enc_ffmpeg_main_init_preset_params
+ * --------------------------------------
  * Encoding parameter Presets (Preset Values are just a guess and are not OK, have to check ....)
+ * ID
  * 0 .. DivX default (ffmpeg defaults, OK)
  * 1 .. DivX Best
  * 2 .. DivX Low
@@ -1026,6 +1157,8 @@ p_gimp_get_data(const char *key, void *buffer, gint expected_size)
  * 6 .. MPEG2 SVCD
  * 7 .. MPEG2 DVD
  * 8 .. Real Video default
+ *
+ * Higher IDs may be available when preset files are installed.
  */
 void
 gap_enc_ffmpeg_main_init_preset_params(GapGveFFMpegValues *epp, gint preset_idx)
@@ -1091,11 +1224,36 @@ gap_enc_ffmpeg_main_init_preset_params(GapGveFFMpegValues *epp, gint preset_idx)
   static float  tab_mux_max_delay[GAP_GVE_FFMPEG_PRESET_MAX_ELEMENTS]     =  {   0.7,       0.7,      0.7,    0.7,         0.7,     0.7,     0.7,        0.7,      0.7 };
   static gint32 tab_use_scann_offset[GAP_GVE_FFMPEG_PRESET_MAX_ELEMENTS]  =  {     0,         0,        0,      0,           0,       0,       1,          0,        0 };
 
+  static char*  tab_presetName[GAP_GVE_FFMPEG_PRESET_MAX_ELEMENTS] = {
+      "DivX default preset"
+     ,"DivX high quality preset"
+     ,"DivX low quality preset"
+     ,"DivX WINDOWS preset"
+     ,"MPEG1 (VCD) preset"
+     ,"MPEG1 high quality preset"
+     ,"MPEG2 (SVCD) preset"
+     ,"MPEG2 (DVD) preset"
+     ,"REAL video preset"
+  };
+
+  l_idx = preset_idx -1;
+  if ((preset_idx < 1) || (preset_idx >= GAP_GVE_FFMPEG_PRESET_MAX_ELEMENTS))
+  {
+    /* use hardcoded preset 0 as base initialisation for non-hardcoded Id presets 
+     */
+    l_idx = 0;
+  }
+  else
+  {
+    g_snprintf(epp->presetName,  sizeof(epp->presetName),  tab_presetName[l_idx]);    /* name of the preset */
+  }
 
-  l_idx = CLAMP(preset_idx, 0, GAP_GVE_FFMPEG_PRESET_MAX_ELEMENTS -1);
   if(gap_debug)
   {
-    printf("gap_enc_ffmpeg_main_init_preset_params L_IDX:%d\n", (int)l_idx);
+    printf("gap_enc_ffmpeg_main_init_preset_params L_IDX:%d preset_idx:%d\n"
+        , (int)l_idx
+        , (int)preset_idx
+        );
   }
 
   g_snprintf(epp->format_name, sizeof(epp->format_name), tab_format_name[l_idx]);   /* "avi" */
@@ -1318,6 +1476,24 @@ gap_enc_ffmpeg_main_init_preset_params(GapGveFFMpegValues *epp, gint preset_idx)
   epp->partition_X264_PART_P4X4         = 0; /* 0: FALSE */
   epp->partition_X264_PART_B8X8         = 0; /* 0: FALSE */
 
+  if (preset_idx >= GAP_GVE_FFMPEG_PRESET_MAX_ELEMENTS)
+  {
+    if(gap_debug)
+    {
+      printf("gap_enc_ffmpeg_main_init_preset_params before p_initPresetFromPresetIndex\n"
+          );
+    }
+    p_initPresetFromPresetIndex(epp, preset_idx);
+  }
+
+  if(gap_debug)
+  {
+    printf("gap_enc_ffmpeg_main_init_preset_params DONE L_IDX:%d preset_idx:%d\n"
+        , (int)l_idx
+        , (int)preset_idx
+        );
+  }
+
 }   /* end gap_enc_ffmpeg_main_init_preset_params */
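Callers may now pass either one of the hardcoded preset indexes or a presetId taken from an installed preset file. A hedged sketch of the resulting behaviour (installedPresetId is a hypothetical value obtained from an entry of gap_ffpar_getPresetList()):

    GapGveFFMpegValues evl;

    /* hardcoded preset: copies the DivX default values (table index 0) */
    gap_enc_ffmpeg_main_init_preset_params (&evl, GAP_GVE_FFMPEG_PRESET_01_DIVX_DEFAULT);

    /* file based preset: evl is first initialized from table index 0, then replaced by
     * the preset list entry whose presetId matches (see p_initPresetFromPresetIndex).
     * If no matching presetId is installed, the table index 0 values remain.
     */
    gap_enc_ffmpeg_main_init_preset_params (&evl, installedPresetId);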
 
 
@@ -1810,8 +1986,6 @@ p_ffmpeg_open_init(t_ffmpeg_handle *ffh, GapGveFFMpegGlobalParams *gpp)
     ffh->vst[ii].vid_codec_context = NULL;
     ffh->vst[ii].vid_codec = NULL;
     ffh->vst[ii].big_picture_codec = avcodec_alloc_frame();
-    ffh->vst[ii].yuv420_buffer = NULL;
-    ffh->vst[ii].yuv420_buffer_size = 0;
     ffh->vst[ii].video_buffer = NULL;
     ffh->vst[ii].video_buffer_size = 0;
     ffh->vst[ii].video_dummy_buffer = NULL;
@@ -2628,6 +2802,7 @@ p_ffmpeg_open(GapGveFFMpegGlobalParams *gpp
   }
 
   ffh = g_malloc0(sizeof(t_ffmpeg_handle));
+  ffh->isMultithreadEnabled = FALSE;
   ffh->max_vst = MIN(MAX_VIDEO_STREAMS, video_tracks);
   ffh->max_ast = MIN(MAX_AUDIO_STREAMS, awp->audio_tracks);
 
@@ -2806,10 +2981,6 @@ p_ffmpeg_open(GapGveFFMpegGlobalParams *gpp
     ffh->vst[ii].video_dummy_buffer_size =  size * 4;  /* (1024*1024); */
     ffh->vst[ii].video_dummy_buffer = g_malloc0(ffh->vst[ii].video_dummy_buffer_size);
 
-    /* allocate yuv420_buffer (for the RAW image data YUV 4:2:0) */
-    ffh->vst[ii].yuv420_buffer_size = size + (size / 2);
-    ffh->vst[ii].yuv420_buffer = g_malloc0(ffh->vst[ii].yuv420_buffer_size);
-
     ffh->vst[ii].yuv420_dummy_buffer_size = size + (size / 2);
     ffh->vst[ii].yuv420_dummy_buffer = g_malloc0(ffh->vst[ii].yuv420_dummy_buffer_size);
 
@@ -3097,18 +3268,17 @@ p_convert_colormodel(t_ffmpeg_handle *ffh, AVPicture *picture_codec, guchar *rgb
 
 }  /* end p_convert_colormodel */
 
-/* --------------------
- * p_ffmpeg_write_frame
- * --------------------
+
+/* ---------------------------------
+ * p_ffmpeg_encodeAndWriteVideoFrame
+ * ---------------------------------
  * encode one videoframe using the selected codec and write
  * the encoded frame to the mediafile as packet.
- * Passing NULL as drawable is used to flush one frame from the codecs internal buffer
- * (typically required after the last frame has been already feed to the codec)
  */
 static int
-p_ffmpeg_write_frame(t_ffmpeg_handle *ffh, GimpDrawable *drawable, gboolean force_keyframe, gint vid_track, gboolean useYUV420P)
+p_ffmpeg_encodeAndWriteVideoFrame(t_ffmpeg_handle *ffh, AVFrame *picture_codec
+   , gboolean force_keyframe, gint vid_track, gint32 encode_frame_nr)
 {
-  AVFrame *picture_codec;
   int encoded_size;
   int ret;
   int ii;
@@ -3116,117 +3286,13 @@ p_ffmpeg_write_frame(t_ffmpeg_handle *ffh, GimpDrawable *drawable, gboolean forc
   ii = ffh->vst[vid_track].video_stream_index;
   ret = 0;
 
-  if(gap_debug)
-  {
-     AVCodec  *codec;
-
-     codec = ffh->vst[ii].vid_codec_context->codec;
-
-     printf("\n-------------------------\n");
-     printf("p_ffmpeg_write_frame: START codec: %d track:%d countVideoFramesWritten:%d frame_nr:%d (validFrameNr:%d)\n"
-           , (int)codec
-           , (int)vid_track
-           , (int)ffh->countVideoFramesWritten
-           , (int)ffh->encode_frame_nr
-           , (int)ffh->validEncodeFrameNr
-           );
-     printf("name: %s\n", codec->name);
-     if(gap_debug)
-     {
-       printf("type: %d\n", codec->type);
-       printf("id: %d\n",   codec->id);
-       printf("priv_data_size: %d\n",   codec->priv_data_size);
-       printf("capabilities: %d\n",   codec->capabilities);
-       printf("init fptr: %d\n",   (int)codec->init);
-       printf("encode fptr: %d\n",   (int)codec->encode);
-       printf("close fptr: %d\n",   (int)codec->close);
-       printf("decode fptr: %d\n",   (int)codec->decode);
-    }
-  }
-
-  /* picture to feed the codec */
-  picture_codec = ffh->vst[ii].big_picture_codec;
-
-  /* in case drawable is NULL
-   * we feed the previous handled picture (e.g the last of the input)
-   * again and again to the codec
-   * Note that this procedure typically is called with NULL  drawbale
-   * until all frames in its internal buffer are writen to the output video.
-   */
-
-  if(drawable != NULL)
-  {
-   if ((useYUV420P == TRUE) && (ffh->vst[ii].vid_codec_context->pix_fmt == PIX_FMT_YUV420P))
-   {
-     if(gap_debug)
-     {
-       printf("USE PIX_FMT_YUV420P (no pix_fmt convert needed)\n");
-     }
-
-
-
-     /* fill the yuv420_buffer with current frame image data 
-      * NOTE: gap_gve_raw_YUV420P_drawable_encode does not work on some machines
-      * and gives low quality results. (therefore the useYUV420P flag is FALSE per default)
-      */
-     gap_gve_raw_YUV420P_drawable_encode(drawable, ffh->vst[0].yuv420_buffer);
-
-
-     /* most of the codecs wants YUV420
-      * (we can use the picture in ffh->vst[ii].yuv420_buffer without pix_fmt conversion
-      */
-     avpicture_fill(picture_codec
-                   ,ffh->vst[ii].yuv420_buffer
-                   ,PIX_FMT_YUV420P          /* PIX_FMT_RGB24, PIX_FMT_RGBA32, PIX_FMT_BGRA32 */
-                   ,ffh->frame_width
-                   ,ffh->frame_height
-                   );
-   }
-   else
-   {
-     guchar     *rgb_buffer;
-     gint32      rgb_size;
-
-     rgb_buffer = gap_gve_raw_RGB_drawable_encode(drawable, &rgb_size, FALSE /* no vflip */
-                                                 , NULL  /* app0_buffer */
-                                                 , 0     /*  app0_length */
-                                                 );
-
-     if (ffh->vst[ii].vid_codec_context->pix_fmt == PIX_FMT_RGB24)
-     {
-       if(gap_debug)
-       {
-         printf("USE PIX_FMT_RGB24 (no pix_fmt convert needed)\n");
-       }
-       avpicture_fill(picture_codec
-                   ,rgb_buffer
-                   ,PIX_FMT_RGB24          /* PIX_FMT_RGB24, PIX_FMT_BGR24, PIX_FMT_RGBA32, PIX_FMT_BGRA32 */
-                   ,ffh->frame_width
-                   ,ffh->frame_height
-                   );
-     }
-     else
-     {
-       p_convert_colormodel(ffh, picture_codec, rgb_buffer, vid_track);
-     }
-
-
-     if(gap_debug)
-     {
-       printf("before g_free rgb_buffer\n");
-     }
-
-     g_free(rgb_buffer);
-   }
-  }
-
   /* AVFrame is the new structure introduced in FFMPEG 0.4.6,
    * (same as AVPicture but with additional members at the end)
    */
-  ffh->vst[ii].big_picture_codec->quality = ffh->vst[ii].vid_stream->quality;
+  picture_codec->quality = ffh->vst[ii].vid_stream->quality;
 
   if((force_keyframe)
-  || (ffh->encode_frame_nr == 1))
+  || (encode_frame_nr == 1))
   {
     /* TODO: howto force the encoder to write an I frame ??
      *   ffh->vst[ii].big_picture_codec->key_frame could be ignored
@@ -3234,11 +3300,11 @@ p_ffmpeg_write_frame(t_ffmpeg_handle *ffh, GimpDrawable *drawable, gboolean forc
      *   reported by the encoder.
      */
     /* ffh->vst[ii].vid_codec_context->key_frame = 1; */
-    ffh->vst[ii].big_picture_codec->key_frame = 1;
+    picture_codec->key_frame = 1;
   }
   else
   {
-    ffh->vst[ii].big_picture_codec->key_frame = 0;
+    picture_codec->key_frame = 0;
   }
 
 
@@ -3246,28 +3312,29 @@ p_ffmpeg_write_frame(t_ffmpeg_handle *ffh, GimpDrawable *drawable, gboolean forc
   {
     if(gap_debug)
     {
-      printf("before avcodec_encode_video  big_picture_codec:%d\n"
-         ,(int)ffh->vst[ii].big_picture_codec
+      printf("p_ffmpeg_encodeAndWriteVideoFrame: TID:%d before avcodec_encode_video  picture_codec:%d\n"
+         , p_base_get_thread_id_as_int()
+         ,(int)picture_codec
          );
     }
 
 
     /* some codecs (x264) just pass through pts information obtained from
-     * the AVFrame struct (big_picture_codec)
+     * the AVFrame struct (picture_codec)
      * therefore init the pts code in this structure.
      * (a valid pts is essential to get encoded frame results in the correct order)
      */
-    //ffh->vst[ii].big_picture_codec->pts = p_calculate_current_timecode(ffh);
-    ffh->vst[ii].big_picture_codec->pts = ffh->encode_frame_nr -1;
+    picture_codec->pts = encode_frame_nr -1;
 
     encoded_size = avcodec_encode_video(ffh->vst[ii].vid_codec_context
                            ,ffh->vst[ii].video_buffer, ffh->vst[ii].video_buffer_size
-                           ,ffh->vst[ii].big_picture_codec);
+                           ,picture_codec);
 
 
     if(gap_debug)
     {
-      printf("after avcodec_encode_video  encoded_size:%d\n"
+      printf("p_ffmpeg_encodeAndWriteVideoFrame: TID:%d after avcodec_encode_video  encoded_size:%d\n"
+         , p_base_get_thread_id_as_int()
          ,(int)encoded_size
          );
     }
@@ -3307,12 +3374,15 @@ p_ffmpeg_write_frame(t_ffmpeg_handle *ffh, GimpDrawable *drawable, gboolean forc
           st = ffh->output_context->streams[pkt.stream_index];
           if (pkt.pts == AV_NOPTS_VALUE)
           {
-            printf("** HOF: Codec delivered invalid pts  AV_NOPTS_VALUE !\n");
+            printf("p_ffmpeg_encodeAndWriteVideoFrame: TID:%d ** HOF: Codec delivered invalid pts  AV_NOPTS_VALUE !\n"
+                  , p_base_get_thread_id_as_int()
+                  );
           }
 
-          printf("before av_interleaved_write_frame video encoded_size:%d\n"
+          printf("p_ffmpeg_encodeAndWriteVideoFrame: TID:%d  before av_interleaved_write_frame video encoded_size:%d\n"
                 " pkt.stream_index:%d pkt.pts:%lld dts:%lld coded_frame->pts:%lld  c->time_base:%d den:%d\n"
                 " st->pts.num:%lld, st->pts.den:%lld st->pts.val:%lld\n"
+             , p_base_get_thread_id_as_int()
              , (int)encoded_size
              , pkt.stream_index
              , pkt.pts
@@ -3334,7 +3404,10 @@ p_ffmpeg_write_frame(t_ffmpeg_handle *ffh, GimpDrawable *drawable, gboolean forc
         
         if(gap_debug)
         {
-          printf("after av_interleaved_write_frame  encoded_size:%d\n", (int)encoded_size );
+          printf("p_ffmpeg_encodeAndWriteVideoFrame: TID:%d after av_interleaved_write_frame  encoded_size:%d\n"
+            , p_base_get_thread_id_as_int()
+            , (int)encoded_size 
+            );
         }
       }
     }
@@ -3347,15 +3420,1078 @@ p_ffmpeg_write_frame(t_ffmpeg_handle *ffh, GimpDrawable *drawable, gboolean forc
     fprintf(ffh->vst[ii].passlog_fp, "%s", ffh->vst[ii].vid_codec_context->stats_out);
   }
 
+  return(ret);
+  
+}  /* end p_ffmpeg_encodeAndWriteVideoFrame */
+
+/* -----------------------------------------------
+ * p_ffmpeg_convert_GapStoryFetchResult_to_AVFrame
+ * -----------------------------------------------
+ * convert the specified gapStoryFetchResult into the specified AVFrame (picture_codec)
+ * that is later fed to the video codec for encoding.
+ *
+ */
+static void
+p_ffmpeg_convert_GapStoryFetchResult_to_AVFrame(t_ffmpeg_handle *ffh
+ , AVFrame *picture_codec
+ , GapStoryFetchResult *gapStoryFetchResult
+ , gint vid_track)
+{
+  int ii;
+  GapRgbPixelBuffer  rgbBufferLocal;
+  GapRgbPixelBuffer *rgbBuffer;
+  guchar            *frameData;
+
+  ii = ffh->vst[vid_track].video_stream_index;
+
+  rgbBuffer = &rgbBufferLocal;
+  frameData = NULL;
+ 
+  if(gapStoryFetchResult->resultEnum == GAP_STORY_FETCH_RESULT_IS_RAW_RGB888)
+  {
+    if(gap_debug)
+    {
+      printf("p_ffmpeg_convert_GapStoryFetchResult_to_AVFrame: RESULT_IS_RAW_RGB888\n");
+    }
+    if(gapStoryFetchResult->raw_rgb_data == NULL)
+    {
+      printf("** ERROR p_ffmpeg_convert_GapStoryFetchResult_to_AVFrame  RGB888 raw_rgb_data is NULL!\n");
+      return;
+    }
+    rgbBuffer->data = gapStoryFetchResult->raw_rgb_data;
+  }
+  else
+  {
+    GimpDrawable      *drawable;
+    
+    drawable = gimp_drawable_get (gapStoryFetchResult->layer_id);
+    if(drawable->bpp != 3)
+    {
+      printf("** ERROR drawable bpp value %d is not supported (only bpp == 3 is supported)!\n"
+        ,(int)drawable->bpp
+        );
+    }
+    gap_gve_init_GapRgbPixelBuffer(rgbBuffer, drawable->width, drawable->height);
+ 
+    frameData = (guchar *)g_malloc0((rgbBuffer->height * rgbBuffer->rowstride));
+    rgbBuffer->data = frameData;
+
+    if(gap_debug)
+    {
+      printf("p_ffmpeg_convert_GapStoryFetchResult_to_AVFrame: before gap_gve_drawable_to_RgbBuffer rgb_buffer\n");
+    }
+ 
+    /* tests with framesize 720 x 480 on my 4 CPU development machine showed that
+     *    gap_gve_drawable_to_RgbBuffer_multithread runs 1.5 times
+     *    slower than the singleprocessor implementation  (1.25 times slower on larger frames 1440 x 960)
+     * possible reasons may be:
+     * a) too much overhead to init the multithread stuff
+     * b) too much time spent waiting for the mutex to be unlocked.
+     * TODO in case a ==> remove code for gap_gve_drawable_to_RgbBuffer_multithread
+     *      in case b ==> further tuning to reduce wait cycles.
+     */
+    // gap_gve_drawable_to_RgbBuffer_multithread(drawable, rgbBuffer);
+    gap_gve_drawable_to_RgbBuffer(drawable, rgbBuffer);
+    gimp_drawable_detach (drawable);
+ 
+    /* destroy the fetched (tmp) image */
+    gimp_image_delete(gapStoryFetchResult->image_id);
+  }
+
+  if (ffh->vst[ii].vid_codec_context->pix_fmt == PIX_FMT_RGB24)
+  {
+    if(gap_debug)
+    {
+      printf("p_ffmpeg_convert_GapStoryFetchResult_to_AVFrame: USE PIX_FMT_RGB24 (no pix_fmt convert needed)\n");
+    }
+    avpicture_fill(picture_codec
+                ,rgbBuffer->data
+                ,PIX_FMT_RGB24          /* PIX_FMT_RGB24, PIX_FMT_BGR24, PIX_FMT_RGBA32, PIX_FMT_BGRA32 */
+                ,ffh->frame_width
+                ,ffh->frame_height
+                );
+  }
+  else
+  {
+    if(gap_debug)
+    {
+      printf("p_ffmpeg_convert_GapStoryFetchResult_to_AVFrame: before p_convert_colormodel rgb_buffer\n");
+    }
+    p_convert_colormodel(ffh, picture_codec, rgbBuffer->data, vid_track);
+  }
+
+
+  if(frameData != NULL)
+  {
+    if(gap_debug)
+    {
+      printf("p_ffmpeg_convert_GapStoryFetchResult_to_AVFrame: before g_free frameData\n");
+    }
+    g_free(frameData);
+  }
+
+}  /* end p_ffmpeg_convert_GapStoryFetchResult_to_AVFrame */
+
+
+/* -------------------------------------------
+ * p_create_EncoderQueueRingbuffer
+ * -------------------------------------------
+ * this procedure allocates the encoder queue ringbuffer
+ * for use in a multiprocessor environment.
+ * The encoder queue ringbuffer is created
+ * with the AVFrame buffers for all N elements allocated,
+ * and all elements get the initial status EQELEM_STATUS_FREE.
+ *
+ * Initial                                                 After writing the 1st frame (main thread)
+ *                                                       
+ *                          +---------------------+		                          +------------------------+
+ *  eque->eq_write_ptr  --->| EQELEM_STATUS_FREE  |<---+                                  | EQELEM_STATUS_FREE     |<---+  
+ *  (main thread)           +---------------------+    |	                          +------------------------+    |
+ *                                  |                  |	                                  |                     |
+ *                                  V                  |	                                  V                     |
+ *                          +---------------------+    |	  eque->eq_write_ptr  --->+------------------------+    |
+ *  eque->eq_read_ptr   --->| EQELEM_STATUS_FREE  |    | 	  eque->eq_read_ptr   --->| EQELEM_STATUS_READY ** |    | 
+ *  (encoder thread)        +---------------------+    |	                          +------------------------+    |
+ *                                  |                  |	                                  |                     |
+ *                                  V                  |	                                  V                     |
+ *                          +---------------------+    |	                          +------------------------+    |
+ *                          | EQELEM_STATUS_FREE  |    | 	                          | EQELEM_STATUS_FREE     |    | 
+ *                          +---------------------+    |	                          +------------------------+    |
+ *                                  |                  |	                                  |                     |
+ *                                  V                  |	                                  V                     |
+ *                          +---------------------+    |	                          +------------------------+    |
+ *                          | EQELEM_STATUS_FREE  |    | 	                          | EQELEM_STATUS_FREE     |    | 
+ *                          +---------------------+    |	                          +------------------------+    |
+ *                                  |                  |	                                  |                     |
+ *                                  +------------------+	                                  +---------------------+
+ *
+ */
+static void
+p_create_EncoderQueueRingbuffer(EncoderQueue *eque)
+{
+  gint ii;
+  gint jj;
+  EncoderQueueElem *eq_elem_one;
+  EncoderQueueElem *eq_elem;
+  
+  eq_elem_one = NULL;
+  eq_elem = NULL;
+  for(jj=0; jj < ENCODER_QUEUE_RINGBUFFER_SIZE; jj++)
+  {
+    eq_elem = g_new(EncoderQueueElem, 1);
+    eq_elem->encode_frame_nr = 0;
+    eq_elem->vid_track = 1;
+    eq_elem->force_keyframe = FALSE;
+    eq_elem->status = EQELEM_STATUS_FREE;
+    eq_elem->elemMutex = g_mutex_new();
+    eq_elem->next = eque->eq_root;
+    
+    if(eq_elem_one == NULL)
+    {
+      eq_elem_one = eq_elem;
+    }
+    eque->eq_root = eq_elem;
+    
+    for (ii=0; ii < MAX_VIDEO_STREAMS; ii++)
+    {
+      eq_elem->eq_big_picture_codec[ii] = avcodec_alloc_frame();
+    }
+  }
+  
+  /* close the pointer ring */
+  if(eq_elem_one)
+  {
+    eq_elem_one->next = eq_elem;
+  }
+  
+  /* init read and write pointers
+   * note that write access typically starts with advancing the write pointer,
+   * followed by writing data to eq_big_picture_codec.
+   */
+  eque->eq_write_ptr = eque->eq_root;
+  eque->eq_read_ptr  = eque->eq_root->next;
+
+}  /* end p_create_EncoderQueueRingbuffer */
+
+
+/* -------------------------------------------
+ * p_init_EncoderQueueResources
+ * -------------------------------------------
+ * this procedure creates a thread pool and an EncoderQueue ringbuffer in case 
+ * the gimprc parameters are configured for multiprocessor support.
+ * (otherwise those resources are not allocated and are initialized with NULL pointers)
+ * 
+ */
+static EncoderQueue *
+p_init_EncoderQueueResources(t_ffmpeg_handle *ffh, t_awk_array *awp)
+{
+  EncoderQueue     *eque;
+  
+  eque = g_new(EncoderQueue ,1);
+  eque->numberOfElements = 0;
+  eque->eq_root      = NULL;
+  eque->eq_write_ptr = NULL;
+  eque->eq_read_ptr  = NULL;
+  eque->ffh          = ffh;
+  eque->awp          = awp;
+  eque->runningThreadsCounter = 0;
+  eque->frameEncodedCond   = NULL;
+  eque->poolMutex          = NULL;
+  eque->frameEncodedCond   = NULL;
+
+  GAP_TIMM_INIT_RECORD(&eque->mainElemMutexWaits);
+  GAP_TIMM_INIT_RECORD(&eque->mainPoolMutexWaits);
+  GAP_TIMM_INIT_RECORD(&eque->mainEnqueueWaits);
+  GAP_TIMM_INIT_RECORD(&eque->mainPush2);
+  GAP_TIMM_INIT_RECORD(&eque->mainWriteFrame);
+  GAP_TIMM_INIT_RECORD(&eque->mainDrawableToRgb);
+  GAP_TIMM_INIT_RECORD(&eque->mainReadFrame);
+
+  GAP_TIMM_INIT_RECORD(&eque->ethreadElemMutexWaits);
+  GAP_TIMM_INIT_RECORD(&eque->ethreadPoolMutexWaits);
+  GAP_TIMM_INIT_RECORD(&eque->ethreadEncodeFrame);
+  
+  if (ffh->isMultithreadEnabled)
+  {
+    /* check and init thread system */
+    ffh->isMultithreadEnabled = gap_base_thread_init();
+    if(gap_debug)
+    {
+      printf("p_init_EncoderQueueResources: isMultithreadEnabled: %d\n"
+        ,(int)ffh->isMultithreadEnabled
+        );
+    }
+  }
+  
+  if (ffh->isMultithreadEnabled)
+  {
+    p_create_EncoderQueueRingbuffer(eque);
+    eque->poolMutex          = g_mutex_new ();
+    eque->frameEncodedCond   = g_cond_new ();
+  }
+
+  return (eque);
+  
+}  /* end p_init_EncoderQueueResources */
+
+
+/* ------------------------------
+ * p_debug_print_RingbufferStatus
+ * ------------------------------
+ * print status of all encoder queue ringbuffer elements
+ * to stdout for debug purpose.
+ * (all log lines are collected in a buffer and printed to stdout
+ * at once to prevent mixing the output with that of other threads)
+ */
+static void
+p_debug_print_RingbufferStatus(EncoderQueue *eque)
+{
+  EncoderQueueElem    *eq_elem;
+  EncoderQueueElem    *eq_elem_next;
+  GString             *logString;
+  gint tid;
+  gint ii;
+      
+  tid = p_base_get_thread_id_as_int();
+  
+  logString = g_string_new(
+    "--------------------------------\n"
+    "p_debug_print_RingbufferStatus\n"
+    "--------------------------------\n"
+  );
+
+  eq_elem_next = NULL;
+  ii=0;
+  for(eq_elem = eque->eq_root; eq_elem != NULL; eq_elem = eq_elem_next)
+  {
+    g_string_append_printf(logString, "TID:%d EQ_ELEM[%d]: %d  EQ_ELEM->next:%d STATUS:%d encode_frame_nr:%d"
+          , (int)tid
+          , (int)ii
+          , (int)eq_elem
+          , (int)eq_elem->next
+          , (int)eq_elem->status
+          , (int)eq_elem->encode_frame_nr
+          );
+    if(eq_elem == eque->eq_write_ptr)
+    {
+      g_string_append_printf(logString, " <-- eq_write_ptr");
+    }
+    
+    if(eq_elem == eque->eq_read_ptr)
+    {
+      g_string_append_printf(logString, " <-- eq_read_ptr");
+    }
+    
+    g_string_append_printf(logString, "\n");
+    
+    
+    eq_elem_next = eq_elem->next;
+    if(eq_elem_next == eque->eq_root)
+    {
+      /* this was the last element in the ringbuffer.
+       */
+      eq_elem_next = NULL;
+    }
+    ii++;
+  }
+
+  printf("%s\n", logString->str);
+  
+  g_string_free(logString, TRUE);
+
+}  /* end p_debug_print_RingbufferStatus */
+
+/* -----------------------------------------
+ * p_waitUntilEncoderQueIsProcessed
+ * -----------------------------------------
+ * check if the encoder thread is still running;
+ * if yes then wait until it has finished (i.e. until
+ * all enqueued frames have been processed)
+ * 
+ */
+static void
+p_waitUntilEncoderQueIsProcessed(EncoderQueue *eque)
+{
+  gint retryCount;
+
+  if(eque == NULL)
+  {
+    return;
+  }
+
+  if(gap_debug)
+  {
+    printf("p_waitUntilEncoderQueIsProcessed: MainTID:%d\n"
+            ,p_base_get_thread_id_as_int()
+            );
+    p_debug_print_RingbufferStatus(eque);
+  }
+
+  retryCount = 0;
+  g_mutex_lock (eque->poolMutex);
+  while(eque->runningThreadsCounter > 0)
+  {
+    if(gap_debug)
+    {
+      printf("p_waitUntilEncoderQueIsProcessed: WAIT MainTID:%d until encoder thread finishes queue processing. eq_write_ptr:%d STATUS:%d retry:%d \n"
+        , p_base_get_thread_id_as_int()
+        , (int)eque->eq_write_ptr
+        , (int)eque->eq_write_ptr->status
+        , (int)retryCount
+        );
+      fflush(stdout);
+    }
+
+    g_cond_wait (eque->frameEncodedCond, eque->poolMutex);
+
+    if(gap_debug)
+    {
+      printf("p_waitUntilEncoderQueIsProcessed: WAKE-UP MainTID:%d after encoder thread finished queue processing. eq_write_ptr:%d STATUS:%d retry:%d \n"
+        , p_base_get_thread_id_as_int()
+        , (int)eque->eq_write_ptr
+        , (int)eque->eq_write_ptr->status
+        , (int)retryCount
+        );
+      fflush(stdout);
+    }
+
+    retryCount++;
+    if(retryCount >= 250)
+    {
+      printf("** WARNING encoder thread not yet finished after %d wait retries\n"
+        ,(int)retryCount
+        );
+      break;
+    }
+  }
+  g_mutex_unlock (eque->poolMutex);
+
+}  /* end p_waitUntilEncoderQueIsProcessed */
+
+
+/* -------------------------------------------
+ * p_free_EncoderQueueResources
+ * -------------------------------------------
+ * this procedure frees the resources for the specified EncoderQueue.
+ * Note: this does NOT include the ffh reference.
+ * 
+ */
+static void
+p_free_EncoderQueueResources(EncoderQueue     *eque)
+{
+  t_ffmpeg_handle     *ffh;
+  EncoderQueueElem    *eq_elem;
+  EncoderQueueElem    *eq_elem_next;
+  gint                 ii;
+  
+  if(eque == NULL)
+  {
+    return;
+  }
+
+  if(eque->frameEncodedCond != NULL)
+  {
+    if(gap_debug)
+    {
+      printf("p_free_EncoderQueueResources: before g_cond_free(eque->frameEncodedCond)\n");
+    }
+    g_cond_free(eque->frameEncodedCond);
+    eque->frameEncodedCond = NULL;
+    if(gap_debug)
+    {
+      printf("p_free_EncoderQueueResources: after g_cond_free(eque->frameEncodedCond)\n");
+    }
+  }
+
+  eq_elem_next = NULL;
+  for(eq_elem = eque->eq_root; eq_elem != NULL; eq_elem = eq_elem_next)
+  {
+    for (ii=0; ii < MAX_VIDEO_STREAMS; ii++)
+    {
+      if(gap_debug)
+      {
+        printf("p_free_EncoderQueueResources: g_free big_picture[%d] of eq_elem:%d\n"
+          ,(int)ii
+          ,(int)eq_elem
+          );
+      }
+      g_free(eq_elem->eq_big_picture_codec[ii]);
+      if(gap_debug)
+      {
+        printf("p_free_EncoderQueueResources: g_mutex_free of eq_elem:%d\n"
+          ,(int)eq_elem
+          );
+      }
+      g_mutex_free(eq_elem->elemMutex);
+    }
+    if(gap_debug)
+    {
+      printf("p_free_EncoderQueueResources: g_free eq_elem:%d\n"
+        ,(int)eq_elem
+        );
+    }
+    eq_elem_next = eq_elem->next;
+    if(eq_elem_next == eque->eq_root)
+    {
+      /* this was the last element in the ringbuffer that points to
+       * the (already free'd root); it is time to break the loop now.
+       */
+      eq_elem_next = NULL;
+    }
+    g_free(eq_elem);
+  }
+  eque->eq_root      = NULL;
+  eque->eq_write_ptr = NULL;
+  eque->eq_read_ptr  = NULL;
+  
+}  /* end p_free_EncoderQueueResources */
+
+
+/* -------------------------------------
+ * p_fillQueueElem
+ * -------------------------------------
+ * fill element eque->eq_write_ptr with image data and information
+ * that is required for the encoder.
+ */
+static void
+p_fillQueueElem(EncoderQueue *eque, GapStoryFetchResult *gapStoryFetchResult, gboolean force_keyframe, gint vid_track)
+{
+  EncoderQueueElem *eq_write_ptr;
+  AVFrame *picture_codec;
+
+  eq_write_ptr = eque->eq_write_ptr;
+  eq_write_ptr->encode_frame_nr = eque->ffh->encode_frame_nr;
+  eq_write_ptr->vid_track = vid_track;
+  eq_write_ptr->force_keyframe = force_keyframe;
+  picture_codec = eq_write_ptr->eq_big_picture_codec[vid_track];
+
+  GAP_TIMM_START_RECORD(&eque->mainDrawableToRgb);
+  if(gap_debug)
+  {
+    printf("p_fillQueueElem: START eq_write_ptr:%d picture_codec:%d vid_track:%d encode_frame_nr:%d\n"
+      ,(int)eq_write_ptr
+      ,(int)picture_codec
+      ,(int)eq_write_ptr->vid_track
+      ,(int)eq_write_ptr->encode_frame_nr
+      );
+  }
+  
+  /* fill the AVFrame data at eq_write_ptr */
+  if(gapStoryFetchResult != NULL)
+  {
+    p_ffmpeg_convert_GapStoryFetchResult_to_AVFrame(eque->ffh
+                 , picture_codec
+                 , gapStoryFetchResult
+                 , vid_track
+                 );
+    if(gap_debug)
+    {
+      printf("p_fillQueueElem: DONE eq_write_ptr:%d picture_codec:%d vid_track:%d encode_frame_nr:%d\n"
+        ,(int)eq_write_ptr
+        ,(int)picture_codec
+        ,(int)eq_write_ptr->vid_track
+        ,(int)eq_write_ptr->encode_frame_nr
+        );
+    }
+  }
+  GAP_TIMM_STOP_RECORD(&eque->mainDrawableToRgb);
+  
+}  /* end p_fillQueueElem */
+
+
+/* -------------------------------------
+ * p_encodeCurrentQueueElem
+ * -------------------------------------
+ * Encode current queue element at eq_read_ptr
+ * and write encoded videoframe to the mediafile.
+ */
+static void
+p_encodeCurrentQueueElem(EncoderQueue *eque)
+{
+  EncoderQueueElem *eq_read_ptr;
+  AVFrame *picture_codec;
+  gint     vid_track;
+  
+  eq_read_ptr = eque->eq_read_ptr;
+  
+  vid_track = eq_read_ptr->vid_track;
+  picture_codec = eq_read_ptr->eq_big_picture_codec[vid_track];
+
+  if(gap_debug)
+  {
+    printf("p_encodeCurrentQueueElem: TID:%d START eq_read_ptr:%d STATUS:%d ffh:%d picture_codec:%d vid_track:%d encode_frame_nr:%d\n"
+      , p_base_get_thread_id_as_int()
+      ,(int)eq_read_ptr
+      ,(int)eq_read_ptr->status
+      ,(int)eque->ffh
+      ,(int)picture_codec
+      ,(int)eq_read_ptr->vid_track
+      ,(int)eq_read_ptr->encode_frame_nr
+      );
+  }
+  
+  p_ffmpeg_encodeAndWriteVideoFrame(eque->ffh
+                                  , picture_codec
+                                  , eque->eq_read_ptr->force_keyframe
+                                  , vid_track
+                                  , eque->eq_read_ptr->encode_frame_nr
+                                  );
+
+  if(gap_debug)
+  {
+    printf("p_encodeCurrentQueueElem: TID:%d DONE eq_read_ptr:%d picture_codec:%d vid_track:%d encode_frame_nr:%d\n"
+      , p_base_get_thread_id_as_int()
+      ,(int)eq_read_ptr
+      ,(int)picture_codec
+      ,(int)eq_read_ptr->vid_track
+      ,(int)eq_read_ptr->encode_frame_nr
+      );
+  }
+}  /* end p_encodeCurrentQueueElem */
+
+
+/* -------------------------------------------
+ * p_encoderWorkerThreadFunction
+ * -------------------------------------------
+ * this procedure runs as thread pool function to encode video and audio
+ * frames. Encoding is based on libavformat/libavcodec.
+ * videoframe input is taken from the EncoderQueue ringbuffer
+ *  (which is filled in parallel by the main thread)
+ * audioframe input is fetched directly from an input audiofile.
+ *
+ * After encoding the first available frame this thread tries
+ * to encode the following frames when available.
+ * In case the elemMutex can not be locked for those additional frames
+ * it gives up immediately to avoid deadlocks (such frames are handled in the next call,
+ * when the thread is restarted from the thread pool).
+ *
+ * The encoding is done with the selected codec; the compressed data is written
+ * to the mediafile as packet.
+ *
+ * Note: the read pointer is reserved for exclusive use in this thread,
+ * therefore advancing this pointer can be done without locks.
+ * But accessing the element data (status or buffer) requires locking at element level
+ * because the main thread accesses the same data via the write pointer.
+ * (a standalone sketch of this handshake follows this function below)
+ */
+static void
+p_encoderWorkerThreadFunction (EncoderQueue *eque)
+{
+  EncoderQueueElem     *nextElem;
+  gint32                encoded_frame_nr = -1;   /* -1 means nothing was encoded in this call */
+  if(gap_debug)
+  {
+    printf("p_encoderWorkerThreadFunction: TID:%d before elemMutex eq_read_ptr:%d\n"
+          , p_base_get_thread_id_as_int()
+          , (int)eque->eq_read_ptr
+          );
+  }
+  /* lock at element level */
+  if(g_mutex_trylock (eque->eq_read_ptr->elemMutex) != TRUE)
+  {
+    GAP_TIMM_START_RECORD(&eque->ethreadElemMutexWaits);
+
+    g_mutex_lock (eque->eq_read_ptr->elemMutex);
+  
+    GAP_TIMM_STOP_RECORD(&eque->ethreadElemMutexWaits);
+  }
+  
+  
+  
+  if(gap_debug)
+  {
+    printf("p_encoderWorkerThreadFunction: TID:%d  after elemMutex lock eq_read_ptr:%d STATUS:%d encode_frame_nr:%d\n"
+          , p_base_get_thread_id_as_int()
+          , (int)eque->eq_read_ptr
+          , (int)eque->eq_read_ptr->status
+          , (int)eque->eq_read_ptr->encode_frame_nr
+          );
+    p_debug_print_RingbufferStatus(eque);
+  }
+
+ENCODER_LOOP:
+
+  /* check whether the next element is ready and advance the read pointer if so.
+   *
+   * encoding of the last n frames is typically triggered
+   * by setting the same element to EQELEM_STATUS_READY n times
+   * without advancing to the next element.
+   * therefore the read pointer is advanced only when the next element
+   * has already reached EQELEM_STATUS_READY.
+   */
+  if(eque->eq_read_ptr->status == EQELEM_STATUS_FREE)
+  {
+    nextElem = eque->eq_read_ptr->next;
+    g_mutex_unlock (eque->eq_read_ptr->elemMutex);
+
+    if(TRUE != g_mutex_trylock (nextElem->elemMutex))
+    {
+        goto ENCODER_STOP;
+    }
+    else
+    {
+      if(nextElem->status == EQELEM_STATUS_READY)
+      {
+        eque->eq_read_ptr = nextElem;
+      }
+      else
+      {
+        /* next element is not ready yet
+         */
+        g_mutex_unlock (nextElem->elemMutex);
+        goto ENCODER_STOP;
+      }
+    }
+  }
+
+  if(eque->eq_read_ptr->status == EQELEM_STATUS_READY)
+  {
+    eque->eq_read_ptr->status = EQELEM_STATUS_LOCK;
+
+
+    GAP_TIMM_START_RECORD(&eque->ethreadEncodeFrame);
+
+    /* Encode and write the current element (e.g. videoframe) at eq_read_ptr */
+    p_encodeCurrentQueueElem(eque);
+
+    if(eque->ffh->countVideoFramesWritten > 0)
+    {
+      /* fetch, encode and write one audioframe */
+      p_process_audio_frame(eque->ffh, eque->awp);
+    }
+
+    GAP_TIMM_STOP_RECORD(&eque->ethreadEncodeFrame);
+
+
+    /* setting EQELEM_STATUS_FREE enables re-use (e.g. overwrite)
+     * of this element's data buffers.
+     */
+    eque->eq_read_ptr->status = EQELEM_STATUS_FREE;
+    encoded_frame_nr = eque->eq_read_ptr->encode_frame_nr;
+    
+    /* encoding of the last n frames is typically triggered
+     * by setting the same element to EQELEM_STATUS_READY n times
+     * without advancing to the next element.
+     * therefore the read pointer is advanced only when the next element
+     * has already reached EQELEM_STATUS_READY.
+     */
+    nextElem = eque->eq_read_ptr->next;
+
+    /* advance the lock to next element */
+    g_mutex_unlock (eque->eq_read_ptr->elemMutex);
+
+    if(TRUE != g_mutex_trylock (nextElem->elemMutex))
+    {
+        goto ENCODER_STOP;
+    }
+    else
+    {
+      if (nextElem->status == EQELEM_STATUS_READY)
+      {
+        eque->eq_read_ptr = nextElem;
+
+        if(TRUE == g_mutex_trylock (eque->poolMutex))
+        {
+          g_cond_signal  (eque->frameEncodedCond);
+          g_mutex_unlock (eque->poolMutex);
+        }
+        /* next frame already available, continue the encoder loop */ 
+        goto ENCODER_LOOP;
+      }
+      else
+      {
+        /* unlock because the next element is not ready yet
+         */
+        g_mutex_unlock (nextElem->elemMutex);
+      }
+    }
+
+  }
+  else
+  {
+    g_mutex_unlock (eque->eq_read_ptr->elemMutex);
+  }
+  
+
+  /* no element in ready status is available.
+   * This can occur in the following scenarios:
+   * a) all frames are encoded;
+   *    in this case the main thread will free up resources and exit
+   *    or
+   * b) encoding was faster than fetching/rendering (in the main thread);
+   *    in this case the main thread will reactivate this thread pool function
+   *    when the next frame is enqueued and has reached the ready status.
+   *
+   * send a signal to wake up the main thread (even if nothing was actually encoded)
+   */
+
+ENCODER_STOP:
+
+  /* lock at pool level */
+  if(g_mutex_trylock (eque->poolMutex) != TRUE)
+  {
+    GAP_TIMM_START_RECORD(&eque->ethreadPoolMutexWaits);
+    g_mutex_lock (eque->poolMutex);
+    GAP_TIMM_STOP_RECORD(&eque->ethreadPoolMutexWaits);
+  }
+
+  if(gap_debug)
+  {
+    printf("p_encoderWorkerThreadFunction: TID:%d  send frameEncodedCond encoded_frame_nr:%d\n"
+          , p_base_get_thread_id_as_int()
+          , encoded_frame_nr
+          );
+  }
+  eque->runningThreadsCounter--;
+  g_cond_signal  (eque->frameEncodedCond);
+  g_mutex_unlock (eque->poolMutex);
+
+  if(gap_debug)
+  {
+    printf("p_encoderWorkerThreadFunction: TID:%d  DONE eq_read_ptr:%d encoded_frame_nr:%d\n"
+          , p_base_get_thread_id_as_int()
+          , (int)eque->eq_read_ptr
+          , encoded_frame_nr
+          );
+  }
+
+}  /* end p_encoderWorkerThreadFunction */
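
To make the FREE/READY/LOCK handshake described in the header comment easier to follow,
here is a minimal, self-contained sketch of the same element status protocol between one
producer (main) thread and one consumer (encoder) thread. It is only an illustration: it
polls with g_usleep() where the real code blocks on elemMutex/frameEncodedCond and the
thread pool, it assumes GLib >= 2.32, and all names (SketchElem, NUM_ELEMS, NUM_FRAMES)
are invented for the sketch.

#include <glib.h>
#include <stdio.h>

typedef enum { ST_FREE, ST_READY, ST_LOCK } SketchStatus;

typedef struct SketchElem {
  GMutex             mutex;      /* element level lock, like elemMutex */
  SketchStatus       status;
  int                frame_nr;
  struct SketchElem *next;       /* ring */
} SketchElem;

#define NUM_ELEMS  4
#define NUM_FRAMES 20

static SketchElem ring[NUM_ELEMS];

/* consumer: encodes every element that has reached ST_READY */
static gpointer
consumer_thread(gpointer data)
{
  SketchElem *rp = &ring[0];     /* read pointer, used by this thread only */
  int done = 0;

  (void)data;
  while (done < NUM_FRAMES)
  {
    g_mutex_lock(&rp->mutex);
    if (rp->status == ST_READY)
    {
      rp->status = ST_LOCK;
      printf("consumer: encode frame %d\n", rp->frame_nr);   /* stands in for the real encode call */
      rp->status = ST_FREE;      /* element data may be overwritten again */
      done++;
      g_mutex_unlock(&rp->mutex);
      rp = rp->next;
    }
    else
    {
      g_mutex_unlock(&rp->mutex);
      g_usleep(1000);            /* the real code is woken up via the thread pool instead of polling */
    }
  }
  return NULL;
}

int
main(void)
{
  GThread *consumer;
  SketchElem *wp;                /* write pointer, used by the main thread only */
  int frame;

  for (frame = 0; frame < NUM_ELEMS; frame++)
  {
    g_mutex_init(&ring[frame].mutex);
    ring[frame].status = ST_FREE;
    ring[frame].next = &ring[(frame + 1) % NUM_ELEMS];
  }

  consumer = g_thread_new("encoder", consumer_thread, NULL);
  wp = &ring[0];

  for (frame = 1; frame <= NUM_FRAMES; frame++)
  {
    /* wait until the element can be reused (ST_FREE), then fill it */
    for (;;)
    {
      g_mutex_lock(&wp->mutex);
      if (wp->status == ST_FREE)
      {
        break;
      }
      g_mutex_unlock(&wp->mutex);
      g_usleep(1000);            /* the real code waits on frameEncodedCond here */
    }
    wp->frame_nr = frame;        /* stands in for p_fillQueueElem */
    wp->status = ST_READY;
    g_mutex_unlock(&wp->mutex);
    wp = wp->next;
  }

  g_thread_join(consumer);
  return 0;
}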
+
+
+/* ------------------------------------------
+ * p_ffmpeg_write_frame_and_audio_multithread
+ * ------------------------------------------
+ * trigger encoding of one videoframe and one audioframe (in case audio is used).
+ * The videoframe is enqueued and processed in parallel by the encoder worker thread.
+ * Passing NULL as gapStoryFetchResult is used to flush one frame from the codec's internal buffer
+ * (typically required after the last frame has already been fed to the codec)
+ */
+static int
+p_ffmpeg_write_frame_and_audio_multithread(EncoderQueue *eque
+   , GapStoryFetchResult *gapStoryFetchResult, gboolean force_keyframe, gint vid_track)
+{
+  GError *error = NULL;
+  gint retryCount;
+
+  GAP_TIMM_START_RECORD(&eque->mainWriteFrame);
+
+  retryCount = 0;
+
+  if(encoderThreadPool == NULL)
+  {
+    /* init thread pool for one encoder thread.
+     * The current implementation does not support 2 or more concurrent queue writer threads
+     * -- more threads would crash or produce unusable video --
+     * But independent of this setup, some of the codecs in libavcodec do support
+     * multiple threads that are configured with the threads encoder parameter.
+     */
+    encoderThreadPool = g_thread_pool_new((GFunc) p_encoderWorkerThreadFunction
+                                         ,NULL        /* user data */
+                                         ,1           /* max_threads */
+                                         ,TRUE        /* exclusive */
+                                         ,&error      /* GError **error */
+                                         );
+  }
+
+  if(gapStoryFetchResult != NULL)
+  {
+    /* a new frame is available (as fetch result)
+     * and shall be enqueued at the next position in the
+     * EncoderQueue ringbuffer. Therefore first advance
+     * the write position.
+     */
+    eque->eq_write_ptr = eque->eq_write_ptr->next;
+  }
+
+  if(gap_debug)
+  {
+    printf("p_ffmpeg_write_frame_and_audio_multithread: before elemMutex lock eq_write_ptr:%d STATUS:%d retry:%d encode_frame_nr:%d\n"
+          , (int)eque->eq_write_ptr
+          , (int)eque->eq_write_ptr->status
+          , (int)retryCount
+          , (int)eque->ffh->encode_frame_nr
+          );
+    fflush(stdout);
+  }
+  
+  /* lock at element level (until element is filled and has reached EQELEM_STATUS_READY) */
+  if(g_mutex_trylock (eque->eq_write_ptr->elemMutex) != TRUE)
+  {
+    GAP_TIMM_START_RECORD(&eque->mainElemMutexWaits);
+
+    g_mutex_lock (eque->eq_write_ptr->elemMutex);
+
+    GAP_TIMM_STOP_RECORD(&eque->mainElemMutexWaits);
+  }
+  
+  if(gap_debug)
+  {
+    printf("p_ffmpeg_write_frame_and_audio_multithread: after elemMutex lock eq_write_ptr:%d STATUS:%d retry:%d encode_frame_nr:%d\n"
+          , (int)eque->eq_write_ptr
+          , (int)eque->eq_write_ptr->status
+          , (int)retryCount
+          , (int)eque->ffh->encode_frame_nr
+          );
+  }
+  
+  while(eque->eq_write_ptr->status != EQELEM_STATUS_FREE)
+  {
+    GAP_TIMM_START_RECORD(&eque->mainEnqueueWaits);
+    
+    g_mutex_unlock (eque->eq_write_ptr->elemMutex);
+
+    /* lock at pool level for checking the number of running encoder threads (0 or 1 expected) */
+    if(g_mutex_trylock (eque->poolMutex) != TRUE)
+    {
+      GAP_TIMM_START_RECORD(&eque->mainPoolMutexWaits);
+      g_mutex_lock (eque->poolMutex);
+      GAP_TIMM_STOP_RECORD(&eque->mainPoolMutexWaits);
+    }
+
+    if(eque->runningThreadsCounter <= 0)
+    {
+      if(gap_debug)
+      {
+        printf("p_ffmpeg_write_frame_and_audio_multithread: PUSH-1 (re)start worker thread eq_write_ptr:%d STATUS:%d retry:%d encode_frame_nr:%d\n"
+          , (int)eque->eq_write_ptr
+          , (int)eque->eq_write_ptr->status
+          , (int)retryCount
+          , (int)eque->ffh->encode_frame_nr
+          );
+      }
+      /* (re)activate encoder thread */
+      eque->runningThreadsCounter++;
+      g_thread_pool_push (encoderThreadPool
+                         , eque    /* VideoPrefetchData */
+                         , &error
+                         );
+    }
+
+    /* the ringbuffer queue is currently full,
+     * we have to wait until the encoder thread has finished processing at least one frame
+     * and has set its status to EQELEM_STATUS_FREE.
+     * g_cond_wait waits until this thread is woken up on frameEncodedCond.
+     * The mutex is unlocked before falling asleep and locked again before resuming.
+     */
+    if(gap_debug)
+    {
+      printf("p_ffmpeg_write_frame_and_audio_multithread: WAIT MainTID:%d retry:%d encode_frame_nr:%d\n"
+        ,  p_base_get_thread_id_as_int()
+        , (int)retryCount
+        , (int)eque->ffh->encode_frame_nr
+        );
+    }
+    g_cond_wait (eque->frameEncodedCond, eque->poolMutex);
+    if(gap_debug)
+    {
+      printf("p_ffmpeg_write_frame_and_audio_multithread: WAKE-UP MainTID:%d retry:%d encode_frame_nr:%d\n"
+        ,  p_base_get_thread_id_as_int()
+        , (int)retryCount
+        , (int)eque->ffh->encode_frame_nr
+        );
+    }
+    g_mutex_unlock (eque->poolMutex);
+
+    retryCount++;
+    if(retryCount > 100)
+    {
+      printf("** INTERNAL ERROR: failed to enqueue frame data after %d reties!\n"
+         ,(int)retryCount
+         );
+      exit (1);
+    }
+
+    /* lock at element level (until element is filled and has reached EQELEM_STATUS_READY) */
+    g_mutex_lock (eque->eq_write_ptr->elemMutex);
+
+    GAP_TIMM_STOP_RECORD(&eque->mainEnqueueWaits);
+
+  }
+
+
+  if(gap_debug)
+  {
+    printf("p_ffmpeg_write_frame_and_audio_multithread: FILL-QUEUE eq_write_ptr:%d STATUS:%d retry:%d encode_frame_nr:%d\n"
+      , (int)eque->eq_write_ptr
+      , (int)eque->eq_write_ptr->status
+      , (int)retryCount
+      , (int)eque->ffh->encode_frame_nr
+      );
+  }
+
+  /* convert gapStoryFetchResult and put resulting data into element eque->eq_write_ptr */
+  p_fillQueueElem(eque, gapStoryFetchResult, force_keyframe, vid_track);
+
+  eque->eq_write_ptr->status = EQELEM_STATUS_READY;
+
+  g_mutex_unlock (eque->eq_write_ptr->elemMutex);
+  
+
+  /* try lock at pool level to check whether the encoder thread is already running
+   * and (re)start it if this is not the case.
+   * In case g_mutex_trylock returns FALSE, the mutex is already locked
+   * and it can be assumed that the encoder thread is the lock holder and already running.
+   */
+  if(TRUE == g_mutex_trylock (eque->poolMutex))
+  {
+    if(eque->runningThreadsCounter <= 0)
+    {
+      GAP_TIMM_START_RECORD(&eque->mainPush2);
+      
+      if(gap_debug)
+      {
+        printf("p_ffmpeg_write_frame_and_audio_multithread: PUSH-2 (re)start worker thread eq_write_ptr:%d STATUS:%d retry:%d encode_frame_nr:%d\n"
+          , (int)eque->eq_write_ptr
+          , (int)eque->eq_write_ptr->status
+          , (int)retryCount
+          , (int)eque->ffh->encode_frame_nr
+          );
+      }
+
+      /* (re)activate encoder thread */
+      eque->runningThreadsCounter++;
+      g_thread_pool_push (encoderThreadPool
+                         , eque    /* VideoPrefetchData */
+                         , &error
+                         );
+
+      GAP_TIMM_STOP_RECORD(&eque->mainPush2);
+    }
+
+    g_mutex_unlock (eque->poolMutex);
+
+  }
+
+  GAP_TIMM_STOP_RECORD(&eque->mainWriteFrame);
+
+}  /* end p_ffmpeg_write_frame_and_audio_multithread */
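
The enqueue path above combines three pool-level mechanisms: an exclusive GThreadPool with
max_threads = 1, a runningThreadsCounter guarded by poolMutex, and a frameEncodedCond that
the worker signals when it goes idle. The following sketch shows just that "(re)start exactly
one worker" protocol in isolation, again assuming GLib >= 2.32; the names (SketchPoolState,
pending_jobs) are invented and this is not GIMP-GAP code.

#include <glib.h>
#include <stdio.h>

typedef struct {
  GMutex pool_mutex;     /* like poolMutex */
  GCond  idle_cond;      /* like frameEncodedCond */
  gint   running;        /* 0 or 1, like runningThreadsCounter */
  gint   pending_jobs;   /* stands in for READY elements in the ringbuffer */
} SketchPoolState;

static void
worker(gpointer data, gpointer user_data)
{
  SketchPoolState *st = data;

  (void)user_data;
  g_mutex_lock(&st->pool_mutex);
  while (st->pending_jobs > 0)
  {
    st->pending_jobs--;
    g_mutex_unlock(&st->pool_mutex);
    printf("worker: encoding one queued frame\n");   /* stands in for the real encode */
    g_mutex_lock(&st->pool_mutex);
  }
  st->running--;                     /* worker goes idle */
  g_cond_signal(&st->idle_cond);     /* wake the main thread */
  g_mutex_unlock(&st->pool_mutex);
}

int
main(void)
{
  static SketchPoolState st;         /* zero initialized */
  GThreadPool *pool;
  gint frame;

  g_mutex_init(&st.pool_mutex);
  g_cond_init(&st.idle_cond);
  pool = g_thread_pool_new(worker, NULL, 1 /* max_threads */, TRUE /* exclusive */, NULL);

  for (frame = 1; frame <= 10; frame++)
  {
    g_mutex_lock(&st.pool_mutex);
    st.pending_jobs++;
    if (st.running <= 0)             /* (re)activate the single worker, like PUSH-1 / PUSH-2 */
    {
      st.running++;
      g_thread_pool_push(pool, &st, NULL);
    }
    g_mutex_unlock(&st.pool_mutex);
  }

  /* wait until the worker has drained the queue and gone idle */
  g_mutex_lock(&st.pool_mutex);
  while ((st.running > 0) || (st.pending_jobs > 0))
  {
    g_cond_wait(&st.idle_cond, &st.pool_mutex);
  }
  g_mutex_unlock(&st.pool_mutex);

  g_thread_pool_free(pool, FALSE, TRUE);
  return 0;
}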
+
+
+
+
+/* --------------------
+ * p_ffmpeg_write_frame  SINGLEPROCESSOR
+ * --------------------
+ * encode one videoframe using the selected codec and write
+ * the encoded frame to the mediafile as packet.
+ * Passing NULL as gapStoryFetchResult is used to flush one frame from the codec's internal buffer
+ * (typically required after the last frame has already been fed to the codec)
+ */
+static int
+p_ffmpeg_write_frame(t_ffmpeg_handle *ffh, GapStoryFetchResult *gapStoryFetchResult
+  , gboolean force_keyframe, gint vid_track)
+{
+  AVFrame *picture_codec;
+  int encoded_size;
+  int ret;
+  int ii;
+
+  ii = ffh->vst[vid_track].video_stream_index;
+  ret = 0;
+
   if(gap_debug)
   {
-    printf("before free picture structures\n\n");
+     AVCodec  *codec;
+
+     codec = ffh->vst[ii].vid_codec_context->codec;
+
+     printf("\n-------------------------\n");
+     printf("p_ffmpeg_write_frame: START codec: %d track:%d countVideoFramesWritten:%d frame_nr:%d (validFrameNr:%d)\n"
+           , (int)codec
+           , (int)vid_track
+           , (int)ffh->countVideoFramesWritten
+           , (int)ffh->encode_frame_nr
+           , (int)ffh->validEncodeFrameNr
+           );
+     printf("name: %s\n", codec->name);
+     if(gap_debug)
+     {
+       printf("type: %d\n", codec->type);
+       printf("id: %d\n",   codec->id);
+       printf("priv_data_size: %d\n",   codec->priv_data_size);
+       printf("capabilities: %d\n",   codec->capabilities);
+       printf("init fptr: %d\n",   (int)codec->init);
+       printf("encode fptr: %d\n",   (int)codec->encode);
+       printf("close fptr: %d\n",   (int)codec->close);
+       printf("decode fptr: %d\n",   (int)codec->decode);
+    }
+  }
+
+  /* picture to feed the codec */
+  picture_codec = ffh->vst[ii].big_picture_codec;
+
+  /* in case gapStoryFetchResult is NULL
+   * we feed the previously handled picture (e.g. the last frame of the input)
+   * again and again to the codec.
+   * Note that this procedure typically is called with NULL as gapStoryFetchResult
+   * until all frames in the codec's internal buffer are written to the output video.
+   */
+
+  if(gapStoryFetchResult != NULL)
+  {
+    p_ffmpeg_convert_GapStoryFetchResult_to_AVFrame(ffh
+               , picture_codec
+               , gapStoryFetchResult
+               , vid_track
+               );
   }
 
+
+  ret = p_ffmpeg_encodeAndWriteVideoFrame(ffh, picture_codec, force_keyframe, vid_track, ffh->encode_frame_nr);
+
   return (ret);
+
 }  /* end p_ffmpeg_write_frame */
 
 
+
+
 /* -------------------------
  * p_ffmpeg_write_audioframe
  * -------------------------
@@ -3556,19 +4692,6 @@ p_ffmpeg_close(t_ffmpeg_handle *ffh)
          printf("[%d] after g_free video_dummy_buffer\n", ii);
        }
     }
-    if(ffh->vst[ii].yuv420_buffer)
-    {
-       if(gap_debug)
-       {
-         printf("[%d] before g_free yuv420_buffer\n", ii);
-       }
-       g_free(ffh->vst[ii].yuv420_buffer);
-       ffh->vst[ii].yuv420_buffer = NULL;
-       if(gap_debug)
-       {
-         printf("[%d] after g_free yuv420_buffer\n", ii);
-       }
-    }
     if(ffh->vst[ii].yuv420_dummy_buffer)
     {
        if(gap_debug)
@@ -3783,12 +4906,10 @@ p_ffmpeg_encode_pass(GapGveFFMpegGlobalParams *gpp, gint32 current_pass, GapGveM
 {
 #define GAP_FFENC_USE_YUV420P "GAP_FFENC_USE_YUV420P"
   GapGveFFMpegValues   *epp = NULL;
-  t_ffmpeg_handle     *ffh = NULL;
+  t_ffmpeg_handle      *ffh = NULL;
+  EncoderQueue         *eque = NULL;
   GapGveStoryVidHandle        *l_vidhand = NULL;
-  gint32        l_tmp_image_id = -1;
-  gint32        l_layer_id = -1;
-  GimpDrawable *l_drawable = NULL;
-  long          l_cur_frame_nr;
+  long          l_master_frame_nr;
   long          l_step, l_begin, l_end;
   int           l_rc;
   gint32        l_max_master_frame_nr;
@@ -3800,25 +4921,25 @@ p_ffmpeg_encode_pass(GapGveFFMpegGlobalParams *gpp, gint32 current_pass, GapGveM
   t_awk_array   *awp;
   GapCodecNameElem    *l_vcodec_list;
   const char          *l_env;
-  gboolean             l_useYUV420P;
+  GapStoryFetchResult  gapStoryFetchResultLocal;
+  GapStoryFetchResult *gapStoryFetchResult;
+
+
+  static gint32 funcId = -1;
+  static gint32 funcIdVidFetch = -1;
+  static gint32 funcIdVidEncode = -1;
+  static gint32 funcIdVidCopy11 = -1;
+
+  GAP_TIMM_GET_FUNCTION_ID(funcId,          "p_ffmpeg_encode_pass");
+  GAP_TIMM_GET_FUNCTION_ID(funcIdVidFetch,  "p_ffmpeg_encode_pass.VideoFetch");
+  GAP_TIMM_GET_FUNCTION_ID(funcIdVidEncode, "p_ffmpeg_encode_pass.VideoEncode");
+  GAP_TIMM_GET_FUNCTION_ID(funcIdVidCopy11, "p_ffmpeg_encode_pass.VideoCopyLossless");
+
+  GAP_TIMM_START_FUNCTION(funcId);
 
   epp = &gpp->evl;
   awp = &l_awk_arr;
 
-  l_useYUV420P = FALSE;
-  l_env = g_getenv(GAP_FFENC_USE_YUV420P);
-  if(l_env != NULL)
-  {
-    if((*l_env != 'n') && (*l_env != 'N'))
-    {
-      printf("** Warning: environment variable %s turns on YUV420P pre-convert feature\n"
-             "  this may run a bit faster but typically reduces the quality of the resulting video\n"
-             , GAP_FFENC_USE_YUV420P
-             );
-      l_useYUV420P = TRUE;
-    }
-  }
-
   encStatusPtr->current_pass = current_pass;
   encStatusPtr->frames_processed = 0;
 
@@ -3856,7 +4977,6 @@ p_ffmpeg_encode_pass(GapGveFFMpegGlobalParams *gpp, gint32 current_pass, GapGveM
 
 
   l_rc = 0;
-  l_layer_id = -1;
 
   /* make list of frameranges (input frames to feed the encoder) */
   {
@@ -3896,12 +5016,53 @@ p_ffmpeg_encode_pass(GapGveFFMpegGlobalParams *gpp, gint32 current_pass, GapGveM
     return(-1);     /* FFMPEG open Failed */
   }
 
+  gapStoryFetchResult = &gapStoryFetchResultLocal;
+  gapStoryFetchResult->raw_rgb_data = NULL;
+  gapStoryFetchResult->video_frame_chunk_data = ffh->vst[0].video_buffer;
 
 
   /* Calculations for encoding the sound */
   p_sound_precalculations(ffh, awp, gpp);
 
 
+  ffh->isMultithreadEnabled = gap_base_get_gimprc_gboolean_value(
+                                 GAP_GIMPRC_VIDEO_ENCODER_FFMPEG_MULTIPROCESSOR_ENABLE
+                               , FALSE  /* default */
+                                 );
+  if(gap_debug)
+  {
+    printf("pass: (1) isMultithreadEnabled: %d\n"
+      ,(int)ffh->isMultithreadEnabled
+      );
+  }
+
+  if((ffh->isMultithreadEnabled)
+  && (epp->dont_recode_flag != TRUE))
+  {
+    eque = p_init_EncoderQueueResources(ffh, awp);
+  }
+  else
+  {
+    if(ffh->isMultithreadEnabled)
+    {
+      printf("WARNING: multiprocessor support is not implemented for lossless video encoding\n");
+      printf("         therefore single cpu processing will be done due to the lossles render option\n");
+    }
+    /* lossless encoding is not supported in multiprocessor environment
+     * (for 1:1 copy there is no benfit to be expected 
+     * when parallel running encoder thread is used)
+     */
+    ffh->isMultithreadEnabled = FALSE;
+  }
+
+
+  if(gap_debug)
+  {
+    printf("pass: (2) isMultithreadEnabled: %d\n"
+      ,(int)ffh->isMultithreadEnabled
+      );
+  }
+
   /* special setup (makes it possible to code sequences backwards)
    * (NOTE: Audio is NEVER played backwards)
    */
@@ -3917,12 +5078,11 @@ p_ffmpeg_encode_pass(GapGveFFMpegGlobalParams *gpp, gint32 current_pass, GapGveM
   l_end   = gpp->val.range_to;
   l_max_master_frame_nr = abs(l_end - l_begin) + 1;
 
-  l_cur_frame_nr = l_begin;
+  l_master_frame_nr = l_begin;
   ffh->encode_frame_nr = 1;
   ffh->countVideoFramesWritten = 0;
   while(l_rc >= 0)
   {
-    gboolean l_fetch_ok;
     gboolean l_force_keyframe;
     gint32   l_video_frame_chunk_size;
     gint32   l_video_frame_chunk_hdr_size;
@@ -3937,77 +5097,114 @@ p_ffmpeg_encode_pass(GapGveFFMpegGlobalParams *gpp, gint32 current_pass, GapGveM
       printf("\nFFenc: before gap_story_render_fetch_composite_image_or_chunk\n");
     }
 
-    l_fetch_ok = gap_story_render_fetch_composite_image_or_chunk(l_vidhand
-                                           , l_cur_frame_nr
-                                           , (gint32)  gpp->val.vid_width
-                                           , (gint32)  gpp->val.vid_height
-                                           , gpp->val.filtermacro_file
-                                           , &l_layer_id           /* output */
-                                           , &l_tmp_image_id       /* output */
-                                           , epp->dont_recode_flag
-                                           , l_vcodec_list           /* list of compatible vcodec_names */
-                                           , &l_force_keyframe
-                                           , ffh->vst[0].video_buffer
-                                           , &l_video_frame_chunk_size
-                                           , ffh->vst[0].video_buffer_size    /* IN max size */
-                                           , gpp->val.framerate
-                                           , l_max_master_frame_nr
-                                           , &l_video_frame_chunk_hdr_size
-                                           , l_check_flags
-                                           );
+    GAP_TIMM_START_FUNCTION(funcIdVidFetch);
+    if(eque)
+    {
+      GAP_TIMM_START_RECORD(&eque->mainReadFrame);
+    }
+
+    
+
+    gap_story_render_fetch_composite_image_or_buffer_or_chunk(l_vidhand
+                    , l_master_frame_nr  /* starts at 1 */
+                    , (gint32)  gpp->val.vid_width       /* desired Video Width in pixels */
+                    , (gint32)  gpp->val.vid_height      /* desired Video Height in pixels */
+                    , gpp->val.filtermacro_file
+                    , epp->dont_recode_flag              /* IN: TRUE try to fetch compressed chunk if possible */
+                    , TRUE                               /* IN: enable_rgb888_flag TRUE deliver rgb888 result where possible */
+                    , l_vcodec_list                      /* IN: list of video_codec names that are compatible to the calling encoder program */
+                    , ffh->vst[0].video_buffer_size      /* IN: video_frame_chunk_maxsize sizelimit (larger chunks are not fetched) */
+                    , gpp->val.framerate
+                    , l_max_master_frame_nr              /* the number of frames that will be encoded in total */
+                    , l_check_flags                      /* IN: check_flags combination of GAP_VID_CHCHK_FLAG_* flag values */
+                    , gapStoryFetchResult                /* OUT: struct with fetch result */
+                 );
+                 
+    l_force_keyframe = gapStoryFetchResult->force_keyframe;
+
+    GAP_TIMM_STOP_FUNCTION(funcIdVidFetch);
+    if(eque)
+    {
+      GAP_TIMM_STOP_RECORD(&eque->mainReadFrame);
+    }
 
     if(gap_debug)
     {
-      printf("\nFFenc: after gap_story_render_fetch_composite_image_or_chunk image_id:%d layer_id:%d\n"
-        , (int)l_tmp_image_id
-        , (int) l_layer_id
+      printf("\nFFenc: after gap_story_render_fetch_composite_image_or_chunk master_frame_nr:%d image_id:%d layer_id:%d resultEnum:%d\n"
+        , (int) l_master_frame_nr
+        , (int) gapStoryFetchResult->image_id
+        , (int) gapStoryFetchResult->layer_id
+        , (int) gapStoryFetchResult->resultEnum
         );
     }
 
     /* this block is done foreach handled video frame */
-    if(l_fetch_ok)
+    if(gapStoryFetchResult->resultEnum != GAP_STORY_FETCH_RESULT_IS_ERROR)
     {
 
-      if (l_video_frame_chunk_size > 0)
+      if (gapStoryFetchResult->resultEnum == GAP_STORY_FETCH_RESULT_IS_COMPRESSED_CHUNK)
       {
         l_cnt_reused_frames++;
         if (gap_debug)
         {
-          printf("DEBUG: 1:1 copy of frame %d\n", (int)l_cur_frame_nr);
+          printf("DEBUG: 1:1 copy of frame %d\n", (int)l_master_frame_nr);
         }
+        l_video_frame_chunk_size = gapStoryFetchResult->video_frame_chunk_size;
+        l_video_frame_chunk_hdr_size = gapStoryFetchResult->video_frame_chunk_hdr_size;
+
+        GAP_TIMM_START_FUNCTION(funcIdVidCopy11);
 
         /* dont recode, just copy video chunk to output videofile */
         p_ffmpeg_write_frame_chunk(ffh, l_video_frame_chunk_size, 0 /* vid_track */);
+
+        GAP_TIMM_STOP_FUNCTION(funcIdVidCopy11);
+
+        /* encode AUDIO FRAME (audio data for playbacktime of one frame) */
+        if(ffh->countVideoFramesWritten > 0)
+        {
+          p_process_audio_frame(ffh, awp);
+        }
       }
       else   /* encode one VIDEO FRAME */
       {
         l_cnt_encoded_frames++;
-        l_drawable = gimp_drawable_get (l_layer_id);
 
         if (gap_debug)
         {
           printf("DEBUG: %s encoding frame %d\n"
                 , epp->vcodec_name
-                , (int)l_cur_frame_nr
+                , (int)l_master_frame_nr
                 );
         }
 
         /* store the compressed video frame */
-        if (gap_debug) printf("GAP_FFMPEG: Writing frame nr. %d\n", (int)l_cur_frame_nr);
+        if (gap_debug)
+        {
+          printf("GAP_FFMPEG: Writing frame nr. %d\n", (int)l_master_frame_nr);
+        }
 
-        p_ffmpeg_write_frame(ffh, l_drawable, l_force_keyframe, 0, /* vid_track */  l_useYUV420P);
-        gimp_drawable_detach (l_drawable);
-        /* destroy the tmp image */
-        gimp_image_delete(l_tmp_image_id);
-      }
 
-      /* encode AUDIO FRAME (audio data for playbacktime of one frame) */
-      if(ffh->countVideoFramesWritten > 0)
-      {
-        p_process_audio_frame(ffh, awp);
+        GAP_TIMM_START_FUNCTION(funcIdVidEncode);
+
+        if(ffh->isMultithreadEnabled)
+        {
+          p_ffmpeg_write_frame_and_audio_multithread(eque, gapStoryFetchResult, l_force_keyframe, 0 /* vid_track */ );
+        }
+        else
+        {
+          p_ffmpeg_write_frame(ffh, gapStoryFetchResult, l_force_keyframe, 0 /* vid_track */ );
+          if(ffh->countVideoFramesWritten > 0)
+          {
+            p_process_audio_frame(ffh, awp);
+          }
+        }
+
+
+        GAP_TIMM_STOP_FUNCTION(funcIdVidEncode);
       }
 
 
+
     }
     else  /* if fetch_ok */
     {
@@ -4024,14 +5221,17 @@ p_ffmpeg_encode_pass(GapGveFFMpegGlobalParams *gpp, gint32 current_pass, GapGveM
       gap_gve_misc_do_master_encoder_progress(encStatusPtr);
     }
 
-    /* terminate on cancel reqeuset (CANCEL button was pressed in the master encoder dialog) */
-    if(gap_gve_misc_is_master_encoder_cancel_request(encStatusPtr))
+    /* terminate on cancel request (CANCEL button was pressed in the master encoder dialog)
+     * or in case of errors.
+     */
+    if((gap_gve_misc_is_master_encoder_cancel_request(encStatusPtr))
+    || (l_rc < 0))
     {
        break;
     }
 
     /* detect regular end */
-    if((l_cur_frame_nr == l_end) || (l_rc < 0))
+    if(l_master_frame_nr == l_end)
     {
        /* handle encoder latency (encoders typically hold some frames in internal buffers
         * that must be flushed after the last input frame was feed to the codec)
@@ -4042,6 +5242,15 @@ p_ffmpeg_encode_pass(GapGveFFMpegGlobalParams *gpp, gint32 current_pass, GapGveM
        int flushTries;
        int flushCount;
 
+       if(ffh->isMultithreadEnabled)
+       {
+         /* wait until the encoder thread has processed all enqueued frames so far.
+          * (otherwise the ffh->countVideoFramesWritten information will
+          * not count the still unprocessed remaining frames in the queue)
+          */
+         p_waitUntilEncoderQueIsProcessed(eque);
+       }
+
        flushTries = 2 + (ffh->validEncodeFrameNr - ffh->countVideoFramesWritten);
        
        for(flushCount = 0; flushCount < flushTries; flushCount++)
@@ -4056,20 +5265,39 @@ p_ffmpeg_encode_pass(GapGveFFMpegGlobalParams *gpp, gint32 current_pass, GapGveM
           * and some codecs (mpeg2video) complain about "non monotone timestamps"  otherwise.
           */
          ffh->encode_frame_nr++;
-         p_ffmpeg_write_frame(ffh, NULL, l_force_keyframe, 0, /* vid_track */  l_useYUV420P);
 
-         /* continue encode AUDIO FRAME (audio data for playbacktime of one frame)
-          */
-          if(ffh->countVideoFramesWritten > 0)
-          {
-            p_process_audio_frame(ffh, awp);
-          }
+         GAP_TIMM_START_FUNCTION(funcIdVidEncode);
+
+         if(ffh->isMultithreadEnabled)
+         {
+           p_ffmpeg_write_frame_and_audio_multithread(eque, NULL, l_force_keyframe, 0 /* vid_track */ );
+           if(gap_debug)
+           {
+             printf("p_ffmpeg_encode_pass: Flush-Loop for Codec remaining frames MainTID:%d, flushCount:%d\n"
+                ,p_base_get_thread_id_as_int()
+                ,(int)flushCount
+                );
+             p_debug_print_RingbufferStatus(eque);
+           }
+         }
+         else
+         {
+           p_ffmpeg_write_frame(ffh, NULL, l_force_keyframe, 0 /* vid_track */ );
+           /* continue encode AUDIO FRAME (audio data for playbacktime of one frame)
+            */
+           if(ffh->countVideoFramesWritten > 0)
+           {
+             p_process_audio_frame(ffh, awp);
+           }
+         }
+
+         GAP_TIMM_STOP_FUNCTION(funcIdVidEncode);
          
        }
        break;
     }
     /* advance to next frame */
-    l_cur_frame_nr += l_step;
+    l_master_frame_nr += l_step;
     ffh->encode_frame_nr++;
 
   }  /* end loop foreach frame */
@@ -4080,6 +5308,12 @@ p_ffmpeg_encode_pass(GapGveFFMpegGlobalParams *gpp, gint32 current_pass, GapGveM
     {
       printf("before: p_ffmpeg_close\n");
     }
+    
+    if(ffh->isMultithreadEnabled)
+    {
+      p_waitUntilEncoderQueIsProcessed(eque);
+    }
+    
     p_ffmpeg_close(ffh);
     if(gap_debug)
     {
@@ -4117,6 +5351,40 @@ p_ffmpeg_encode_pass(GapGveFFMpegGlobalParams *gpp, gint32 current_pass, GapGveM
     printf("total handled frames: %d\n", (int)l_cnt_encoded_frames + l_cnt_reused_frames);
   }
 
+
+  GAP_TIMM_STOP_FUNCTION(funcId);
+  GAP_TIMM_PRINT_FUNCTION_STATISTICS();
+
+  if(eque)
+  {
+    g_usleep(30000);  /* sleep 0.03 seconds */
+    
+    /*  print MAIN THREAD runtime statistics */
+    GAP_TIMM_PRINT_RECORD(&eque->mainReadFrame,         "... mainReadFrame");
+    GAP_TIMM_PRINT_RECORD(&eque->mainWriteFrame,        "... mainWriteFrame");
+    GAP_TIMM_PRINT_RECORD(&eque->mainDrawableToRgb,     "... mainWriteFrame.DrawableToRgb");
+    GAP_TIMM_PRINT_RECORD(&eque->mainElemMutexWaits,    "... mainElemMutexWaits");
+    GAP_TIMM_PRINT_RECORD(&eque->mainPoolMutexWaits,    "... mainPoolMutexWaits");
+    GAP_TIMM_PRINT_RECORD(&eque->mainEnqueueWaits,      "... mainEnqueueWaits");
+    GAP_TIMM_PRINT_RECORD(&eque->mainPush2,             "... mainPush2 (re-start times of encoder thread)");
+
+    /*  print Encoder THREAD runtime statistics */
+    GAP_TIMM_PRINT_RECORD(&eque->ethreadElemMutexWaits, "... ethreadElemMutexWaits");
+    GAP_TIMM_PRINT_RECORD(&eque->ethreadPoolMutexWaits, "... ethreadPoolMutexWaits");
+    GAP_TIMM_PRINT_RECORD(&eque->ethreadEncodeFrame,    "... ethreadEncodeFrame");
+
+    p_free_EncoderQueueResources(eque);
+    g_free(eque);
+  }
+ 
+  if (gapStoryFetchResult->raw_rgb_data != NULL)
+  {
+    /* finally free the rgb data that was optionally allocated in storyboard fetch calls.
+     * The raw_rgb_data is typically allocated at first direct RGB888 fetch (if any)
+     * and reused in all further direct RGB888 fetches.
+     */
+    g_free(gapStoryFetchResult->raw_rgb_data);
+  }
   return l_rc;
 }    /* end p_ffmpeg_encode_pass */
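
The multiprocessor switch checked in this pass is read from gimprc via
gap_base_get_gimprc_gboolean_value() (part of the new libgapbase code in this commit, not
shown in this hunk). As a rough illustration of how such a boolean gimprc value can be read
in a GIMP plug-in, here is a hypothetical helper built on the stock libgimp call
gimp_gimprc_query(); the helper name and its yes/true parsing are assumptions, not the actual
libgapbase implementation, and the fragment is meant to be compiled as part of a plug-in.

#include <libgimp/gimp.h>

static gboolean
sketch_get_gimprc_gboolean(const char *token, gboolean default_value)
{
  gchar    *value;
  gboolean  result;

  result = default_value;
  value = gimp_gimprc_query(token);   /* returns NULL when the token is not set in gimprc */
  if (value != NULL)
  {
    /* treat a leading 'y'/'Y'/'t'/'T' as TRUE, everything else as FALSE */
    result = (*value == 'y') || (*value == 'Y') || (*value == 't') || (*value == 'T');
    g_free(value);
  }
  return result;
}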
 
diff --git a/vid_enc_ffmpeg/gap_enc_ffmpeg_main.h b/vid_enc_ffmpeg/gap_enc_ffmpeg_main.h
index 369fbba..f9f2cb9 100644
--- a/vid_enc_ffmpeg/gap_enc_ffmpeg_main.h
+++ b/vid_enc_ffmpeg/gap_enc_ffmpeg_main.h
@@ -192,11 +192,15 @@
 
 
 #define GAP_GVE_FF_QP2LAMBDA   FF_QP2LAMBDA
-
+#define GAP_ENCODER_PRESET_NAME_MAX_LENGTH 60
+#define GAP_ENCODER_PRESET_FILENAME_MAX_LENGTH 1024
 
 
 /* GapGveFFMpegValues ffmpeg specific encoder params */
 typedef struct {
+  gint32  presetId;
+  char    presetName[GAP_ENCODER_PRESET_NAME_MAX_LENGTH];
+  char    presetFileName[GAP_ENCODER_PRESET_FILENAME_MAX_LENGTH];
   char    current_vid_extension[80];
 
   /* ffmpeg options */
@@ -423,6 +427,9 @@ typedef struct {
   gint32 partition_X264_PART_P4X4;
   gint32 partition_X264_PART_B8X8;
 
+
+  void *next;
+
 } GapGveFFMpegValues;
 
 
diff --git a/vid_enc_ffmpeg/gap_enc_ffmpeg_par.c b/vid_enc_ffmpeg/gap_enc_ffmpeg_par.c
index ff45e86..f0909af 100644
--- a/vid_enc_ffmpeg/gap_enc_ffmpeg_par.c
+++ b/vid_enc_ffmpeg/gap_enc_ffmpeg_par.c
@@ -39,8 +39,14 @@
 #include "gap_enc_ffmpeg_main.h"
 #include "gap_enc_ffmpeg_par.h"
 
+#define GAP_FFENC_FILEHEADER_LINE "# GIMP / GAP FFMPEG videoencoder parameter file"
+
+
 extern int gap_debug;
 
+static GapGveFFMpegValues *eppRoot = NULL;
+static gint nextPresetId = GAP_GVE_FFMPEG_PRESET_MAX_ELEMENTS;
+
 
 /* --------------------------
  * p_set_keyword_bool32
@@ -447,8 +453,8 @@ gap_ffpar_set(const char *filename, GapGveFFMpegValues *epp)
   p_set_master_keywords(keylist, epp);
   l_rc = gap_val_rewrite_file(keylist
                           ,filename
-                          ,"# GIMP / GAP FFMPEG videoencoder parameter file"   /*  hdr_text */
-                          ,")"                                                 /* terminate char */
+                          ,GAP_FFENC_FILEHEADER_LINE   /*  hdr_text */
+                          ,")"                         /* terminate char */
                           );
 
   gap_val_free_keylist(keylist);
@@ -467,3 +473,160 @@ gap_ffpar_set(const char *filename, GapGveFFMpegValues *epp)
 
   return(l_rc);
 }  /* end gap_ffpar_set */
+
+
+
+
+/* ----------------------------------
+ * gap_ffpar_isValidPresetFile
+ * ----------------------------------
+ * check if specified filename is a preset file.
+ */
+gboolean
+gap_ffpar_isValidPresetFile(const char *fullPresetFilename)
+{
+
+  if (!g_file_test(fullPresetFilename, G_FILE_TEST_IS_DIR))
+  {
+    FILE        *l_fp;
+    char         buffer[500];
+    
+    /* load first few bytes into Buffer */
+    l_fp = g_fopen(fullPresetFilename, "rb");              /* open read */
+    if(l_fp != NULL)
+    {
+      fread(&buffer[0], 1, (size_t)sizeof(buffer -1), l_fp);
+      fclose(l_fp);
+      if(memcmp(&buffer[0], GAP_FFENC_FILEHEADER_LINE, strlen(GAP_FFENC_FILEHEADER_LINE -1)) == 0)
+      {
+        return(TRUE);
+      }
+    }
+  }
+  return(FALSE);
+
+}  /* end gap_ffpar_isValidPresetFile */
+
+
+/* ----------------------------------
+ * p_find_preset_by_filename
+ * ----------------------------------
+ */
+static GapGveFFMpegValues *
+p_find_preset_by_filename(const char *fullPresetFilename)
+{
+  GapGveFFMpegValues *epp;
+
+  for(epp = eppRoot; epp != NULL; epp = epp->next)
+  {
+    if(strncmp(&epp->presetFileName[0], fullPresetFilename, GAP_ENCODER_PRESET_NAME_MAX_LENGTH -1) == 0)
+    {
+      return(epp);
+    }
+  }
+  return (NULL);
+  
+}  /* end p_find_preset_by_filename */
+
+/* ----------------------------------
+ * gap_ffpar_getPresetList
+ * ----------------------------------
+ * returns a list of all available encoder presets.
+ * the list includes hardcoded internal presets
+ * and optional preset files found in $GIMPDIR/video_encoder_presets
+ *
+ * presetId is generated as a unique value above the id range reserved for hardcoded presets
+ * presetName is derived from the filename.
+ */
+GapGveFFMpegValues *
+gap_ffpar_getPresetList()
+{
+   GapGveFFMpegValues *epp;
+   GapGveFFMpegValues *eppPrev;
+   char          *presetDir;
+   GDir          *dirPtr;
+   const gchar   *entry;
+
+   eppPrev = eppRoot;
+   for(epp = eppRoot; epp != NULL; epp = epp->next)
+   {
+     eppPrev = epp;
+   }
+   
+   presetDir = g_build_filename(gimp_directory(), GAP_VIDEO_ENCODER_PRESET_DIR, NULL);
+   dirPtr = g_dir_open( presetDir, 0, NULL );
+   if(dirPtr != NULL)
+   {
+     while ( (entry = g_dir_read_name( dirPtr )) != NULL )
+     {
+       char          *fullPresetFilename;
+       
+       fullPresetFilename = g_build_filename(presetDir, entry, NULL);
+       
+       if(gap_debug)
+       {
+         printf("FILE:%s ENTRY:%s\n", fullPresetFilename, entry);
+       }
+       
+       if(gap_ffpar_isValidPresetFile(fullPresetFilename) == TRUE)
+       {
+         epp = p_find_preset_by_filename(fullPresetFilename);
+         if(epp != NULL)
+         {
+           /* read the parameters from the preset file
+            * to refresh values (that might have changed since last check)
+            * but keep presetId and presetName
+            */
+           gap_ffpar_get(fullPresetFilename, epp);
+         }
+         else
+         {
+           /* create a new entry in the static list of presets */
+           
+           epp = g_new(GapGveFFMpegValues ,1);
+          
+           /* read the parameters from the preset file */
+           gap_ffpar_get(fullPresetFilename, epp);
+          
+           /* generate name and id (from filename and position in the list) */
+           epp->presetId = nextPresetId;
+           g_snprintf(&epp->presetName[0]
+                    , (GAP_ENCODER_PRESET_NAME_MAX_LENGTH -1)
+                    , "%d %s"
+                    , epp->presetId
+                    , entry
+                    );
+           g_snprintf(&epp->presetFileName[0]
+                    , (GAP_ENCODER_PRESET_FILENAME_MAX_LENGTH -1)
+                    , "%s"
+                    , fullPresetFilename
+                    );
+           epp->next = NULL;
+
+           if(gap_debug)
+           {
+            printf("PRESET:%s\n", &epp->presetName[0]);
+           }
+          
+           /* append epp to the list */
+           if(eppPrev == NULL)
+           {
+             eppRoot = epp;
+           }
+           else
+           {
+             eppPrev->next = epp;
+           }
+           eppPrev = epp;
+           nextPresetId++;
+         }  
+       }
+       g_free(fullPresetFilename);
+     }
+     g_dir_close( dirPtr );
+   }
+   g_free(presetDir);
+   
+   return(eppRoot);
+  
+}  /* end gap_ffpar_getPresetList */
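
A short usage sketch for the new preset list API: a caller (for example the encoder GUI)
can walk the list returned by gap_ffpar_getPresetList() to offer the presets for selection.
The elements stay owned by the static list inside gap_enc_ffmpeg_par.c, so the caller only
reads them; the function name in this sketch is illustrative.

#include <stdio.h>
#include "gap_enc_ffmpeg_main.h"
#include "gap_enc_ffmpeg_par.h"

static void
sketch_print_available_presets(void)
{
  GapGveFFMpegValues *epp;

  /* the list is (re)built on each call: preset files are re-read,
   * already known entries keep their presetId and presetName.
   */
  for (epp = gap_ffpar_getPresetList(); epp != NULL; epp = epp->next)
  {
    printf("presetId:%d name:%s file:%s\n"
          , (int)epp->presetId
          , epp->presetName
          , epp->presetFileName
          );
  }
}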
diff --git a/vid_enc_ffmpeg/gap_enc_ffmpeg_par.h b/vid_enc_ffmpeg/gap_enc_ffmpeg_par.h
index 2caf6ac..a62fa76 100644
--- a/vid_enc_ffmpeg/gap_enc_ffmpeg_par.h
+++ b/vid_enc_ffmpeg/gap_enc_ffmpeg_par.h
@@ -30,8 +30,13 @@
 #include "libgimp/gimp.h"
 #include "gap_enc_ffmpeg_main.h"
 
+#define GAP_VIDEO_ENCODER_PRESET_DIR "video_encoder_presets"
+
 int   gap_ffpar_set(const char *filename, GapGveFFMpegValues *ffpar_ptr);
 void  gap_ffpar_get(const char *filename, GapGveFFMpegValues *ffpar_ptr);
 
+gboolean  gap_ffpar_isValidPresetFile(const char *fullPresetFilename);
+GapGveFFMpegValues * gap_ffpar_getPresetList();
+
 
 #endif


