[mousetrap] Added Backend support



commit 3c1635e31ba22ba9e19b85ec90afa3b213dc2f62
Author: Flavio Percoco Premoli <flaper87 gmail com>
Date:   Sun Jul 12 21:37:30 2009 +0200

    Added Backend support

 .gitignore                 |    1 +
 src/mousetrap/mousetrap.py |    2 +-
 src/mousetrap/ui/main.py   |   10 +-
 src/ocvfw/_ocv.py          |  364 +++++++++++++++++++++++++++-----------------
 src/ocvfw/commons.py       |   17 ++
 src/ocvfw/dev/camera.py    |  140 ++++++++++-------
 src/ocvfw/idm/eyes.py      |   11 +-
 src/ocvfw/idm/forehead.py  |   12 +-
 8 files changed, 342 insertions(+), 215 deletions(-)
---
diff --git a/.gitignore b/.gitignore
index a1d6b6e..ab2c86f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,3 +7,4 @@ compile
 *.patch
 *~
 *.swp
+*.dropbox
diff --git a/src/mousetrap/mousetrap.py b/src/mousetrap/mousetrap.py
index 8671f4a..8ecf10a 100644
--- a/src/mousetrap/mousetrap.py
+++ b/src/mousetrap/mousetrap.py
@@ -114,7 +114,7 @@ class Controller():
         Arguments:
         - self: The main object pointer.
         """
-        self.itf.update_frame(self.idm.get_image(), self.idm.get_pointer())
+        self.itf.update_frame(self.idm.get_capture(), self.idm.get_pointer())
         return True
 
     def update_pointers(self):
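
The controller side of this change is small but it shifts responsibility: update_frame() now hands the UI the whole Capture object instead of a pre-rendered image, so scaling and pixbuf conversion happen in the view. A minimal sketch of the resulting update loop, assuming the callback is driven by gobject.timeout_add (the helper below is illustrative, not part of the patch):

    import gobject

    def start_update_loop(itf, idm, fps=100):
        """Illustrative only: drive Controller-style frame updates with a timeout."""
        def tick():
            # As in Controller.update_frame(): pass the Capture itself, not an
            # image, so the UI decides how to convert and scale it.
            itf.update_frame(idm.get_capture(), idm.get_pointer())
            return True  # returning True keeps the gobject timeout alive
        return gobject.timeout_add(1000 // fps, tick)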
diff --git a/src/mousetrap/ui/main.py b/src/mousetrap/ui/main.py
index e0ae56f..52151ff 100644
--- a/src/mousetrap/ui/main.py
+++ b/src/mousetrap/ui/main.py
@@ -170,7 +170,7 @@ class MainGui( gtk.Window ):
         debug.debug("ui.main", "Addons loaded")
 
 
-    def update_frame(self, img, point):
+    def update_frame(self, cap, point):
         """
         Updates the image
 
@@ -179,15 +179,11 @@ class MainGui( gtk.Window ):
         - img: The IPLimage object.
         """
 
-        if not img:
+        if not cap.image():
             return False
 
-        #self.script.update_items(point)
-        buff = gtk.gdk.pixbuf_new_from_data( img.imageData, gtk.gdk.COLORSPACE_RGB, False, 8, \
-                                             int(img.width), int(img.height), img.widthStep )
-
         #sets new pixbuf
-        self.cap_image.set_from_pixbuf(buff)
+        self.cap_image.set_from_pixbuf(cap.to_gtk_buff().scale_simple(200, 160, gtk.gdk.INTERP_BILINEAR))
 
 #     def recalcPoint( self, widget, flip = ''):
 #         """
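
update_frame() no longer builds the pixbuf by hand; the Capture converts itself via to_gtk_buff() and the window only scales the result. Roughly, the new call amounts to the following, assuming the plain IplImage path of to_gtk_buff() defined later in this patch (the helper name is made up):

    import gtk

    def frame_to_scaled_pixbuf(cap, width=200, height=160):
        """Sketch of what update_frame() now does through cap.to_gtk_buff()."""
        img = cap.image()
        if not img:
            return None
        # to_gtk_buff() wraps the raw image data in a GdkPixbuf...
        buff = gtk.gdk.pixbuf_new_from_data(img.imageData,
                                            gtk.gdk.COLORSPACE_RGB, False, 8,
                                            int(img.width), int(img.height),
                                            img.widthStep)
        # ...and the UI scales it, instead of asking the idm for a resized frame.
        return buff.scale_simple(width, height, gtk.gdk.INTERP_BILINEAR)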
diff --git a/src/ocvfw/_ocv.py b/src/ocvfw/_ocv.py
index d4ddbfa..d836f8b 100644
--- a/src/ocvfw/_ocv.py
+++ b/src/ocvfw/_ocv.py
@@ -18,7 +18,6 @@
 # You should have received a copy of the GNU General Public License
 # along with Ocvfw.  If not, see <http://www.gnu.org/licenses/>>.
 
-
 """Little  Framework for OpenCV Library."""
 
 __id__        = "$Id$"
@@ -29,21 +28,10 @@ __license__   = "GPLv2"
 
 import time
 import debug
+import commons as co
 
-
-try:
-    from opencv import cv
-    from opencv import highgui
-except:
-    print "This modules depends of opencv libraries"
-
-
-class Ocvfw:
-    """
-    This Class controlls the main camera functions.
-    It works as a little framework for OpenCV.
-    """
-
+class OcvfwBase:
+    
     def __init__( self ):
         """
         Initialize the module and set its main variables.
@@ -54,108 +42,125 @@ class Ocvfw:
         self.img_lkpoints = { "current" : [],
                               "last"    : [],
                               "points"  : [] }
+
+        self.__lk_swap = False
         self.imageScale   = 1.5
 
-    def add_message(self, message, font=cv.CV_FONT_HERSHEY_COMPLEX, poss=None):
+    def set(self, key, value):
         """
-        Write a message into the image.
-
-        Arguments:
-        - self: The main object pointer.
-        - message: A string with the message.
-        - font: An OpenCV font to use.
-        - poss: The position of the message in the image. NOTE: Not enabled yet.
         """
-
-        font = cv.cvInitFont ( font, 1, 1, 0.0, 1, cv.CV_AA)
-        textSize, ymin = cv.cvGetTextSize (message, font)
-        pt1 = cv.cvPoint ( ( self.img.width - textSize.width ) / 2 , 20 )
-        cv.cvPutText (self.img, message, pt1, font, cv.cvScalar (255, 0, 0))
-
-    def get_haar_points(self, haarCascade, method=cv.CV_HAAR_DO_CANNY_PRUNING):
+        if hasattr(self, "%s" % key):
+            getattr(self, "%s" % key)(value)
+            debug.debug("OcvfwBase", "Changed %s value to %s" % (key, value))
+            return True
+        
+        debug.debug("OcvfwBase", "%s not found" % (key))
+        return False
+
+    def lk_swap(self, set=None):
         """
-        Search for points matching the haarcascade selected.
+        Enables/disables the LK point swapping action.
 
         Arguments:
         - self: The main object pointer.
-        - haarCascade: The selected cascade.
-        - methode: The search method to use. DEFAULT: cv.CV_HAAR_DO_CANNY_PRUNING.
-
-        Returns a list with the matches.
+        - set: The new value. If None returns the current state.
         """
+        
+        if set is None:
+            return self.__lk_swap
+        
+        self.__lk_swap = set
 
-        cascade = cv.cvLoadHaarClassifierCascade( haarCascade, self.imgSize )
-
-        if not cascade:
-            debug.exception( "ocvfw", "The Haar Classifier Cascade load failed" )
-
-        cv.cvResize( self.img, self.small_img, cv.CV_INTER_LINEAR )
+    def new_image(self, size, num, ch):
+        """
+        Creates a new image with the given size, depth and number of channels.
+        """
 
-        cv.cvClearMemStorage( self.storage )
+        if isinstance(size, tuple):
+            size = co.cv.cvSize( size[0], size[1])
 
-        points = cv.cvHaarDetectObjects( self.small_img, cascade, self.storage, 1.2, 2, method, cv.cvSize(20, 20) )
+        return co.cv.cvCreateImage( size, num, ch)
 
-        if points:
-            matches = [ [ cv.cvPoint( int(r.x*self.imageScale), int(r.y*self.imageScale)), \
-                          cv.cvPoint( int((r.x+r.width)*self.imageScale), int((r.y+r.height)*self.imageScale) )] \
-                          for r in points]
-            debug.debug( "ocvfw", "cmGetHaarPoints: detected some matches" )
-            return matches
-
-    def get_haar_roi_points(self, haarCascade, rect, origSize=(0, 0), method=cv.CV_HAAR_DO_CANNY_PRUNING):
+    def set_camera_idx(self, idx):
         """
-        Search for points matching the haarcascade selected.
+        Changes the camera device index.
 
         Arguments:
         - self: The main object pointer.
-        - haarCascade: The selected cascade.
-        - methode: The search method to use. DEFAULT: cv.CV_HAAR_DO_CANNY_PRUNING.
-
-        Returns a list with the matches.
+        - idx: The camera index. For Example: 0 for /dev/video0
         """
+        self.idx = idx
 
-        cascade = cv.cvLoadHaarClassifierCascade( haarCascade, self.imgSize )
+    def wait_key(self, num):
+        """
+        Simple call to the co.hg.cvWaitKey function, which has to be called periodically.
 
-        if not cascade:
-            debug.exception( "ocvfw", "The Haar Classifier Cascade load failed" )
+        Arguments:
+        - self: The main object pointer.
+        - num: An int value.
+        """
+        return co.hg.cvWaitKey(num)
+    
+    def start_camera(self, params = None):
+        """
+        Starts the camera capture using co.hg.
 
-        cv.cvClearMemStorage(self.storage)
+        Arguments:
+        - params: A list with the capture properties. NOTE: Not implemented yet.
+        """
+        self.capture = co.hg.cvCreateCameraCapture( int(self.idx) )
+        debug.debug( "ocvfw", "cmStartCamera: Camera Started" )
+    
+    def query_image(self, bgr=False, flip=False):
+        """
+        Queries the new frame.
 
-        imageROI = cv.cvGetSubRect(self.img, rect)
+        Arguments:
+        - self: The main object pointer.
+        - bgr: If True, the image will be converted from RGB to BGR.
 
-        if cascade:
-            points = cv.cvHaarDetectObjects( imageROI, cascade, self.storage,
-                                    1.2, 2, method, cv.cvSize(20,20) )
-        else:
-            debug.exception( "ocvfw", "The Haar Classifier Cascade load Failed (ROI)" )
+        Returns True; the queried frame is stored in self.img.
+        """
 
-        if points:
-            matches = [ [ cv.cvPoint( int(r.x+origSize[0]), int(r.y+origSize[1])), \
-                          cv.cvPoint( int(r.x+r.width+origSize[0]), int(r.y+r.height+origSize[1] ))] \
-                          for r in points]
+        frame = co.hg.cvQueryFrame( self.capture )
 
-            debug.debug( "ocvfw", "cmGetHaarROIPoints: detected some matches" )
-            return matches
+        if not  self.img:
+            self.storage        = co.cv.cvCreateMemStorage(0)
+            self.imgSize        = co.cv.cvGetSize (frame)
+            self.img            = co.cv.cvCreateImage ( self.imgSize, 8, 3 )
+            #self.img.origin     = frame.origin
+            self.grey           = co.cv.cvCreateImage ( self.imgSize, 8, 1 )
+            self.yCrCb          = co.cv.cvCreateImage ( self.imgSize, 8, 3 )
+            self.prevGrey       = co.cv.cvCreateImage ( self.imgSize, 8, 1 )
+            self.pyramid        = co.cv.cvCreateImage ( self.imgSize, 8, 1 )
+            self.prevPyramid    = co.cv.cvCreateImage ( self.imgSize, 8, 1 )
+            self.small_img       = co.cv.cvCreateImage( co.cv.cvSize( co.cv.cvRound ( self.imgSize.width/self.imageScale),
+                                    co.cv.cvRound ( self.imgSize.height/self.imageScale) ), 8, 3 )
+        self.img = frame
+        co.cv.cvCvtColor(self.img, self.grey, co.cv.CV_BGR2GRAY)
 
+        self.wait_key(10)
+        return True
+    
     def set_lkpoint(self, point):
         """
         Set a point to follow it using the L. Kallman method.
 
         Arguments:
         - self: The main object pointer.
-        - point: A cv.cvPoint Point.
+        - point: A co.cv.cvPoint Point.
         """
 
-        cvPoint = cv.cvPoint( point.x, point.y )
+        cvPoint = co.cv.cvPoint( point.x, point.y )
 
-        self.img_lkpoints["current"] = [ cv.cvPointTo32f ( cvPoint ) ]
+        self.img_lkpoints["current"] = [ co.cv.cvPointTo32f ( cvPoint ) ]
 
         if self.img_lkpoints["current"]:
-            cv.cvFindCornerSubPix (
+            co.cv.cvFindCornerSubPix (
                 self.grey,
                 self.img_lkpoints["current"],
-                cv.cvSize (20, 20), cv.cvSize (-1, -1),
-                cv.cvTermCriteria (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03))
+                co.cv.cvSize (20, 20), co.cv.cvSize (-1, -1),
+                co.cv.cvTermCriteria (co.cv.CV_TERMCRIT_ITER | co.cv.CV_TERMCRIT_EPS, 20, 0.03))
 
             point.set_opencv( cvPoint )
             self.img_lkpoints["points"].append(point)
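
The new set() helper is a tiny dispatcher: it looks up a method named after the key and calls it with the value, which is how backend tunables such as lk_swap become reachable from Capture.set_camera(). The same pattern in isolation, with an OpenCV-free class made up for illustration:

    class TinyBackend(object):
        """Illustrates the getattr-based set()/lk_swap() pattern of OcvfwBase."""

        def __init__(self):
            self.__lk_swap = False

        def set(self, key, value):
            # Dispatch "lk_swap" -> self.lk_swap(value); unknown keys report False.
            if hasattr(self, key):
                getattr(self, key)(value)
                return True
            return False

        def lk_swap(self, set=None):
            # Called with no argument it is a getter, with an argument a setter.
            if set is None:
                return self.__lk_swap
            self.__lk_swap = set

    backend = TinyBackend()
    backend.set("lk_swap", True)   # what Capture.set_camera("lk_swap", True) boils down to
    assert backend.lk_swap() is True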
@@ -190,11 +195,11 @@ class Ocvfw:
         """
 
         # calculate the optical flow
-        self.img_lkpoints["current"], status = cv.cvCalcOpticalFlowPyrLK (
+        self.img_lkpoints["current"], status = co.cv.cvCalcOpticalFlowPyrLK (
             self.prevGrey, self.grey, self.prevPyramid, self.pyramid,
             self.img_lkpoints["last"], len( self.img_lkpoints["last"] ),
-            cv.cvSize (20, 20), 3, len( self.img_lkpoints["last"] ), None,
-            cv.cvTermCriteria (cv.CV_TERMCRIT_ITER|cv.CV_TERMCRIT_EPS, 20, 0.03), 0)[0]
+            co.cv.cvSize (20, 20), 3, len( self.img_lkpoints["last"] ), None,
+            co.cv.cvTermCriteria (co.cv.CV_TERMCRIT_ITER|co.cv.CV_TERMCRIT_EPS, 20, 0.03), 0)[0]
 
         # initializations
         counter = 0
@@ -207,7 +212,7 @@ class Ocvfw:
 
             # this point is a correct point
             current = self.img_lkpoints["points"][counter]
-            current.set_opencv(cv.cvPoint(int(point.x), int(point.y)))
+            current.set_opencv(co.cv.cvPoint(int(point.x), int(point.y)))
 
             new_points.append( point )
 
@@ -225,15 +230,6 @@ class Ocvfw:
         # set back the self.imgPoints we keep
         self.img_lkpoints["current"] = new_points
 
-    def wait_key(self, num):
-        """
-        Simple call to the highgui.cvWaitKey function, which has to be called periodically.
-
-        Arguments:
-        - self: The main object pointer.
-        - num: An int value.
-        """
-        return highgui.cvWaitKey(num)
 
     def swap_lkpoints(self):
         """
@@ -250,46 +246,140 @@ class Ocvfw:
         self.img_lkpoints["last"], self.img_lkpoints["current"] = \
                                    self.img_lkpoints["current"], self.img_lkpoints["last"]
 
-    def start_camera(self, idx, params = None):
+
+class OcvfwCtypes(OcvfwBase):
+    """
+    This class controls the main camera functions.
+    It works as a little framework for OpenCV.
+
+    This backend uses the ctypes OpenCV Python bindings (ctypesopencv).
+    """
+    
+
+    def __init__(self):
+        """
+        Initialize the module and set its main variables.
+        """
+        co.cv = __import__("ctypesopencv.cv",
+                        globals(),
+                        locals(),
+                        [''])
+        
+        co.hg = __import__("ctypesopencv.highgui",
+                        globals(),
+                        locals(),
+                        [''])
+ 
+        OcvfwBase.__init__(self)
+
+
+class OcvfwPython(OcvfwBase):
+    """
+    This class controls the main camera functions.
+    It works as a little framework for OpenCV.
+
+    This backend uses the standard OpenCV Python bindings (the opencv package).
+    """
+
+    co.cv = __import__("opencv.cv",
+                        globals(),
+                        locals(),
+                        [''])
+        
+    co.hg = __import__("opencv.highgui",
+                        globals(),
+                        locals(),
+                        [''])
+
+    def __init__( self ):
         """
-        Starts the camera capture using highgui.
+        Initialize the module and set its main variables.
+        """
+
+        OcvfwBase.__init__(self)
+
+    def add_message(self, message, font=co.cv.CV_FONT_HERSHEY_COMPLEX, poss=None):
+        """
+        Write a message into the image.
 
         Arguments:
-        - params: A list with the capture properties. NOTE: Not implemented yet.
+        - self: The main object pointer.
+        - message: A string with the message.
+        - font: An OpenCV font to use.
+        - poss: The position of the message in the image. NOTE: Not enabled yet.
         """
-        self.capture = highgui.cvCreateCameraCapture( int(idx) )
-        debug.debug( "ocvfw", "cmStartCamera: Camera Started" )
 
-    def query_image(self, bgr=False, flip=False):
+        font = co.cv.cvInitFont ( font, 1, 1, 0.0, 1, co.cv.CV_AA)
+        textSize, ymin = co.cv.cvGetTextSize (message, font)
+        pt1 = co.cv.cvPoint ( ( self.img.width - textSize.width ) / 2 , 20 )
+        co.cv.cvPutText (self.img, message, pt1, font, co.cv.cvScalar (255, 0, 0))
+
+    def get_haar_points(self, haarCascade, method=co.cv.CV_HAAR_DO_CANNY_PRUNING):
         """
-        Queries the new frame.
+        Search for points matching the haarcascade selected.
 
         Arguments:
         - self: The main object pointer.
-        - bgr: If True. The image will be converted from RGB to BGR.
+        - haarCascade: The selected cascade.
+        - method: The search method to use. DEFAULT: co.cv.CV_HAAR_DO_CANNY_PRUNING.
 
-        Returns The image even if it was stored in self.img
+        Returns a list with the matches.
         """
 
-        frame = highgui.cvQueryFrame( self.capture )
+        cascade = co.cv.cvLoadHaarClassifierCascade( haarCascade, self.imgSize )
+
+        if not cascade:
+            debug.exception( "ocvfw", "The Haar Classifier Cascade load failed" )
+
+        co.cv.cvResize( self.img, self.small_img, co.cv.CV_INTER_LINEAR )
+
+        co.cv.cvClearMemStorage( self.storage )
+
+        points = co.cv.cvHaarDetectObjects( self.small_img, cascade, self.storage, 1.2, 2, method, co.cv.cvSize(20, 20) )
+
+        if points:
+            matches = [ [ co.cv.cvPoint( int(r.x*self.imageScale), int(r.y*self.imageScale)), \
+                          co.cv.cvPoint( int((r.x+r.width)*self.imageScale), int((r.y+r.height)*self.imageScale) )] \
+                          for r in points]
+            debug.debug( "ocvfw", "cmGetHaarPoints: detected some matches" )
+            return matches
+
+    def get_haar_roi_points(self, haarCascade, rect, origSize=(0, 0), method=co.cv.CV_HAAR_DO_CANNY_PRUNING):
+        """
+        Search for points matching the haarcascade selected.
+
+        Arguments:
+        - self: The main object pointer.
+        - haarCascade: The selected cascade.
+        - method: The search method to use. DEFAULT: co.cv.CV_HAAR_DO_CANNY_PRUNING.
+
+        Returns a list with the matches.
+        """
+
+        cascade = co.cv.cvLoadHaarClassifierCascade( haarCascade, self.imgSize )
+
+        if not cascade:
+            debug.exception( "ocvfw", "The Haar Classifier Cascade load failed" )
+
+        co.cv.cvClearMemStorage(self.storage)
+
+        imageROI = co.cv.cvGetSubRect(self.img, rect)
+
+        if cascade:
+            points = co.cv.cvHaarDetectObjects( imageROI, cascade, self.storage,
+                                    1.2, 2, method, co.cv.cvSize(20,20) )
+        else:
+            debug.exception( "ocvfw", "The Haar Classifier Cascade load Failed (ROI)" )
+
+        if points:
+            matches = [ [ co.cv.cvPoint( int(r.x+origSize[0]), int(r.y+origSize[1])), \
+                          co.cv.cvPoint( int(r.x+r.width+origSize[0]), int(r.y+r.height+origSize[1] ))] \
+                          for r in points]
+
+            debug.debug( "ocvfw", "cmGetHaarROIPoints: detected some matches" )
+            return matches
 
-        if not  self.img:
-            self.storage        = cv.cvCreateMemStorage(0)
-            self.imgSize        = cv.cvGetSize (frame)
-            self.img            = cv.cvCreateImage ( self.imgSize, 8, 3 )
-            #self.img.origin     = frame.origin
-            self.grey           = cv.cvCreateImage ( self.imgSize, 8, 1 )
-            self.yCrCb          = cv.cvCreateImage ( self.imgSize, 8, 3 )
-            self.prevGrey       = cv.cvCreateImage ( self.imgSize, 8, 1 )
-            self.pyramid        = cv.cvCreateImage ( self.imgSize, 8, 1 )
-            self.prevPyramid    = cv.cvCreateImage ( self.imgSize, 8, 1 )
-            self.small_img       = cv.cvCreateImage( cv.cvSize( cv.cvRound ( self.imgSize.width/self.imageScale),
-                                    cv.cvRound ( self.imgSize.height/self.imageScale) ), 8, 3 )
-        self.img = frame
-        cv.cvCvtColor(self.img, self.grey, cv.CV_BGR2GRAY)
 
-        self.wait_key(10)
-        return True
 
 
     ##########################################
@@ -316,8 +406,8 @@ class Ocvfw:
         timestamp = time.clock()/1.0
 
         if imgRoi:
-            img     = cv.cvGetSubRect( self.img, imgRoi )
-            imgSize = cv.cvSize( imgRoi.width, imgRoi.height )
+            img     = co.cv.cvGetSubRect( self.img, imgRoi )
+            imgSize = co.cv.cvSize( imgRoi.width, imgRoi.height )
             self.imgRoi = img
         else:
             img     = self.img
@@ -330,21 +420,21 @@ class Ocvfw:
             self.mhiD       = 1
             self.maxTD      = 0.5
             self.minTD      = 0.05
-            self.mask       = cv.cvCreateImage( imgSize,  8, 1 )
-            self.mhi        = cv.cvCreateImage( imgSize, 32, 1 )
-            self.orient     = cv.cvCreateImage( imgSize, 32, 1 )
-            self.segmask    = cv.cvCreateImage( imgSize, 32, 1 )
+            self.mask       = co.cv.cvCreateImage( imgSize,  8, 1 )
+            self.mhi        = co.cv.cvCreateImage( imgSize, 32, 1 )
+            self.orient     = co.cv.cvCreateImage( imgSize, 32, 1 )
+            self.segmask    = co.cv.cvCreateImage( imgSize, 32, 1 )
 
-            cv.cvZero( self.mhi )
+            co.cv.cvZero( self.mhi )
 
             for i in range( n_ ):
-                self.buf[i] = cv.cvCreateImage( imgSize, 8, 1 )
-                cv.cvZero( self.buf[i] )
+                self.buf[i] = co.cv.cvCreateImage( imgSize, 8, 1 )
+                co.cv.cvZero( self.buf[i] )
 
         idx1 = self.lastFm
 
         # convert frame to grayscale
-        cv.cvCvtColor( img, self.buf[self.lastFm], cv.CV_BGR2GRAY )
+        co.cv.cvCvtColor( img, self.buf[self.lastFm], co.cv.CV_BGR2GRAY )
 
         # index of (self.lastFm - (n_-1))th frame
         idx2 = ( self.lastFm + 1 ) % n_
@@ -353,21 +443,21 @@ class Ocvfw:
         silh = self.buf[idx2]
 
         # Get difference between frames
-        cv.cvAbsDiff( self.buf[idx1], self.buf[idx2], silh )
+        co.cv.cvAbsDiff( self.buf[idx1], self.buf[idx2], silh )
 
         # Threshold it
-        cv.cvThreshold( silh, silh, 30, 1, cv.CV_THRESH_BINARY )
+        co.cv.cvThreshold( silh, silh, 30, 1, co.cv.CV_THRESH_BINARY )
 
         # Update MHI
-        cv.cvUpdateMotionHistory( silh, self.mhi, timestamp, self.mhiD )
+        co.cv.cvUpdateMotionHistory( silh, self.mhi, timestamp, self.mhiD )
 
-        cv.cvCvtScale( self.mhi, self.mask, 255./self.mhiD, (self.mhiD - timestamp)*255./self.mhiD )
+        co.cv.cvCvtScale( self.mhi, self.mask, 255./self.mhiD, (self.mhiD - timestamp)*255./self.mhiD )
 
-        cv.cvCalcMotionGradient( self.mhi, self.mask, self.orient, self.maxTD, self.minTD, 3 )
+        co.cv.cvCalcMotionGradient( self.mhi, self.mask, self.orient, self.maxTD, self.minTD, 3 )
 
-        cv.cvClearMemStorage( self.storage )
+        co.cv.cvClearMemStorage( self.storage )
 
-        seq = cv.cvSegmentMotion( self.mhi, self.segmask, self.storage, timestamp, self.maxTD )
+        seq = co.cv.cvSegmentMotion( self.mhi, self.segmask, self.storage, timestamp, self.maxTD )
 
         for i in range(0, seq.total):
             if i < 0:  # case of the whole image
@@ -380,10 +470,10 @@ class Ocvfw:
                 if( mRect.width + mRect.height < 30 ):
                     continue
 
-            center = cv.cvPoint( (mRect.x + mRect.width/2), (mRect.y + mRect.height/2) )
+            center = co.cv.cvPoint( (mRect.x + mRect.width/2), (mRect.y + mRect.height/2) )
 
-            silhRoi = cv.cvGetSubRect(silh, mRect)
-            count = cv.cvNorm( silhRoi, None, cv.CV_L1, None )
+            silhRoi = co.cv.cvGetSubRect(silh, mRect)
+            count = co.cv.cvNorm( silhRoi, None, co.cv.CV_L1, None )
 
              # calculate number of points within silhouette ROI
             if( count < mRect.width * mRect.height * 0.05 ):
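
Both backend classes above do the same thing at load time: they bind the shared cv and hg slots in ocvfw.commons, OcvfwCtypes from the ctypesopencv package and OcvfwPython from the classic opencv package, and the rest of the framework only touches co.cv / co.hg. A reduced sketch of that loading step (the helper name is made up; it assumes the chosen bindings are installed):

    import ocvfw.commons as co

    def load_backend_bindings(backend="OcvfwPython"):
        """Bind co.cv / co.hg the way OcvfwCtypes / OcvfwPython do."""
        packages = {"OcvfwCtypes": "ctypesopencv",   # ctypes-opencv bindings
                    "OcvfwPython": "opencv"}         # classic python-opencv bindings
        pkg = packages.get(backend, "opencv")
        # A non-empty fromlist makes __import__ return the submodule itself
        # rather than the top-level package.
        co.cv = __import__("%s.cv" % pkg, globals(), locals(), [''])
        co.hg = __import__("%s.highgui" % pkg, globals(), locals(), [''])
        return co.cv, co.hg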
diff --git a/src/ocvfw/commons.py b/src/ocvfw/commons.py
index 7a6b5d7..159d6de 100644
--- a/src/ocvfw/commons.py
+++ b/src/ocvfw/commons.py
@@ -27,6 +27,7 @@ __copyright__ = "Copyright (c) 2008 Flavio Percoco Premoli"
 __license__   = "GPLv2"
 
 import os
+import debug
 
 abs_path = os.path.abspath(os.path.dirname(__file__))
 
@@ -39,5 +40,21 @@ colors = { "gray" : { "ch" : 1 },
            "rgb"  : { "ch" : 3 },
            "bgr"  : { "ch" : 3 }}
 
+# CV common lib
+cv = None
+
+# Highgui common lib
+hg = None
+
 def get_ch(color):
     return colors[color]["ch"]
+
+def singleton(cls):
+    instances = {}
+    def getinstance():
+        if cls not in instances:
+            instances[cls] = cls()
+            debug.debug("Commons", "New Singleton Add (%s)" % cls)
+        return instances[cls]
+    return getinstance
+
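
The singleton() decorator added here is what dev/camera.py uses below to keep a single instance of the generated Camera class. A small usage sketch (the Config class is invented for the example):

    def singleton(cls):
        """Same shape as ocvfw.commons.singleton: cache one instance per class."""
        instances = {}
        def getinstance():
            if cls not in instances:
                instances[cls] = cls()
            return instances[cls]
        return getinstance

    @singleton
    class Config(object):
        def __init__(self):
            self.values = {}

    a = Config()
    b = Config()
    assert a is b            # both names refer to the one cached instance
    a.values["fps"] = 100
    assert b.values["fps"] == 100

Note that getinstance() takes no arguments, so a decorated class must be constructible without any, which holds for the Camera class built in dev/camera.py.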
diff --git a/src/ocvfw/dev/camera.py b/src/ocvfw/dev/camera.py
index eb7cdd1..0833118 100644
--- a/src/ocvfw/dev/camera.py
+++ b/src/ocvfw/dev/camera.py
@@ -28,49 +28,48 @@ __copyright__ = "Copyright (c) 2008 Flavio Percoco Premoli"
 __license__   = "GPLv2"
 
 import gobject
-
 from warnings import *
-from .. import debug, commons
-from opencv import cv
-from opencv import highgui as hg
-from .._ocv import Ocvfw as ocv
-
-
-class __Camera(ocv):
-
-    def init(self):
-        """
-        Initialize the camera.
-
-        Arguments:
-        - self: The main object pointer.
-        - idx: The camera device index.
-        - fps: The frames per second to be queried.
-        - async: Enable/Disable asynchronous image querying. Default: False
-        """
-        self.idx = 0
+from .. import debug
+from .. import commons as co
+from ocvfw import _ocv as ocv
 
-    def set_camera(self, idx):
-        self.idx = idx
+Camera = None
 
-    def start(self):
-        self.start_camera(self.idx)
+try:
+    import gtk
+except ImportError:
+    debug.info("Camera", "Gtk not imported")
 
+def _camera(backend):
+    if not hasattr(ocv, backend):
+        debug.warning("Camera", "No such backend %s, falling back to OcvfwPython" % backend)
+        backend = "OcvfwPython"
+    
+    bknd = getattr(ocv, backend)
 
-Camera = __Camera()
+    @co.singleton
+    class Camera(bknd):
+        def __init__(self):
+            bknd.__init__(self)
 
+    return Camera()
 
 class Capture(object):
 
-    def __init__(self, image=None, fps=100, async=False, idx=0):
+    def __init__(self, image=None, fps=100, async=False, idx=0, backend="OcvfwPython"):
+
+        global Camera
 
         self.__lock        = False
         self.__flip        = {}
         self.__color       = "bgr"
         self.__props       = { "color" : "rgb" }
-        self.__camera      = Camera
-        self.__camera.set_camera(idx)
-        self.__camera.start()
+        
+
+        Camera = _camera(backend)
+        Camera.set_camera_idx(idx)
+        Camera.start_camera()
+        debug.debug("Camera", "Loaded backend %s" % backend)
 
         self.__graphics    = { "rect"  : [],
                                "point" : []}
@@ -80,8 +79,8 @@ class Capture(object):
         self.__image_log   = []
         self.__image_orig  = None
 
-        color_vars         = [x for x in dir(cv) if '2' in x and str(getattr(cv, x)).isdigit()]
-        self.__color_int   = dict(zip([x.lower() for x in color_vars], [getattr(cv,x) for x in color_vars]))
+        color_vars         = [x for x in dir(co.cv) if '2' in x and str(getattr(co.cv, x)).isdigit()]
+        self.__color_int   = dict(zip([x.lower() for x in color_vars], [getattr(co.cv,x) for x in color_vars]))
 
         self.roi           = None
 
@@ -114,30 +113,35 @@ class Capture(object):
         - self: The main object pointer.
         """
 
-        self.__camera.query_image()
+        Camera.query_image()
 
         if not self.__image:
-            self.__images_cn   = { 1 : cv.cvCreateImage ( self.__camera.imgSize, 8, 1 ),
-                                   3 : cv.cvCreateImage ( self.__camera.imgSize, 8, 3 ),
-                                   4 : cv.cvCreateImage ( self.__camera.imgSize, 8, 4 ) }
+            self.__images_cn   = { 1 : co.cv.cvCreateImage ( Camera.imgSize, 8, 1 ),
+                                   3 : co.cv.cvCreateImage ( Camera.imgSize, 8, 3 ),
+                                   4 : co.cv.cvCreateImage ( Camera.imgSize, 8, 4 ) }
 
         self.__color       = "bgr"
-        self.__image_orig  = self.__image = self.__camera.img
+        self.__image_orig  = self.__image = Camera.img
 
         if self.__color != self.__color_set:
             self.__image = self.color(self.__color_set)
 
         # TODO: Workaround, I've to fix it
-        if len(self.__camera.img_lkpoints["last"]) > 0:
-            self.__camera.show_lkpoints()
+        if len(Camera.img_lkpoints["last"]) > 0:
+            Camera.show_lkpoints()
 
-        self.__camera.swap_lkpoints()
+        if Camera.lk_swap():
+            Camera.swap_lkpoints()
 
         self.show_rectangles(self.rectangles())
 
         return self.async
 
-    # property
+    def set_camera(self, key, value):
+        """Forwards a (key, value) setting to the camera backend's set() method."""
+
+        Camera.set(key, value)
+
     def image(self, new_img = None):
         """
         Returns the image ready to use
@@ -164,14 +168,34 @@ class Capture(object):
         if self.__image is None:
             return False
 
-        tmp = cv.cvCreateImage( cv.cvSize( width, height ), 8, self.__ch )
-        cv.cvResize( self.__image, tmp, cv.CV_INTER_AREA )
+        tmp = co.cv.cvCreateImage( co.cv.cvSize( width, height ), 8, self.__ch )
+        co.cv.cvResize( self.__image, tmp, co.cv.CV_INTER_AREA )
 
         if not copy:
             self.__image = tmp
 
         return tmp
 
+    def to_gtk_buff(self):
+        """
+        Converts image to gtkImage and returns it
+
+        Arguments:
+        - self: The main object pointer.
+        """
+
+        img = self.__image
+
+        if "as_numpy_array" in dir(img):
+            buff = gtk.gdk.pixbuf_new_from_array(img.as_numpy_array(), 
+                                                 gtk.gdk.COLORSPACE_RGB, 
+                                                 img.depth)
+        else:
+            buff = gtk.gdk.pixbuf_new_from_data(img.imageData, 
+                                                gtk.gdk.COLORSPACE_RGB, False, 8,
+                                                int(img.width), int(img.height), 
+                                                img.widthStep )
+        return buff
 
     def points(self):
         """
@@ -201,10 +225,10 @@ class Capture(object):
         #debug.debug("Camera", "Showing existing rectangles -> %d" % len(rectangles))
 
         for rect in rectangles:
-            cv.cvRectangle( self.__image, cv.cvPoint(rect.x, rect.y), cv.cvPoint(rect.size[0], rect.size[1]), cv.CV_RGB(255,0,0), 3, 8, 0 )
+            co.cv.cvRectangle( self.__image, co.cv.cvPoint(rect.x, rect.y), co.cv.cvPoint(rect.size[0], rect.size[1]), co.cv.CV_RGB(255,0,0), 3, 8, 0 )
 
     def draw_point(self, x, y):
-        cv.cvCircle(self.__image, [x,y], 3, cv.cvScalar(0, 255, 0, 0), -1, 8, 0)
+        co.cv.cvCircle(self.__image, [x,y], 3, co.cv.cvScalar(0, 255, 0, 0), -1, 8, 0)
 
     def original(self):
         """
@@ -230,9 +254,9 @@ class Capture(object):
         rect = args[0]
 
         if len(args) > 1:
-            rect = cv.cvRect( args[0], args[1], args[2], args[3] )
+            rect = co.cv.cvRect( args[0], args[1], args[2], args[3] )
 
-        return cv.cvGetSubRect(self.__image, rect)
+        return co.cv.cvGetSubRect(self.__image, rect)
 
 
     def flip(self, flip):
@@ -245,10 +269,10 @@ class Capture(object):
         """
 
         if "hor" or "both" in flip:
-            cv.cvFlip( self.__image, self.__image, 1)
+            co.cv.cvFlip( self.__image, self.__image, 1)
 
         if "ver" or "both" in flip:
-            cv.cvFlip( self.__image, self.__image, 0)
+            co.cv.cvFlip( self.__image, self.__image, 0)
 
         return self.__image
 
@@ -263,11 +287,11 @@ class Capture(object):
         returns self.color if color == None
         """
 
-        channel = channel if channel != None else commons.get_ch(new_color)
+        channel = channel if channel != None else co.get_ch(new_color)
 
         if new_color:
             tmp = self.__images_cn[channel]
-            cv.cvCvtColor( self.__image, tmp, self.__color_int['cv_%s2%s' % (self.__color, new_color) ])
+            co.cv.cvCvtColor( self.__image, tmp, self.__color_int['cv_%s2%s' % (self.__color, new_color) ])
             self.__color = new_color
             self.__ch = channel
 
@@ -284,7 +308,7 @@ class Capture(object):
         - self: The main object pointer.
         - properties: The properties to change.
         """
-        #self.__size     = size  if size  != None else self.__camera.imgSize
+        #self.__size     = size  if size  != None else Camera.imgSize
         self.__color_set = color if color.lower() != None else self.__color_set
         self.__flip      = flip  if flip  != None else self.__flip
 
@@ -306,7 +330,7 @@ class Capture(object):
             self.__graphics[graphic.type].append(graphic)
 
             if graphic.is_point():
-                self.__camera.set_lkpoint(graphic)
+                Camera.set_lkpoint(graphic)
         else:
             warn("The Graphic %s already exists. It wont be added" % graphic.label, RuntimeWarning)
             return False
@@ -346,10 +370,10 @@ class Capture(object):
         """
 
         if roi is None:
-            return self.__camera.get_haar_points(haar_csd)
+            return Camera.get_haar_points(haar_csd)
 
-        roi = cv.cvRect(roi["start"], roi["end"], roi["width"], roi["height"])
-        return self.__camera.get_haar_roi_points(haar_csd, roi, orig)
+        roi = co.cv.cvRect(roi["start"], roi["end"], roi["width"], roi["height"])
+        return Camera.get_haar_roi_points(haar_csd, roi, orig)
 
     def message(self, message):
         """
@@ -439,7 +463,7 @@ class Point(Graphic):
         self.__ocv = None
         self.last  = None
         self.diff  = None
-        self.orig  = cv.cvPoint( self.x, self.y )
+        self.orig  = co.cv.cvPoint( self.x, self.y )
 
     def set_opencv(self, opencv):
         """
@@ -459,10 +483,10 @@ class Point(Graphic):
             self.last = self.__ocv
 
             # Update the diff attr
-            self.rel_diff = cv.cvPoint( self.last.x - self.x,
+            self.rel_diff = co.cv.cvPoint( self.last.x - self.x,
                                         self.last.y - self.y )
 
-            self.abs_diff = cv.cvPoint( self.x - self.orig.x,
+            self.abs_diff = co.cv.cvPoint( self.x - self.orig.x,
                                         self.y - self.orig.y )
 
         self.__ocv = opencv
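
Capture no longer relies on a module-level __Camera instance; it asks _camera() to resolve the backend class by name from ocvfw._ocv, falling back to OcvfwPython for unknown names, and wraps the result with the commons singleton decorator. A condensed sketch of that lookup, assuming the opencv bindings are importable (the commented lines mirror how Capture.__init__ wires it up):

    import ocvfw.commons as co
    from ocvfw import _ocv as ocv

    def pick_backend(name="OcvfwPython"):
        """Resolve a backend class the way dev.camera._camera() does."""
        if not hasattr(ocv, name):
            # Unknown names fall back to the plain python-opencv backend.
            name = "OcvfwPython"
        bknd = getattr(ocv, name)

        @co.singleton
        class Camera(bknd):
            def __init__(self):
                bknd.__init__(self)

        return Camera()     # co.singleton caches the instance for this class

    # Typical wiring, mirroring Capture.__init__:
    #   cam = pick_backend("OcvfwCtypes")   # or "OcvfwPython"
    #   cam.set_camera_idx(0)               # 0 -> /dev/video0
    #   cam.start_camera()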
diff --git a/src/ocvfw/idm/eyes.py b/src/ocvfw/idm/eyes.py
index 6380164..48c4152 100644
--- a/src/ocvfw/idm/eyes.py
+++ b/src/ocvfw/idm/eyes.py
@@ -30,7 +30,7 @@ __license__   = "GPLv2"
 import pyvision as pv
 import ocvfw.debug as debug
 import ocvfw.commons as commons
-from ocvfw.dev.camera import Camera, Capture, Point
+from ocvfw.dev.camera import Capture, Point
 from pyvision.face.FilterEyeLocator import loadFilterEyeLocator as eye_locator
 
 from opencv import cv
@@ -57,8 +57,6 @@ class Module(object):
         """
         debug.debug("ocvfw.idm", "Starting %s idm" % a_name)
         
-        Camera.init()
-
         self.img          = None
         self.ctr          = controller
         self.cap          = None
@@ -107,14 +105,15 @@ class Module(object):
         
         debug.debug("ocvfw.idm", "Setting Capture")
         
-        self.cap = Capture(async=False, idx=cam)
+        self.cap = Capture(async=False, idx=cam, backend="OcvfwPython")
         self.cap.change(color="rgb")
+        self.cap.set_camera("lk_swap", True)
 
     def calc_motion(self):
         if not hasattr(self.cap, "forehead"):
             self.get_forehead()
 
-    def get_image(self):
+    def get_capture(self):
         """
         Sets the forehead point if needed and returns the formated image.
 
@@ -127,7 +126,7 @@ class Module(object):
         if not hasattr(self.cap, "leye") or not hasattr(self.cap, "reye"):
             self.get_eye()
             
-        return self.cap.resize(200, 160, True)
+        return self.cap
 
     def get_pointer(self):
         """
diff --git a/src/ocvfw/idm/forehead.py b/src/ocvfw/idm/forehead.py
index 9a8763c..1b2c425 100644
--- a/src/ocvfw/idm/forehead.py
+++ b/src/ocvfw/idm/forehead.py
@@ -29,7 +29,7 @@ __license__   = "GPLv2"
 
 import ocvfw.debug as debug
 import ocvfw.commons as commons
-from ocvfw.dev.camera import Camera, Capture, Point
+from ocvfw.dev.camera import Capture, Point
 
 a_name = "Forehead"
 a_description = "Forehead point tracker based on LK Algorithm"
@@ -52,8 +52,6 @@ class Module(object):
 
         debug.debug("ocvfw.idm", "Starting %s idm" % a_name)
         
-        Camera.init()
-
         self.ctr          = controller
         self.cap          = None
         self.stgs         = stgs
@@ -107,15 +105,16 @@ class Module(object):
         
         debug.debug("ocvfw.idm", "Setting Capture")
         
-        self.cap = Capture(async=True, idx=cam)
+        self.cap = Capture(async=True, idx=cam, backend="OcvfwPython")
         self.cap.change(color="rgb")
+        self.cap.set_camera("lk_swap", True)
 
 
     def calc_motion(self):
         if not hasattr(self.cap, "forehead"):
             self.get_forehead()
 
-    def get_image(self):
+    def get_capture(self):
         """
         Sets the forehead point if needed and returns the formated image.
 
@@ -128,7 +127,8 @@ class Module(object):
         if not hasattr(self.cap, "forehead"):
             self.get_forehead()
 
-        return self.cap.resize(200, 160, True)
+        #return self.cap.resize(200, 160, True)
+        return self.cap
 
     def get_pointer(self):
         """


