[mousetrap/gnome3-wip] Get a window to display.



commit acdcff619e55e77962406990742cbef8268c465f
Author: Logan Hotchkiss <lhotchkiss17@gmail.com>
Date:   Sun Feb 10 22:02:57 2013 -0500

    Get a window to display.
    
    * Changed the opencv module check to cv in configure.in
    * Save test configs. Not working
    * Showing image in start image
    * Code to show frame
    * Now image show will go away
    * Added debug lines to track calls
    * Window opens and displays menu. No camera yet
    * Window pops up and shows camera, but image is distorted
    * Updating
    * Image color issue fixed
    * Hard-coded rectangle size to prevent a crash
    * Adjusted the temporary GetSubRect fix to only hard-code the size when it would produce an error
    * Add rectangle to track forehead.
    * Debug code added for forehead and eyes
    * Added dot to view forehead tracking point.
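
For context, the frame and pointer updates in main.py now run as GLib timeouts through
PyGObject (gi.repository.GObject) rather than the old standalone gobject module. A minimal,
self-contained sketch of that pattern (the class and callback bodies here are illustrative
stand-ins, not Mousetrap's actual code):

    from gi.repository import GObject

    class LoopSketch:
        """Illustrative stand-in for the Controller's main-loop wiring."""

        def __init__(self):
            self.loop = GObject.MainLoop()
            # Intervals are in milliseconds; a callback keeps firing for as
            # long as it returns True.
            GObject.timeout_add(150, self.update_frame)    # refresh the displayed frame
            GObject.timeout_add(50, self.update_pointers)  # move the mouse pointer

        def update_frame(self):
            # ...grab and draw the current camera frame...
            return True   # returning False would cancel this timeout

        def update_pointers(self):
            # ...translate the tracked point into a pointer position...
            return True

        def run(self):
            GObject.threads_init()  # deprecated in newer PyGObject; mirrors the patch
            self.loop.run()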

 .project                                    |   17 --
 .pydevproject                               |   10 -
 configure.in                                |    2 +-
 docs/Mousetrap_Call Diagram.pdf             |  Bin 0 -> 27507 bytes
 src/mousetrap/app/addons/cpu.py             |    4 +-
 src/mousetrap/app/main.py                   |   16 +-
 src/mousetrap/app/ui/widgets.py             |    8 +-
 src/mousetrap/ocvfw/_ocv.py                 |  201 +++++++++++---------
 src/mousetrap/ocvfw/backends/Makefile.am    |    9 -
 src/mousetrap/ocvfw/backends/OcvfwBase.py   |  275 ---------------------------
 src/mousetrap/ocvfw/backends/OcvfwCtypes.py |   61 ------
 src/mousetrap/ocvfw/backends/OcvfwPython.py |  242 -----------------------
 src/mousetrap/ocvfw/dev/camera.py           |   53 +++--
 src/mousetrap/ocvfw/idm/forehead.py         |   48 +++--
 14 files changed, 192 insertions(+), 754 deletions(-)
---
diff --git a/configure.in b/configure.in
index 21d295d..7d4cb61 100644
--- a/configure.in
+++ b/configure.in
@@ -56,7 +56,7 @@ AM_CHECK_PYMOD(gettext,,,[AC_MSG_ERROR(Could not find python module gettext)])
 AM_CHECK_PYMOD(getopt,,,[AC_MSG_ERROR(Could not find python module getopt)])
 AM_CHECK_PYMOD(Xlib,,,[AC_MSG_ERROR(Could not find python module Xlib)])
 AM_CHECK_PYMOD(pyatspi,,,[AC_MSG_ERROR(Could not find python module pyatspi)])
-AM_CHECK_PYMOD(opencv,,,[AC_MSG_ERROR(Could not find python module opencv)])
+AM_CHECK_PYMOD(cv,,,[AC_MSG_ERROR(Could not find python module cv)])
 
 AM_CONDITIONAL(WITH_LOUIS, test x$have_liblouis = "xyes")
 
diff --git a/docs/Mousetrap_Call Diagram.pdf b/docs/Mousetrap_Call Diagram.pdf
new file mode 100644
index 0000000..e54d9b6
Binary files /dev/null and b/docs/Mousetrap_Call Diagram.pdf differ
diff --git a/src/mousetrap/app/addons/cpu.py b/src/mousetrap/app/addons/cpu.py
index a73656f..ec634bc 100644
--- a/src/mousetrap/app/addons/cpu.py
+++ b/src/mousetrap/app/addons/cpu.py
@@ -19,7 +19,7 @@
 # along with mouseTrap.  If not, see <http://www.gnu.org/licenses/>.
 
 import os
-import gobject
+from gi.repository import GObject
 import mousetrap.app.debug as debug
 import mousetrap.app.environment as env
 
@@ -35,7 +35,7 @@ class Addon(AddonsBase):
     def __init__(self, controller):
         AddonsBase.__init__(self, controller)
         
-        gobject.timeout_add(1000, self.check_cpu)
+        GObject.timeout_add(1000, self.check_cpu)
         debug.debug("addon.cpu", "CPU addon started")
 
     def check_cpu(self):
diff --git a/src/mousetrap/app/main.py b/src/mousetrap/app/main.py
index 93d855b..5c320a4 100644
--- a/src/mousetrap/app/main.py
+++ b/src/mousetrap/app/main.py
@@ -37,9 +37,9 @@ import sys
 sys.argv[0] = "mousetrap"
 
 from gi.repository import GObject
-from . import debug
+import debug
 import getopt
-from . import environment as env
+import environment as env
 
 from mousetrap.ocvfw import pocv
 
@@ -65,7 +65,7 @@ class Controller():
         # We don't want to load the settings each time we need them. do we?
         self.cfg = None
 
-        self.loop = gobject.MainLoop()
+        self.loop = GObject.MainLoop()
         self.httpd = httpd.HttpdServer(20433)
         self.dbusd = dbusd.DbusServer()
 
@@ -77,7 +77,6 @@ class Controller():
         Arguments:
         - self: The main object pointer.
         """
-
         if self.cfg is None:
             conf_created, self.cfg = settings.load()
 
@@ -92,8 +91,9 @@ class Controller():
             self.idm = idm.Module(self)
             self.idm.set_capture(self.cfg.getint("cam", "inputDevIndex"))
 
-            gobject.timeout_add(150, self.update_frame)
-            gobject.timeout_add(50, self.update_pointers)
+            #Will return false when cap.image() is false in ui/main
+            GObject.timeout_add(150, self.update_frame)    #Thread that updates the image on the screen
+            GObject.timeout_add(50, self.update_pointers)   #Thread that moves the mouse
             
             debug.info("mousetrap", "Idm loaded and started")
 
@@ -108,7 +108,7 @@ class Controller():
             
         debug.info("mousetrap", "MouseTrap's Interface Built and Loaded")
 
-        gobject.threads_init()
+        GObject.threads_init()
         self.loop.run()
 
     def proc_args(self):
@@ -236,7 +236,7 @@ class Controller():
         - self: The main object pointer.
         """
         self.itf.update_frame(self.idm.get_capture(), self.idm.get_pointer())
-        return True
+        return True 
 
     def update_pointers(self):
         """
diff --git a/src/mousetrap/app/ui/widgets.py b/src/mousetrap/app/ui/widgets.py
index c277ffc..bdff051 100644
--- a/src/mousetrap/app/ui/widgets.py
+++ b/src/mousetrap/app/ui/widgets.py
@@ -64,7 +64,8 @@ class Mapper(Gtk.Widget):
         # Mapper.do_realize: Class 'style' has no 'fg_gc' member
 
         # First set an internal flag telling that we're realized
-        self.set_flags(self.flags() | Gtk.REALIZED)
+        #self.set_flags(self.flags() | Gtk.REALIZED)
+       self.set_realized(True)
 
         # Create a new gdk.Window which we can draw on.
         # Also say that we want to receive exposure events
@@ -75,11 +76,12 @@ class Mapper(Gtk.Widget):
                 width=self.allocation.width,
                 height=self.allocation.height,
                 window_type=Gdk.WINDOW_CHILD,
-                wclass=Gdk.INPUT_OUTPUT,
                 event_mask=self.get_events() | Gdk.EventMask.EXPOSURE_MASK
                         | Gdk.EventMask.BUTTON1_MOTION_MASK | Gdk.EventMask.BUTTON_PRESS_MASK
                         | Gdk.EventMask.POINTER_MOTION_MASK
-                        | Gdk.EventMask.POINTER_MOTION_HINT_MASK)
+                        | Gdk.EventMask.POINTER_MOTION_HINT_MASK,
+               wclass=Gdk.INPUT_OUTPUT
+       )
 
         # Associate the gdk.Window with ourselves, Gtk+ needs a reference
         # between the widget and the gdk window
diff --git a/src/mousetrap/ocvfw/_ocv.py b/src/mousetrap/ocvfw/_ocv.py
index bb3d46d..05e0274 100644
--- a/src/mousetrap/ocvfw/_ocv.py
+++ b/src/mousetrap/ocvfw/_ocv.py
@@ -1,6 +1,3 @@
-# -*- coding: utf-8 -*-
-
-# Ocvfw
 #
 # Copyright 2009 Flavio Percoco Premoli
 #
@@ -27,8 +24,8 @@ __copyright__ = "Copyright (c) 2008 Flavio Percoco Premoli"
 __license__   = "GPLv2"
 
 import time
-from . import debug
-from . import commons as co
+import debug
+import commons as co
 import cv2 #remove
 import cv2.cv as cv
 import numpy
@@ -55,10 +52,10 @@ class OcvfwBase:
         """
         if hasattr(self, "%s" % key):
             getattr(self, "%s" % key)(value)
-            debug.debug("OcvfwBase", "Changed %s value to %s" % (key, value))
+            debug.debug("_ocv - set", "Changed %s value to %s" % (key, value))
             return True
         
-        debug.debug("OcvfwBase", "%s not found" % (key))
+        debug.debug("_ocv - set", "%s not found" % (key))
         return False
 
     def lk_swap(self, set=None):
@@ -80,10 +77,9 @@ class OcvfwBase:
         Creates a new image 
         """
 
-        if type(size) == "<type 'tuple'>":
-            size = co.cv.cvSize( size[0], size[1])
-
-        return co.cv.cvCreateImage( size, num, ch)
+        #if type(size) == "<type 'tuple'>":
+            #size = co.cv.cvSize( size[0], size[1])
+        return co.cv.CreateImage( (size[0], size[1]), num, ch)# was size'
 
     def set_camera_idx(self, idx):
         """
@@ -97,23 +93,24 @@ class OcvfwBase:
 
     def wait_key(self, num):
         """
-        Simple call to the co.hg.cvWaitKey function, which has to be called periodically.
+        Simple call to the co.cv.WaitKey function, which has to be called periodically.
 
         Arguments:
         - self: The main object pointer.
         - num: An int value.
         """
-        return co.hg.cvWaitKey(num)
+        return co.cv.WaitKey(num)
     
     def start_camera(self, params = None):
         """
-        Starts the camera capture using co.hg.
+        Starts the camera capture
 
         Arguments:
         - params: A list with the capture properties. NOTE: Not implemented yet.
         """
-        self.capture = co.hg.cvCreateCameraCapture( int(self.idx) )
-        debug.debug( "ocvfw", "cmStartCamera: Camera Started" )
+        self.capture = cv.CaptureFromCAM(self.idx )    
+
+        debug.debug( "ocvfw", "start_camera: Camera Started" )
     
     def query_image(self, bgr=False, flip=False):
         """
@@ -126,22 +123,31 @@ class OcvfwBase:
         Returns The image even if it was stored in self.img
         """
 
-        frame = co.hg.cvQueryFrame( self.capture )
+        frame = cv.QueryFrame( self.capture )
+
+       #Test to make sure camera starts properly
+        #cv.ShowImage("webcam", frame)
+        
 
         if not  self.img:
-            self.storage        = co.cv.cvCreateMemStorage(0)
-            self.imgSize        = co.cv.cvGetSize (frame)
-            self.img            = co.cv.cvCreateImage ( self.imgSize, 8, 3 )
+            self.storage        = co.cv.CreateMemStorage(0)
+            self.imgSize        = co.cv.GetSize (frame)
+            self.img            = co.cv.CreateImage ( self.imgSize, 8, 3 )
             #self.img.origin     = frame.origin
-            self.grey           = co.cv.cvCreateImage ( self.imgSize, 8, 1 )
-            self.yCrCb          = co.cv.cvCreateImage ( self.imgSize, 8, 3 )
-            self.prevGrey       = co.cv.cvCreateImage ( self.imgSize, 8, 1 )
-            self.pyramid        = co.cv.cvCreateImage ( self.imgSize, 8, 1 )
-            self.prevPyramid    = co.cv.cvCreateImage ( self.imgSize, 8, 1 )
-            self.small_img       = co.cv.cvCreateImage( co.cv.cvSize( co.cv.cvRound ( self.imgSize.width/self.imageScale),
-                                    co.cv.cvRound ( self.imgSize.height/self.imageScale) ), 8, 3 )
+            self.grey           = co.cv.CreateImage ( self.imgSize, 8, 1 )
+            self.yCrCb          = co.cv.CreateImage ( self.imgSize, 8, 3 )
+            self.prevGrey       = co.cv.CreateImage ( self.imgSize, 8, 1 )
+            self.pyramid        = co.cv.CreateImage ( self.imgSize, 8, 1 )
+            self.prevPyramid    = co.cv.CreateImage ( self.imgSize, 8, 1 )
+            #a = co.cv.Round(self.img.width/self.imageScale)
+            #b = co.cv.Round(self.img.height/self.imageScale)
+            #c = (a, b)
+            self.small_img      = co.cv.CreateImage( 
+                               ( co.cv.Round(self.img.width/self.imageScale),
+                                 co.cv.Round(self.img.height/self.imageScale) ),
+                               8, 3 )
+
         self.img = frame
-        co.cv.cvCvtColor(self.img, self.grey, co.cv.CV_BGR2GRAY)
 
         self.wait_key(10)
         return True
@@ -152,21 +158,24 @@ class OcvfwBase:
 
         Arguments:
         - self: The main object pointer.
-        - point: A co.cv.cvPoint Point.
+        - point: A co.cv.Point Point.
         """
 
-        cvPoint = co.cv.cvPoint( point.x, point.y )
+        #Point = co.cv.Point( point.x, point.y )
 
-        self.img_lkpoints["current"] = [ co.cv.cvPointTo32f ( cvPoint ) ]
+       self.img_lkpoints["current"] = numpy.zeros((point.x, point.y), numpy.float32)
+        self.img_lkpoints["current"] = cv.fromarray(self.img_lkpoints["current"])
 
-        if self.img_lkpoints["current"]:
-            co.cv.cvFindCornerSubPix (
+        self.grey = numpy.asarray(self.grey[:,:])      #new
+
+        if numpy.all(self.img_lkpoints["current"]):
+            #co.cv.FindCornerSubPix(
+            cv2.cornerSubPix(                          # was cv.FindCornerSubPix
                 self.grey,
                 self.img_lkpoints["current"],
-                co.cv.cvSize (20, 20), co.cv.cvSize (-1, -1),
-                co.cv.cvTermCriteria (co.cv.CV_TERMCRIT_ITER | co.cv.CV_TERMCRIT_EPS, 20, 0.03))
-
-            point.set_opencv( cvPoint )
+                (20, 20), (0,0),
+                (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03))
+            point.set_opencv( point )
             self.img_lkpoints["points"].append(point)
 
             setattr(point.parent, point.label, point)
@@ -192,18 +201,31 @@ class OcvfwBase:
 
     def show_lkpoints(self):
         """
-        Callculate the optical flow of the set points and draw them in the image.
+        Calculate the optical flow of the set points and draw them in the image.
 
         Arguments:
         - self: The main object pointer.
         """
 
-        # calculate the optical flow
-        optical_flow = co.cv.cvCalcOpticalFlowPyrLK (
-            self.prevGrey, self.grey, self.prevPyramid, self.pyramid,
-            self.img_lkpoints["last"], len( self.img_lkpoints["last"] ),
-            co.cv.cvSize (20, 20), 3, len( self.img_lkpoints["last"] ), None,
-            co.cv.cvTermCriteria (co.cv.CV_TERMCRIT_ITER|co.cv.CV_TERMCRIT_EPS, 20, 0.03), 0)
+        self.prevGrey = numpy.asarray(self.prevGrey[:,:])
+        prevGrey = cv2.cvtColor(self.prevGrey, cv2.COLOR_BGR2GRAY)
+
+        self.grey = numpy.asarray(self.grey[:,:])
+        grey = cv2.cvtColor(self.grey, cv2.COLOR_BGR2GRAY)
+
+          # calculate the optical flow
+        nextPts, status, err = cv2.calcOpticalFlowPyrLK (
+            prevGrey, #prevImg
+            grey, #nextImg
+           self.prevPyramid, #prevPts
+           self.pyramid, #nextPts
+           None, #status
+           (20, 20), #winSize
+           2, #maxLevel
+                (cv2.TERM_CRITERIA_MAX_ITER|cv2.TERM_CRITERIA_EPS, 20, 0.03), #criteria
+           cv2.OPTFLOW_USE_INITIAL_FLOW #flags
+           )
+       cv.ShowImage("test",self.grey)
 
         if isinstance(optical_flow[0], tuple):
             self.img_lkpoints["current"], status = optical_flow[0]
@@ -222,7 +244,7 @@ class OcvfwBase:
 
             # this point is a correct point
             current = self.img_lkpoints["points"][counter]
-            current.set_opencv(co.cv.cvPoint(int(point.x), int(point.y)))
+            current.set_opencv(co.cv.Point(int(point.x), int(point.y)))
 
             new_points.append( point )
 
@@ -260,7 +282,7 @@ class OcvfwBase:
 class OcvfwCtypes(OcvfwBase):
     """
     This Class controlls the main camera functions.
-    It works as a little framework for Openco.cv.
+    It works as a little framework for Opencv.cv.
 
     This Backend uses ctypes opencv python bindings.
     """
@@ -270,15 +292,15 @@ class OcvfwCtypes(OcvfwBase):
         """
         Initialize the module and set its main variables.
         """
-        co.cv = __import__("ctypesopencv.cv",
+        co.cv = __import__("pyopencv.cv",
                         globals(),
                         locals(),
                         [''])
         
-        co.hg = __import__("ctypesopencv.highgui",
+        co.hg = __import__("pyopencv.cv",
                         globals(),
                         locals(),
-                        [''])
+                        [''])#should be removed
  
         OcvfwBase.__init__(self)
 
@@ -291,15 +313,15 @@ class OcvfwPython(OcvfwBase):
     This Backend uses normal opencv python bindings.
     """
 
-    co.cv = __import__("opencv.cv",
+    co.cv = __import__("cv",
                         globals(),
                         locals(),
                         [''])
         
-    co.hg = __import__("opencv.highgui",
+    co.hg = __import__("cv",
                         globals(),
                         locals(),
-                        [''])
+                        ['']) #should be removed
 
     def __init__( self ):
         """
@@ -319,10 +341,10 @@ class OcvfwPython(OcvfwBase):
         - poss: The position of the message in the image. NOTE: Not enabled yet.
         """
 
-        font = co.cv.cvInitFont ( font, 1, 1, 0.0, 1, co.cv.CV_AA)
-        textSize, ymin = co.cv.cvGetTextSize (message, font)
-        pt1 = co.cv.cvPoint ( ( self.img.width - textSize.width ) / 2 , 20 )
-        co.cv.cvPutText (self.img, message, pt1, font, co.cv.cvScalar (255, 0, 0))
+        font = co.cv.InitFont ( font, 1, 1, 0.0, 1, co.cv.CV_AA)
+        textSize, ymin = co.cv.GetTextSize (message, font)
+        pt1 = (( self.img.width - textSize.width ) / 2 , 20 )
+        co.cv.PutText (self.img, message, pt1, font, co.cv.Scalar (255, 0, 0))
 
     def get_haar_points(self, haarCascade, method=co.cv.CV_HAAR_DO_CANNY_PRUNING):
         """
@@ -336,22 +358,24 @@ class OcvfwPython(OcvfwBase):
         Returns a list with the matches.
         """
 
-        cascade = co.cv.cvLoadHaarClassifierCascade( haarCascade, self.imgSize )
+        cascade = co.cv.Load( haarCascade) #, self.imgSize )
 
         if not cascade:
             debug.exception( "ocvfw", "The Haar Classifier Cascade load failed" )
 
-        co.cv.cvResize( self.img, self.small_img, co.cv.CV_INTER_LINEAR )
-
-        co.cv.cvClearMemStorage( self.storage )
+        co.cv.Resize( self.img, self.small_img, co.cv.CV_INTER_LINEAR )
 
-        points = co.cv.cvHaarDetectObjects( self.small_img, cascade, self.storage, 1.2, 2, method, co.cv.cvSize(20, 20) )
+        #co.cv.ClearMemStorage( self.storage )
 
+        points = co.cv.HaarDetectObjects( self.small_img, cascade, self.storage, 1.2, 2, method, (20, 20) )
+       
         if points:
-            matches = [ [ co.cv.cvPoint( int(r.x*self.imageScale), int(r.y*self.imageScale)), \
-                          co.cv.cvPoint( int((r.x+r.width)*self.imageScale), int((r.y+r.height)*self.imageScale) )] \
-                          for r in points]
+            matches = [ [ ( int(r[0][0]*self.imageScale), int(r[0][1]*self.imageScale)), \
+                        ( int((r[0][0]+r[0][3])*self.imageScale), int((r[0][0]+r[0][2])*self.imageScale) )] \
+                        for r in points]
+           
             debug.debug( "ocvfw", "cmGetHaarPoints: detected some matches" )
+            debug.debug("ocvfw-getHaarPoints", matches)
             return matches
 
     def get_haar_roi_points(self, haarCascade, rect, origSize=(0, 0), method=co.cv.CV_HAAR_DO_CANNY_PRUNING):
@@ -365,39 +389,43 @@ class OcvfwPython(OcvfwBase):
 
         Returns a list with the matches.
         """
-
-        cascade = co.cv.cvLoadHaarClassifierCascade( haarCascade, self.imgSize )
-
+        cascade = co.cv.Load( haarCascade ) #, self.imgSize )
         if not cascade:
             debug.exception( "ocvfw", "The Haar Classifier Cascade load failed" )
 
         debug.debug( "ocvfw-get_haar_roi_points", self.img)
 
-        #remove, DNE co.cv.ClearMemStorage(self.storage)
-
-       if ((rect[0]+rect[2]) > self.img.width) or ((rect[1]+rect[3]) > self.img.height):
+        #FIXME: Work around to fix when the rect is too big
+       if (rect[0]+rect[2]) > self.img.width:
+               rect = (rect[0], rect[1], self.img.width-rect[0],self.img.height-rect[1])
+       if (rect[1]+rect[3]) > self.img.height:
                rect = (rect[0], rect[1], self.img.width-rect[0],self.img.height-rect[1])
-               debug.debug("GetSubRect", "Rect was too big. Fixed size")
 
-        imageROI = co.cv.cvGetSubRect(self.img, rect)
+        debug.debug("before GetSubRect - rect",rect)
+       debug.debug("before GetSubRect - self.img", self.img)
+        imageROI = co.cv.GetSubRect(self.img, rect)
 
         if cascade:
-            points = co.cv.cvHaarDetectObjects( imageROI, cascade, self.storage,
-                                    1.2, 2, method, co.cv.cvSize(20,20) )
+            points = co.cv.HaarDetectObjects( imageROI, cascade, self.storage,
+                                    1.2, 2, method, (20,20) )
         else:
             debug.exception( "ocvfw", "The Haar Classifier Cascade load Failed (ROI)" )
 
         if points:
-            matches = [ [ co.cv.cvPoint( int(r.x+origSize[0]), int(r.y+origSize[1])), \
-                          co.cv.cvPoint( int(r.x+r.width+origSize[0]), int(r.y+r.height+origSize[1] ))] \
+            matches = [ [ ( int(r[0][0]*origSize[0]), int(r[0][1]*origSize[1])), \
+                          ( int((r[0][0]+r[0][3])+origSize[0]), int((r[0][1]+r[0][2])*origSize[1]) )] \
                           for r in points]
+           #matches = [ [ ( int(r[0][0]), int(r[0][1])), \
+             #             ( int((r[0][0]+r[0][3])), int((r[0][1]+r[0][2])) )] \
+              #            for r in points]
+          #FIXME: I don't think the  matches are right
 
             debug.debug( "ocvfw", "cmGetHaarROIPoints: detected some matches" )
+           debug.debug("ocvfw-getHaarROIPoints", matches)
             return matches
 
 
 
-
     ##########################################
     #                                        #
     #          THIS IS NOT USED YET          #
@@ -422,7 +450,7 @@ class OcvfwPython(OcvfwBase):
         timestamp = time.clock()/1.0
 
         if imgRoi:
-            img     = co.cv.cvGetSubRect( self.img, imgRoi )
+            img     = co.cv.GetSubRect( self.img, imgRoi )
             imgSize = co.cv.cvSize( imgRoi.width, imgRoi.height )
             self.imgRoi = img
         else:
@@ -436,21 +464,21 @@ class OcvfwPython(OcvfwBase):
             self.mhiD       = 1
             self.maxTD      = 0.5
             self.minTD      = 0.05
-            self.mask       = co.cv.cvCreateImage( imgSize,  8, 1 )
-            self.mhi        = co.cv.cvCreateImage( imgSize, 32, 1 )
-            self.orient     = co.cv.cvCreateImage( imgSize, 32, 1 )
-            self.segmask    = co.cv.cvCreateImage( imgSize, 32, 1 )
+            self.mask       = co.cv.CreateImage( imgSize,  8, 1 )
+            self.mhi        = co.cv.CreateImage( imgSize, 32, 1 )
+            self.orient     = co.cv.CreateImage( imgSize, 32, 1 )
+            self.segmask    = co.cv.CreateImage( imgSize, 32, 1 )
 
-            co.cv.cvZero( self.mhi )
+            co.cv.SetZero( self.mhi )
 
             for i in range( n_ ):
-                self.buf[i] = co.cv.cvCreateImage( imgSize, 8, 1 )
+                self.buf[i] = co.cv.CreateImage( imgSize, 8, 1 )
                 co.cv.cvZero( self.buf[i] )
 
         idx1 = self.lastFm
 
         # convert frame to grayscale
-        co.cv.cvCvtColor( img, self.buf[self.lastFm], co.cv.CV_BGR2GRAY )
+        cv2.cvtColor( img, self.buf[self.lastFm], cv2.CV_BGR2GRAY )
 
         # index of (self.lastFm - (n_-1))th frame
         idx2 = ( self.lastFm + 1 ) % n_
@@ -486,7 +514,7 @@ class OcvfwPython(OcvfwBase):
                 if( mRect.width + mRect.height < 30 ):
                     continue
 
-            center = co.cv.cvPoint( (mRect.x + mRect.width/2), (mRect.y + mRect.height/2) )
+            center = co.cv.Point( (mRect.x + mRect.width/2), (mRect.y + mRect.height/2) )
 
             silhRoi = co.cv.cvGetSubRect(silh, mRect)
             count = co.cv.cvNorm( silhRoi, None, co.cv.CV_L1, None )
@@ -498,3 +526,4 @@ class OcvfwPython(OcvfwBase):
             mv.append(center)
 
         return mv
+
diff --git a/src/mousetrap/ocvfw/dev/camera.py b/src/mousetrap/ocvfw/dev/camera.py
index a3eb776..7839b06 100644
--- a/src/mousetrap/ocvfw/dev/camera.py
+++ b/src/mousetrap/ocvfw/dev/camera.py
@@ -35,6 +35,7 @@ from warnings import *
 from .. import debug
 from .. import commons as co
 from mousetrap.ocvfw import _ocv as ocv
+from gi.repository import GObject
 
 Camera = None
 
@@ -62,7 +63,6 @@ class Capture(object):
         self.__flip        = {}
         self.__color       = "bgr"
         self.__props       = { "color" : "rgb" }
-        
 
         Camera = _camera(backend)
         Camera.set_camera_idx(idx)
@@ -84,7 +84,7 @@ class Capture(object):
 
         self.last_update   = 0
         self.last_duration = 0
-
+            
         self.set_async(fps, async)
 
     def set_async(self, fps=100, async=False):
@@ -101,7 +101,7 @@ class Capture(object):
         self.async = async
 
         if self.async:
-            gobject.timeout_add(self.fps, self.sync)
+            GObject.timeout_add(self.fps, self.sync)
 
     def sync(self):
         """
@@ -110,13 +110,13 @@ class Capture(object):
         Arguments:
         - self: The main object pointer.
         """
-
         Camera.query_image()
-
+        #cv.ShowImage("webcam", self.img)
+       
         if not self.__image:
-            self.__images_cn   = { 1 : co.cv.cvCreateImage ( Camera.imgSize, 8, 1 ),
-                                   3 : co.cv.cvCreateImage ( Camera.imgSize, 8, 3 ),
-                                   4 : co.cv.cvCreateImage ( Camera.imgSize, 8, 4 ) }
+            self.__images_cn   = { 1 : co.cv.CreateImage ( Camera.imgSize, 8, 1 ),
+                                   3 : co.cv.CreateImage ( Camera.imgSize, 8, 3 ),
+                                   4 : co.cv.CreateImage ( Camera.imgSize, 8, 4 ) }
 
         self.__color       = "bgr"
         self.__image_orig  = self.__image = Camera.img
@@ -132,6 +132,7 @@ class Capture(object):
             Camera.swap_lkpoints()
 
         self.show_rectangles(self.rectangles())
+       self.draw_point(self.points())
 
         return self.async
 
@@ -166,8 +167,8 @@ class Capture(object):
         if self.__image is None:
             return False
 
-        tmp = co.cv.cvCreateImage( co.cv.cvSize( width, height ), 8, self.__ch )
-        co.cv.cvResize( self.__image, tmp, co.cv.CV_INTER_AREA )
+        tmp = co.cv.CreateImage( ( width, height ), 8, self.__ch )
+        co.cv.Resize( self.__image, tmp, co.cv.CV_INTER_AREA )
 
         if not copy:
             self.__image = tmp
@@ -227,10 +228,11 @@ class Capture(object):
         #debug.debug("Camera", "Showing existing rectangles -> %d" % len(rectangles))
 
         for rect in rectangles:
-            co.cv.cvRectangle( self.__image, co.cv.cvPoint(rect.x, rect.y), co.cv.cvPoint(rect.size[0], rect.size[1]), co.cv.CV_RGB(255,0,0), 3, 8, 0 )
+            co.cv.Rectangle( self.__image, (rect.x, rect.y), (rect.size[0], rect.size[1]), co.cv.CV_RGB(255,0,0), 3, 8, 0 )
 
-    def draw_point(self, x, y):
-        co.cv.cvCircle(self.__image, (x,y), 3, co.cv.cvScalar(0, 255, 0, 0), -1, 8, 0)
+    def draw_point(self, points):
+       for point in points:
+               co.cv.Circle(self.__image, (point.x,point.y), 3, co.cv.Scalar(0, 255, 0, 0), 3, 8, 0)
 
     def original(self):
         """
@@ -256,9 +258,9 @@ class Capture(object):
         rect = args[0]
 
         if len(args) > 1:
-            rect = co.cv.cvRect( args[0], args[1], args[2], args[3] )
+            rect = co.cv.Rectangle( args[0], args[1], args[2], args[3] )
 
-        return co.cv.cvGetSubRect(self.__image, rect)
+        return co.cv.GetSubRect(self.__image, rect)
 
 
     def flip(self, flip):
@@ -271,10 +273,10 @@ class Capture(object):
         """
 
         if "hor" or "both" in flip:
-            co.cv.cvFlip( self.__image, self.__image, 1)
+            co.cv.Flip( self.__image, self.__image, 1)
 
         if "ver" or "both" in flip:
-            co.cv.cvFlip( self.__image, self.__image, 0)
+            co.cv.Flip( self.__image, self.__image, 0)
 
         return self.__image
 
@@ -293,7 +295,7 @@ class Capture(object):
 
         if new_color:
             tmp = self.__images_cn[channel]
-            co.cv.cvCvtColor( self.__image, tmp, self.__color_int['cv_%s2%s' % (self.__color, new_color) ])
+            co.cv.CvtColor( self.__image, tmp, self.__color_int['cv_%s2%s' % (self.__color, new_color) ])
             self.__color = new_color
             self.__ch = channel
 
@@ -374,7 +376,13 @@ class Capture(object):
         if roi is None:
             return Camera.get_haar_points(haar_csd)
 
-        roi = co.cv.cvRect(roi["start"], roi["end"], roi["width"], roi["height"])
+       #FIXME:This should not be hard coded
+       #roi = (250, 120, 390, 360)
+        roi = (roi["start"], roi["end"], roi["width"], roi["height"]) #get_haar_roi_points needs a list
+        #roi = co.cv.Rectangle(self.__image, (roi[0], roi[1]), (roi[2], roi[3]), (0,0,255)) 
+                                               # was roi["start"], roi["end"]), (roi["width"], roi["height"]
+            #roi pt1 and pt2 needs to be a vertex and added color
+        #might need to remove and reestablish point values
         return Camera.get_haar_roi_points(haar_csd, roi, orig)
 
     def message(self, message):
@@ -465,7 +473,7 @@ class Point(Graphic):
         self.__ocv = None
         self.last  = None
         self.diff  = None
-        self.orig  = co.cv.cvPoint( self.x, self.y )
+        self.orig  = ( self.x, self.y )
 
     def set_opencv(self, opencv):
         """
@@ -485,10 +493,10 @@ class Point(Graphic):
             self.last = self.__ocv
 
             # Update the diff attr
-            self.rel_diff = co.cv.cvPoint( self.last.x - self.x,
+            self.rel_diff = ( self.last.x - self.x,
                                         self.last.y - self.y )
 
-            self.abs_diff = co.cv.cvPoint( self.x - self.orig.x,
+            self.abs_diff = ( self.x - self.orig.x,
                                         self.y - self.orig.y )
 
         self.__ocv = opencv
@@ -503,3 +511,4 @@ class Point(Graphic):
         - self: The main object pointer.
         """
         return self.__ocv
+
diff --git a/src/mousetrap/ocvfw/idm/forehead.py b/src/mousetrap/ocvfw/idm/forehead.py
index 62e9fd0..5bb617d 100644
--- a/src/mousetrap/ocvfw/idm/forehead.py
+++ b/src/mousetrap/ocvfw/idm/forehead.py
@@ -29,7 +29,7 @@ __license__   = "GPLv2"
 
 import mousetrap.ocvfw.debug as debug
 import mousetrap.ocvfw.commons as commons
-from mousetrap.ocvfw.dev.camera import Capture, Point
+from mousetrap.ocvfw.dev.camera import Capture, Point, Graphic
 
 a_name = "Forehead"
 a_description = "Forehead point tracker based on LK Algorithm"
@@ -104,12 +104,10 @@ class Module(object):
         """
         
         debug.debug("mousetrap.ocvfw.idm", "Setting Capture")
-        
         self.cap = Capture(async=True, idx=cam, backend="OcvfwPython")
         self.cap.change(color="rgb")
         self.cap.set_camera("lk_swap", True)
-
-
+       
     def calc_motion(self):
         if not hasattr(self.cap, "forehead"):
             self.get_forehead()
@@ -127,9 +125,11 @@ class Module(object):
         if not hasattr(self.cap, "forehead"):
             self.get_forehead()
 
+       #self.get_forehead()
+
         #return self.cap.resize(200, 160, True)
         return self.cap
-
+       
     def get_pointer(self):
         """
         Returns the new MousePosition
@@ -139,39 +139,51 @@ class Module(object):
         """
 
         if hasattr(self.cap, "forehead"):
+           #debug.debug("Forehead Point", self.cap.forehead)
             return self.cap.forehead
 
     def get_forehead(self):
         eyes = False
-        #self.cap.add_message("Getting Forehead!!!")
-
+        #self.cap.message("Getting Forehead!!!")
         face     = self.cap.get_area(commons.haar_cds['Face'])
 
         if face:
-            areas    = [ (pt[1].x - pt[0].x)*(pt[1].y - pt[0].y) for pt in face ]
+           debug.debug("face", face)
+
+            areas    = [ (pt[1][0] - pt[0][0])*(pt[1][1] - pt[0][1]) for pt in face ] #replaced x with [0] and y with [1]
             startF   = face[areas.index(max(areas))][0]
-            endF     = face[areas.index(max(areas))][1]
+           #startF = face[0][0]
+           endF     = face[areas.index(max(areas))][1]
+           #endF = face[0][1]
 
             # Shows the face rectangle
-            #self.cap.add( Graphic("rect", "Face", ( startF.x, startF.y ), (endF.x, endF.y), parent=self.cap) )
+            self.cap.add( Graphic("rect", "Face", ( startF[0], startF[1] ), (endF[0], endF[1]), parent=self.cap) )
 
-            eyes = self.cap.get_area( commons.haar_cds['Eyes'], {"start" : startF.x,
-                                                         "end" : startF.y,
-                                                         "width" : endF.x - startF.x,
-                                                         "height" : endF.y - startF.y}, (startF.x, startF.y) )
+            eyes = self.cap.get_area( 
+                commons.haar_cds['Eyes'],
+               {"start" : startF[0], "end" : startF[1], "width" : endF[0] - startF[0],"height" : endF[1] - startF[1]},
+                (startF[0], startF[1]) ) # replaced x and y
+           debug.debug("eyes - get_area", eyes)
 
         if eyes:
-            areas = [ (pt[1].x - pt[0].x)*(pt[1].y - pt[0].y) for pt in eyes ]
+            areas = [ (pt[1][0] - pt[0][0])*(pt[1][1] - pt[0][1]) for pt in eyes ] #replaced x with [0] and y with [1]
 
             point1, point2   = eyes[areas.index(max(areas))][0], eyes[areas.index(max(areas))][1]
+           point1, point2 = eyes[0][0], eyes[0][1]
+           debug.debug("eyes", point1)
 
             # Shows the eyes rectangle
-            #self.cap.add(Graphic("rect", "Face", ( point1.x, point1.y ), (point2.x, point2.y), parent=self.cap))
+            #self.cap.add(Graphic("rect", "Eyes", ( point1[0], point1[1] ), (point2[0], point2[1]), parent=self.cap))
 
-            X, Y = ( (point1.x + point2.x) / 2 ), ( point1.y + ( (point1.y + point2.y) / 2 ) ) / 2
-            self.cap.add( Point("point", "forehead", ( X, Y ), parent=self.cap, follow=True) )
+            X, Y = ( (point1[0] + point2[0]) / 2 ), ( point1[1] + ( (point1[1] + point2[1]) / 2 ) ) / 2 #replaced x and y
+           self.cap.forehead = (X,Y)
+
+           self.cap.forehead = (((startF[0] + endF[0])/2),((startF[1] + endF[1])/2))
+            self.cap.add( Point("point", "forehead-point", self.cap.forehead, parent=self.cap, follow=True) )
+           debug.debug("forehead point", self.cap.forehead)
             return True
 
         self.foreheadOrig = None
 
         return False
+
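
A note on the show_lkpoints() hunk above: cv2.calcOpticalFlowPyrLK takes numpy arrays and
returns (nextPts, status, err) directly, so the lines that still read from the old
optical_flow tuple after the new call will need a follow-up. A minimal sketch of the cv2
call, assuming two 8-bit grayscale numpy frames and an (N, 1, 2) float32 point array (the
frame sizes and point values here are placeholders):

    import numpy as np
    import cv2

    # Two consecutive 8-bit grayscale frames and the points to track.
    prev_gray = np.zeros((480, 640), dtype=np.uint8)
    gray = np.zeros((480, 640), dtype=np.uint8)
    prev_pts = np.array([[[320.0, 240.0]]], dtype=np.float32)

    next_pts, status, err = cv2.calcOpticalFlowPyrLK(
        prev_gray, gray, prev_pts, None,
        winSize=(20, 20),
        maxLevel=2,
        criteria=(cv2.TERM_CRITERIA_MAX_ITER | cv2.TERM_CRITERIA_EPS, 20, 0.03))

    # Keep only the points that were successfully tracked.
    tracked = next_pts[status.flatten() == 1]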


