[mousetrap/gnome3-wip] BUG 691881: Removed outdated cv code and replaced with CV2 code.
- From: Heidi Ellis <heidiellis src gnome org>
- To: commits-list gnome org
- Cc:
- Subject: [mousetrap/gnome3-wip] BUG 691881: Removed outdated cv code and replaced with CV2 code.
- Date: Fri, 31 Jan 2014 18:27:49 +0000 (UTC)
commit eb4e482c553c9fd13ec57fab924c7c64304339be
Author: Logan Hotchkiss <lhotchkiss17 gmail com>
Date: Wed Jan 22 19:05:18 2014 -0500
BUG 691881: Removed outdated cv code and replaced with CV2 code.
src/mousetrap/app/ui/main.py | 23 +-----
src/mousetrap/ocvfw/_ocv.py | 143 +++++++++++++---------------------
src/mousetrap/ocvfw/dev/camera.py | 64 ++++++++--------
src/mousetrap/ocvfw/idm/forehead.py | 25 +-----
4 files changed, 94 insertions(+), 161 deletions(-)
---
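For context, the patch replaces the legacy cv/IplImage calls with cv2 functions that operate directly on numpy arrays. A minimal sketch of the new-style capture path, assuming a local webcam at index 0 (the index and the grey conversion here are illustrative, not lifted from the patch):

    import cv2

    # Old API: capture = cv.CaptureFromCAM(0); frame = cv.QueryFrame(capture)
    capture = cv2.VideoCapture(0)
    ret, frame = capture.read()    # frame is a numpy array; ret is False if the grab failed

    if ret:
        # Old API: grey = cv.CreateImage(cv.GetSize(frame), 8, 1); cv.CvtColor(frame, grey, cv.CV_BGR2GRAY)
        grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)    # cv2 returns the new array directly

    capture.release()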
diff --git a/src/mousetrap/app/ui/main.py b/src/mousetrap/app/ui/main.py
index 9852245..a082383 100644
--- a/src/mousetrap/app/ui/main.py
+++ b/src/mousetrap/app/ui/main.py
@@ -33,6 +33,7 @@ import settings_gui
import mousetrap.app.debug as debug
import mousetrap.app.environment as env
from mousetrap.app.addons import cpu
+import numpy
class MainGui( Gtk.Window ):
'''
@@ -87,8 +88,6 @@ class MainGui( Gtk.Window ):
self.set_title("MouseTrap")
self.connect("destroy", self.close)
- # Why does this appear twice?? (line below)
- self.setWindowsIcon()
self.vbox = Gtk.Grid()
@@ -118,29 +117,13 @@ class MainGui( Gtk.Window ):
self.cap_expander = Gtk.Expander.new_with_mnemonic("_Camera Image")
self.cap_expander.add(self.cap_image)
self.cap_expander.set_expanded(True)
- #expander.connect('notify::expanded', self.expaned_cb)
self.vbox.attach_next_to(self.cap_expander, self.addonBox, Gtk.PositionType.BOTTOM, 1, 1)
if self.cfg.getboolean("gui", "showPointMapper"):
self.map_expander = Gtk.Expander.new_with_mnemonic("_Script Mapper")
self.map_expander.add(self.script)
self.map_expander.set_expanded(True)
- #expander.connect('notify::expanded', self.expaned_cb)
- self.vbox.attach_next_to(self.map_expander, self.addonBox, Gtk.PositionType.BOTTOM, 1, 1)
-
-# self.hbox = Gtk.Grid()
-#
-# flipButton = Gtk.Button( _("Flip Image") )
-# flipButton.connect("clicked", self.recalcPoint, "flip" )
-# hbox.add(flipButton)
-#
-# recalcButton = Gtk.Button( _("Recalc Point") )
-# recalcButton.connect("clicked", self.recalcPoint )
-# hbox.add(recalcButton)
-#
-# self.vbox.add(self.hbox, Gtk.PositionType.BOTTOM)
-#
-# self.buttonsBox.show_all()
+ self.vbox.attach_next_to(self.map_expander, self.cap_expander, Gtk.PositionType.BOTTOM, 1, 1)
self.statusbar = Gtk.Statusbar()
self.statusbar_id = self.statusbar.get_context_id("statusbar")
@@ -172,7 +155,7 @@ class MainGui( Gtk.Window ):
- self: The main object pointer.
- img: The IPLimage object.
'''
- if not cap.image():
+ if not numpy.any(cap.image()):
return False
#sets new pixbuf
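The numpy.any() guard above is the key change in this file: once cap.image() returns a numpy array instead of an IplImage, a bare "if not cap.image():" raises a ValueError because the truth value of a multi-element array is ambiguous. A small illustration with a synthetic frame:

    import numpy

    frame = numpy.zeros((2, 2), numpy.uint8)

    try:
        if not frame:              # old-style truth test; fails for multi-element arrays
            pass
    except ValueError as err:
        print(err)                 # "The truth value of an array with more than one element is ambiguous..."

    if not numpy.any(frame):       # new test: True only when every element is zero
        print("no image data yet")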
diff --git a/src/mousetrap/ocvfw/_ocv.py b/src/mousetrap/ocvfw/_ocv.py
index 064187a..d4319f2 100644
--- a/src/mousetrap/ocvfw/_ocv.py
+++ b/src/mousetrap/ocvfw/_ocv.py
@@ -77,9 +77,7 @@ class OcvfwBase:
Creates a new image
"""
- #if type(size) == "<type 'tuple'>":
- #size = co.cv.cvSize( size[0], size[1])
- return co.cv.CreateImage( (size[0], size[1]), num, ch)# was size'
+ return numpy.zeros((size[0],size[1],ch),num)
def set_camera_idx(self, idx):
"""
@@ -99,7 +97,7 @@ class OcvfwBase:
- self: The main object pointer.
- num: An int value.
"""
- return co.cv.WaitKey(num)
+ return cv2.waitKey(num)
def start_camera(self, params = None):
"""
@@ -108,7 +106,7 @@ class OcvfwBase:
Arguments:
- params: A list with the capture properties. NOTE: Not implemented yet.
"""
- self.capture = cv.CaptureFromCAM(self.idx )
+ self.capture = cv2.VideoCapture(self.idx)
debug.debug( "ocvfw", "start_camera: Camera Started" )
@@ -123,29 +121,18 @@ class OcvfwBase:
Returns The image even if it was stored in self.img
"""
- frame = cv.QueryFrame( self.capture )
+ ret,frame = self.capture.read()
- #Test to make sure camera starts properly
- #cv.ShowImage("webcam", frame)
-
-
- if not self.img:
- self.storage = co.cv.CreateMemStorage(0)
- self.imgSize = co.cv.GetSize (frame)
- self.img = co.cv.CreateImage ( self.imgSize, 8, 3 )
- #self.img.origin = frame.origin
- self.grey = co.cv.CreateImage ( self.imgSize, 8, 1 )
- self.yCrCb = co.cv.CreateImage ( self.imgSize, 8, 3 )
- self.prevGrey = co.cv.CreateImage ( self.imgSize, 8, 1 )
- self.pyramid = co.cv.CreateImage ( self.imgSize, 8, 1 )
- self.prevPyramid = co.cv.CreateImage ( self.imgSize, 8, 1 )
- #a = co.cv.Round(self.img.width/self.imageScale)
- #b = co.cv.Round(self.img.height/self.imageScale)
- #c = (a, b)
- self.small_img = co.cv.CreateImage(
- ( co.cv.Round(self.img.width/self.imageScale),
- co.cv.Round(self.img.height/self.imageScale) ),
- 8, 3 )
+ if not numpy.any(self.img):
+ self.imgSize = frame.shape
+ self.img = numpy.zeros((self.imgSize[0], self.imgSize[1], 3), numpy.uint8)
+ self.grey = numpy.zeros((self.imgSize[0], self.imgSize[1], 1), numpy.uint8)
+ self.yCrCb = numpy.zeros((self.imgSize[0], self.imgSize[1], 3), numpy.uint8)
+ self.prevGrey = numpy.zeros((self.imgSize[0], self.imgSize[1], 1), numpy.uint8)
+ self.pyramid = numpy.zeros((self.imgSize[0], self.imgSize[1], 1), numpy.uint8)
+ self.prevPyramid = numpy.zeros((self.imgSize[0], self.imgSize[1], 1), numpy.uint8)
+ self.small_img = numpy.zeros(((self.imgSize[0]/self.imageScale),
+ (self.imgSize[1]/self.imageScale),3 ),numpy.uint8)
self.img = frame
@@ -161,27 +148,25 @@ class OcvfwBase:
- point: A co.cv.Point Point.
"""
- #Point = co.cv.Point( point.x, point.y )
-
- self.img_lkpoints["current"] = numpy.zeros((point.x, point.y), numpy.float32)
- self.img_lkpoints["current"] = cv.fromarray(self.img_lkpoints["current"])
+ cvPoint = (point.x, point.y)
- self.grey = numpy.asarray(self.grey[:,:]) #new
+ self.img_lkpoints["current"] = numpy.mat((point.x,point.y),numpy.float32)
+ self.grey = numpy.asarray(self.grey[:,:])
if numpy.all(self.img_lkpoints["current"]):
- #co.cv.FindCornerSubPix(
- cv2.cornerSubPix( # was cv.FindCornerSubPix
+ cv2.cornerSubPix(
self.grey,
self.img_lkpoints["current"],
(20, 20), (0,0),
(cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03))
- point.set_opencv( point )
+
+ point.set_opencv( cvPoint )
self.img_lkpoints["points"].append(point)
setattr(point.parent, point.label, point)
if len(self.img_lkpoints["last"]) > 0:
- self.img_lkpoints["last"].append( self.img_lkpoints["current"][0] )
+            self.img_lkpoints["last"] = numpy.append(self.img_lkpoints["last"], self.img_lkpoints["current"][0])
debug.debug( "ocvfw", "cmSetLKPoints: New LK Point Added" )
else:
@@ -207,30 +192,29 @@ class OcvfwBase:
- self: The main object pointer.
"""
- self.prevGrey = numpy.asarray(self.prevGrey[:,:])
- prevGrey = cv2.cvtColor(self.prevGrey, cv2.COLOR_BGR2GRAY)
-
self.grey = numpy.asarray(self.grey[:,:])
- grey = cv2.cvtColor(self.grey, cv2.COLOR_BGR2GRAY)
-
- # calculate the optical flow
- nextPts, status, err = cv2.calcOpticalFlowPyrLK (
- prevGrey, #prevImg
- grey, #nextImg
- self.prevPyramid, #prevPts
- self.pyramid, #nextPts
- None, #status
- (20, 20), #winSize
- 2, #maxLevel
- (cv2.TERM_CRITERIA_MAX_ITER|cv2.TERM_CRITERIA_EPS, 20, 0.03), #criteria
- cv2.OPTFLOW_USE_INITIAL_FLOW #flags
- )
- cv.ShowImage("test",self.grey)
+ self.img_lkpoints["last"] = numpy.asarray(self.img_lkpoints["last"])
+ self.img_lkpoints["current"] = numpy.asarray(self.img_lkpoints["current"])
+
+
+ # calculate the optical flow
+ optical_flow, status, err = cv2.calcOpticalFlowPyrLK (
+ self.prevGrey,
+ self.grey,
+ self.img_lkpoints["last"],
+ self.img_lkpoints["last"],
+ None, #status vector
+ None, #error vector
+ (20, 20), #winSize
+ 2, #maxLevel
+ (cv2.TERM_CRITERIA_MAX_ITER|cv2.TERM_CRITERIA_EPS, 20, 0.03), #criteria
+ cv2.OPTFLOW_USE_INITIAL_FLOW #flags
+ )
if isinstance(optical_flow[0], tuple):
self.img_lkpoints["current"], status = optical_flow[0]
else:
- self.img_lkpoints["current"], status = optical_flow
+ self.img_lkpoints["current"] = optical_flow
# initializations
@@ -239,19 +223,16 @@ class OcvfwBase:
for point in self.img_lkpoints["current"]:
- if not status[counter]:
- continue
-
# this point is a correct point
current = self.img_lkpoints["points"][counter]
- current.set_opencv(co.cv.Point(int(point.x), int(point.y)))
+ current.set_opencv((int(point.item(0)),int(point.item(1))))
new_points.append( point )
setattr(current.parent, current.label, current)
# draw the current point
- current.parent.draw_point(point.x, point.y)
+ current.parent.draw_point((point.item(0), point.item(1)))
# increment the counter
counter += 1
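For reference, cv2.calcOpticalFlowPyrLK takes the previous and current greyscale frames plus a float32 array of previous point coordinates, and returns the tracked points together with per-point status and error arrays. A minimal sketch with synthetic frames and a single made-up point:

    import cv2
    import numpy

    prev_grey = numpy.zeros((240, 320), numpy.uint8)
    next_grey = numpy.zeros((240, 320), numpy.uint8)

    # One tracked point at (160, 120); shape (N, 1, 2), dtype float32.
    prev_pts = numpy.array([[[160.0, 120.0]]], numpy.float32)

    next_pts, status, err = cv2.calcOpticalFlowPyrLK(
        prev_grey, next_grey, prev_pts, None,
        winSize=(20, 20), maxLevel=2,
        criteria=(cv2.TERM_CRITERIA_MAX_ITER | cv2.TERM_CRITERIA_EPS, 20, 0.03))

    # status[i] == 1 means the flow for point i was found; next_pts[i] is its new position.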
@@ -330,7 +311,7 @@ class OcvfwPython(OcvfwBase):
OcvfwBase.__init__(self)
- def add_message(self, message, font=co.cv.CV_FONT_HERSHEY_COMPLEX, poss=None):
+ def add_message(self, message, font=cv2.FONT_HERSHEY_COMPLEX, poss=None):
"""
Write a message into the image.
@@ -341,12 +322,11 @@ class OcvfwPython(OcvfwBase):
- poss: The position of the message in the image. NOTE: Not enabled yet.
"""
- font = co.cv.InitFont ( font, 1, 1, 0.0, 1, co.cv.CV_AA)
- textSize, ymin = co.cv.GetTextSize (message, font)
+ textSize, ymin = cv2.getTextSize (message, font, 1,1)
pt1 = (( self.img.width - textSize.width ) / 2 , 20 )
- co.cv.PutText (self.img, message, pt1, font, co.cv.Scalar (255, 0, 0))
+ cv2.putText (self.img, message, pt1, font, 1, (255,0,0), 1, cv2.CV_AA)
- def get_haar_points(self, haarCascade, method=co.cv.CV_HAAR_DO_CANNY_PRUNING):
+ def get_haar_points(self, haarCascade, method=1):
"""
Search for points matching the haarcascade selected.
@@ -358,27 +338,24 @@ class OcvfwPython(OcvfwBase):
Returns a list with the matches.
"""
- cascade = co.cv.Load( haarCascade) #, self.imgSize )
+ cascade = cv2.CascadeClassifier(haarCascade)
if not cascade:
debug.exception( "ocvfw", "The Haar Classifier Cascade load failed" )
- co.cv.Resize( self.img, self.small_img, co.cv.CV_INTER_LINEAR )
-
- #co.cv.ClearMemStorage( self.storage )
+        self.small_img = cv2.resize(self.img,(self.small_img.shape[0],self.small_img.shape[1]),self.small_img,0,0,cv2.INTER_LINEAR)
- points = co.cv.HaarDetectObjects( self.small_img, cascade, self.storage, 1.2, 2, method, (20, 20) )
+ points = cascade.detectMultiScale(self.small_img,1.2,2,method,(20,20))
- if points:
- matches = [ [ ( int(r[0][0]*self.imageScale), int(r[0][1]*self.imageScale)), \
- ( int((r[0][0]+r[0][3])*self.imageScale), int((r[0][0]+r[0][2])*self.imageScale) )] \
+ if numpy.any(points):
+ matches = [ [ ( int(r[0]*self.imageScale), int(r[1]*self.imageScale)), \
+ ( int((r[0]+r[3])*self.imageScale), int((r[0]+r[2])*self.imageScale) )] \
for r in points]
debug.debug( "ocvfw", "cmGetHaarPoints: detected some matches" )
- debug.debug("ocvfw-getHaarPoints", matches)
return matches
- def get_haar_roi_points(self, haarCascade, rect, origSize=(0, 0), method=co.cv.CV_HAAR_DO_CANNY_PRUNING):
+ def get_haar_roi_points(self, haarCascade, rect, origSize=(0, 0), method=1):
"""
Search for points matching the haarcascade selected.
@@ -389,25 +366,20 @@ class OcvfwPython(OcvfwBase):
Returns a list with the matches.
"""
- cascade = co.cv.Load( haarCascade ) #, self.imgSize )
+ cascade = cv2.CascadeClassifier(haarCascade)
if not cascade:
debug.exception( "ocvfw", "The Haar Classifier Cascade load failed" )
- debug.debug( "ocvfw-get_haar_roi_points", self.img)
-
#FIXME: Work around to fix when the rect is too big
if (rect[0]+rect[2]) > self.img.width:
rect = (rect[0], rect[1], self.img.width-rect[0],self.img.height-rect[1])
if (rect[1]+rect[3]) > self.img.height:
rect = (rect[0], rect[1], self.img.width-rect[0],self.img.height-rect[1])
- debug.debug("before GetSubRect - rect",rect)
- debug.debug("before GetSubRect - self.img", self.img)
- imageROI = co.cv.GetSubRect(self.img, rect)
+ imageROI = self.img[rect[1]:rect[3], rect[0]:rect[2]]
if cascade:
- points = co.cv.HaarDetectObjects( imageROI, cascade, self.storage,
- 1.2, 2, method, (20,20) )
+ points = cascade.detectMultiScale(imageROI,1.2,2,method,(20,20))
else:
debug.exception( "ocvfw", "The Haar Classifier Cascade load Failed (ROI)" )
@@ -415,13 +387,8 @@ class OcvfwPython(OcvfwBase):
matches = [ [ ( int(r[0][0]*origSize[0]), int(r[0][1]*origSize[1])), \
( int((r[0][0]+r[0][3])+origSize[0]), int((r[0][1]+r[0][2])*origSize[1]) )] \
for r in points]
- #matches = [ [ ( int(r[0][0]), int(r[0][1])), \
- # ( int((r[0][0]+r[0][3])), int((r[0][1]+r[0][2])) )] \
- # for r in points]
- #FIXME: I don't think the matches are right
debug.debug( "ocvfw", "cmGetHaarROIPoints: detected some matches" )
- debug.debug("ocvfw-getHaarROIPoints", matches)
return matches
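For reference, cv2.CascadeClassifier.detectMultiScale returns plain (x, y, w, h) rectangles rather than the nested ((x, y, w, h), neighbours) tuples that the old HaarDetectObjects produced, which is why the match-building code above indexes r[0]..r[3] directly. A sketch under that assumption (the cascade path and image file are placeholders):

    import cv2

    cascade = cv2.CascadeClassifier("haarcascade_frontalface_alt.xml")    # placeholder path
    img = cv2.imread("face.jpg")                                          # placeholder image
    grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # scaleFactor=1.2, minNeighbors=2 and minSize=(20, 20) mirror the values used in the patch.
    rects = cascade.detectMultiScale(grey, scaleFactor=1.2, minNeighbors=2, minSize=(20, 20))

    # Each r is (x, y, w, h), so the opposite corner is (x + w, y + h).
    matches = [[(int(r[0]), int(r[1])), (int(r[0] + r[2]), int(r[1] + r[3]))] for r in rects]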
diff --git a/src/mousetrap/ocvfw/dev/camera.py b/src/mousetrap/ocvfw/dev/camera.py
index 9224061..ffb0a72 100644
--- a/src/mousetrap/ocvfw/dev/camera.py
+++ b/src/mousetrap/ocvfw/dev/camera.py
@@ -36,6 +36,8 @@ from .. import debug
from .. import commons as co
from mousetrap.ocvfw import _ocv as ocv
from gi.repository import GObject
+import numpy
+import cv2
Camera = None
@@ -111,12 +113,11 @@ class Capture(object):
- self: The main object pointer.
"""
Camera.query_image()
- #cv.ShowImage("webcam", self.img)
- if not self.__image:
- self.__images_cn = { 1 : co.cv.CreateImage ( Camera.imgSize, 8, 1 ),
- 3 : co.cv.CreateImage ( Camera.imgSize, 8, 3 ),
- 4 : co.cv.CreateImage ( Camera.imgSize, 8, 4 ) }
+ if not numpy.any(self.__image):
+ self.__images_cn = { 1 : numpy.zeros((Camera.imgSize[0], Camera.imgSize[1], 1), numpy.uint8),
+ 3 : numpy.zeros((Camera.imgSize[0], Camera.imgSize[1], 3), numpy.uint8),
+ 4 : numpy.zeros((Camera.imgSize[0], Camera.imgSize[1], 4), numpy.uint8)}
self.__color = "bgr"
self.__image_orig = self.__image = Camera.img
@@ -132,7 +133,8 @@ class Capture(object):
Camera.swap_lkpoints()
self.show_rectangles(self.rectangles())
- self.draw_point(self.points())
+ if(self.points()):
+ self.draw_point(self.points()[0].orig)
return self.async
@@ -167,8 +169,8 @@ class Capture(object):
if self.__image is None:
return False
- tmp = co.cv.CreateImage( ( width, height ), 8, self.__ch )
- co.cv.Resize( self.__image, tmp, co.cv.CV_INTER_AREA )
+ tmp = numpy.zeros((width, height, self.__ch), numpy.uint8)
+ tmp = cv2.resize(self.__image,(width,height),tmp,0,0,cv2.INTER_LINEAR)
if not copy:
self.__image = tmp
@@ -186,18 +188,18 @@ class Capture(object):
img = self.__image
if "as_numpy_array" in dir(img):
- buff = GdkPixbuf.Pixbuf.new_from_array(img.as_numpy_array(),
+ buff = GdkPixbuf.new_from_array(img.as_numpy_array(),
GdkPixbuf.Colorspace.RGB,
img.depth)
else:
buff = GdkPixbuf.Pixbuf.new_from_data(img.tostring(),
GdkPixbuf.Colorspace.RGB,
False, # has alpha channel
- img.depth,
- img.width,
- img.height,
- img.width*img.nChannels, # stride or widthStep
-                                              None, None) #Bug workaround for memory management
+ 8, #depth
+ img.shape[1], #width
+ img.shape[0], #height
+ img.shape[1]*img.shape[2],
+ None, None)
return buff
def points(self):
@@ -228,11 +230,11 @@ class Capture(object):
#debug.debug("Camera", "Showing existing rectangles -> %d" % len(rectangles))
for rect in rectangles:
-            co.cv.Rectangle( self.__image, (rect.x, rect.y), (rect.size[0], rect.size[1]), co.cv.CV_RGB(255,0,0), 3, 8, 0 )
+ cv2.rectangle( self.__image,(rect.x, rect.y), (rect.size[0], rect.size[1]),(255, 0, 0),3,8,0)
def draw_point(self, points):
- for point in points:
- co.cv.Circle(self.__image, (point.x,point.y), 3, co.cv.Scalar(0, 255, 0, 0), 3, 8, 0)
+ #for point in points:
+ cv2.circle(self.__image,(int(points[0]),int(points[1])),3,(0, 255, 0),3,8,0)
def original(self):
"""
@@ -273,10 +275,10 @@ class Capture(object):
"""
if "hor" or "both" in flip:
- co.cv.Flip( self.__image, self.__image, 1)
+ self.__image = cv2.flip(self.__image, 1)
if "ver" or "both" in flip:
- co.cv.Flip( self.__image, self.__image, 0)
+ self.__image = cv2.flip(self.__image, 0)
return self.__image
@@ -295,7 +297,7 @@ class Capture(object):
if new_color:
tmp = self.__images_cn[channel]
- co.cv.CvtColor( self.__image, tmp, self.__color_int['cv_%s2%s' % (self.__color, new_color) ])
+ tmp = cv2.cvtColor(self.__image, self.__color_int['cv_%s2%s' % (self.__color, new_color) ])
self.__color = new_color
self.__ch = channel
@@ -329,7 +331,8 @@ class Capture(object):
warn("The Capture is locked, no changes can be done", RuntimeWarning)
return False
- if not hasattr(self, graphic.label):
+ #FIXME: Change this to show only 1 or many rectangles
+ if True:
setattr(self, graphic.label, graphic)
self.__graphics[graphic.type].append(graphic)
@@ -376,13 +379,8 @@ class Capture(object):
if roi is None:
return Camera.get_haar_points(haar_csd)
- #FIXME:This should not be hard coded
- #roi = (250, 120, 390, 360)
roi = (roi["start"], roi["end"], roi["width"], roi["height"]) #get_haar_roi_points needs a list
- #roi = co.cv.Rectangle(self.__image, (roi[0], roi[1]), (roi[2], roi[3]), (0,0,255))
- # was roi["start"], roi["end"]), (roi["width"], roi["height"]
- #roi pt1 and pt2 needs to be a vertex and added color
- #might need to remove and reestablish point values
+
return Camera.get_haar_roi_points(haar_csd, roi, orig)
def message(self, message):
@@ -485,19 +483,19 @@ class Point(Graphic):
"""
# Update the current attrs
- self.x = opencv.x
- self.y = opencv.y
+ self.x = opencv[0]
+ self.y = opencv[1]
if self.__ocv is not None:
# Update the last attr
self.last = self.__ocv
# Update the diff attr
- self.rel_diff = ( self.last.x - self.x,
- self.last.y - self.y )
+ self.rel_diff = ( self.last[0] - self.x,
+ self.last[1] - self.y )
- self.abs_diff = ( self.x - self.orig.x,
- self.y - self.orig.y )
+ self.abs_diff = ( self.x - self.orig[0],
+ self.y - self.orig[1] )
self.__ocv = opencv
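The drawing and flip changes above follow the same pattern as the rest of the port: cv2.rectangle and cv2.circle still draw in place on the numpy image, while cv2.flip returns a new array and therefore has to be assigned back to self.__image. A small standalone sketch on a synthetic image:

    import cv2
    import numpy

    img = numpy.zeros((240, 320, 3), numpy.uint8)

    cv2.rectangle(img, (10, 10), (100, 80), (255, 0, 0), 3)    # draws in place, like co.cv.Rectangle
    cv2.circle(img, (50, 50), 3, (0, 255, 0), 3)               # draws in place, like co.cv.Circle

    img = cv2.flip(img, 1)    # returns the flipped array; 1 = horizontal, 0 = vertical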
diff --git a/src/mousetrap/ocvfw/idm/forehead.py b/src/mousetrap/ocvfw/idm/forehead.py
index 04f1c1f..97c1d3e 100644
--- a/src/mousetrap/ocvfw/idm/forehead.py
+++ b/src/mousetrap/ocvfw/idm/forehead.py
@@ -125,9 +125,6 @@ class Module(object):
if not hasattr(self.cap, "forehead"):
self.get_forehead()
- #self.get_forehead()
-
- #return self.cap.resize(200, 160, True)
return self.cap
def get_pointer(self):
@@ -139,7 +136,6 @@ class Module(object):
"""
if hasattr(self.cap, "forehead"):
- #debug.debug("Forehead Point", self.cap.forehead)
return self.cap.forehead
def get_forehead(self):
@@ -148,39 +144,28 @@ class Module(object):
face = self.cap.get_area(commons.haar_cds['Face'])
if face:
- debug.debug("face", face)
            areas = [ (pt[1][0] - pt[0][0])*(pt[1][1] - pt[0][1]) for pt in face ] #replaced x with [0] and y with [1]
startF = face[areas.index(max(areas))][0]
- #startF = face[0][0]
endF = face[areas.index(max(areas))][1]
- #endF = face[0][1]
# Shows the face rectangle
            self.cap.add( Graphic("rect", "Face", ( startF[0], startF[1] ), (endF[0], endF[1]), parent=self.cap) )
- eyes = self.cap.get_area(
- commons.haar_cds['Eyes'],
-                {"start" : startF[0], "end" : startF[1], "width" : endF[0] - startF[0],"height" : endF[1] - startF[1]},
- (startF[0], startF[1]) ) # replaced x and y
- debug.debug("eyes - get_area", eyes)
+ eyes = self.cap.get_area(commons.haar_cds['Eyes'])
if eyes:
-            areas = [ (pt[1][0] - pt[0][0])*(pt[1][1] - pt[0][1]) for pt in eyes ] #replaced x with [0] and y with [1]
+ areas = [ (pt[1][0] - pt[0][0])*(pt[1][1] - pt[0][1]) for pt in eyes ]
point1, point2 = eyes[areas.index(max(areas))][0], eyes[areas.index(max(areas))][1]
point1, point2 = eyes[0][0], eyes[0][1]
- debug.debug("eyes", point1)
# Shows the eyes rectangle
            #self.cap.add(Graphic("rect", "Eyes", ( point1[0], point1[1] ), (point2[0], point2[1]), parent=self.cap))
-            X, Y = ( (point1[0] + point2[0]) / 2 ), ( point1[1] + ( (point1[1] + point2[1]) / 2 ) ) / 2 #replaced x and y
- self.cap.forehead = (X,Y)
-
- self.cap.forehead = (((startF[0] + endF[0])/2),((startF[1] + endF[1])/2))
- self.cap.add( Point("point", "forehead-point", self.cap.forehead, parent=self.cap, follow=True) )
- debug.debug("forehead point", self.cap.forehead)
+ #FIXME: Not correct
+ X, Y = (((startF[0] + endF[0])/2),((startF[1] + endF[1])/3))
+ self.cap.add( Point("point", "forehead", (X,Y), parent=self.cap, follow=True) )
return True
self.foreheadOrig = None
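For reference, the new forehead estimate above takes the horizontal midpoint of the detected face rectangle and a y value of (top + bottom) / 3, which the patch itself flags with a FIXME. A standalone sketch with made-up face coordinates:

    startF = (120, 80)     # top-left corner of a detected face rectangle (made up)
    endF = (280, 260)      # bottom-right corner (made up)

    X = (startF[0] + endF[0]) // 2    # horizontal midpoint of the face
    Y = (startF[1] + endF[1]) // 3    # crude vertical estimate, marked "#FIXME: Not correct" in the patch
    forehead = (X, Y)                 # (200, 113) for these coordinates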