import React, { useState, useEffect, useRef, useCallback } from 'react';
import * as vision from '@mediapipe/tasks-vision';
import useThrottledCallback from 'beautiful-react-hooks/useThrottledCallback';

import { facePoke } from '@/lib/facePoke';
import { useMainStore } from './useMainStore';
import {
  landmarkGroups,
  FACEMESH_LIPS,
  FACEMESH_LEFT_EYE,
  FACEMESH_LEFT_EYEBROW,
  FACEMESH_RIGHT_EYE,
  FACEMESH_RIGHT_EYEBROW,
  FACEMESH_FACE_OVAL,
} from './landmarks';

export type LandmarkGroup =
  | 'lips'
  | 'leftEye'
  | 'leftEyebrow'
  | 'rightEye'
  | 'rightEyebrow'
  | 'faceOval'
  | 'background';

export type LandmarkCenter = { x: number; y: number; z: number };

export type ClosestLandmark = {
  group: LandmarkGroup;
  distance: number;
  vector: { x: number; y: number; z: number };
};

export type MediaPipeResources = {
  faceLandmarker: vision.FaceLandmarker | null;
  drawingUtils: vision.DrawingUtils | null;
};

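/**
 * Hook that connects MediaPipe face landmark detection to the FacePoke editor:
 * it loads the FaceLandmarker model, detects landmarks on the current preview image,
 * highlights the landmark group under the cursor, and maps mouse drags to image
 * modification parameters sent through `facePoke`.
 */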
export function useFaceLandmarkDetection() {
  const error = useMainStore(s => s.error);
  const setError = useMainStore(s => s.setError);
  const imageFile = useMainStore(s => s.imageFile);
  const setImageFile = useMainStore(s => s.setImageFile);
  const originalImage = useMainStore(s => s.originalImage);
  const originalImageHash = useMainStore(s => s.originalImageHash);
  const setOriginalImageHash = useMainStore(s => s.setOriginalImageHash);
  const previewImage = useMainStore(s => s.previewImage);
  const setPreviewImage = useMainStore(s => s.setPreviewImage);
  const resetImage = useMainStore(s => s.resetImage);

  // Expose the store on `window` for debugging from the browser console.
  ;(window as any).debugJuju = useMainStore;

  // Rough server round-trip latency (ms), used as the throttle interval for modification requests.
  const averageLatency = 220;

  // Detection results
  const [faceLandmarks, setFaceLandmarks] = useState<vision.NormalizedLandmark[][]>([]);
  const [isMediaPipeReady, setIsMediaPipeReady] = useState(false);
  const [isDrawingUtilsReady, setIsDrawingUtilsReady] = useState(false);
  const [blendShapes, setBlendShapes] = useState<vision.Classifications[]>([]);

  // Drag interaction state
  const [dragStart, setDragStart] = useState<{ x: number; y: number } | null>(null);
  const [dragEnd, setDragEnd] = useState<{ x: number; y: number } | null>(null);
  const [isDragging, setIsDragging] = useState(false);
  const [isWaitingForResponse, setIsWaitingForResponse] = useState(false);
  const dragStartRef = useRef<{ x: number; y: number } | null>(null);
  const currentMousePosRef = useRef<{ x: number; y: number } | null>(null);
  const lastModifiedImageHashRef = useRef<string | null>(null);

  // Landmark highlight, cross-faded between the previous and current group
  const [currentLandmark, setCurrentLandmark] = useState<ClosestLandmark | null>(null);
  const [previousLandmark, setPreviousLandmark] = useState<ClosestLandmark | null>(null);
  const [currentOpacity, setCurrentOpacity] = useState(0);
  const [previousOpacity, setPreviousOpacity] = useState(0);

  const [isHovering, setIsHovering] = useState(false);

  const canvasRef = useRef<HTMLCanvasElement>(null);
  const mediaPipeRef = useRef<MediaPipeResources>({
    faceLandmarker: null,
    drawingUtils: null,
  });

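  // Swap the highlighted landmark group and restart the cross-fade animation.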
  const setActiveLandmark = useCallback((newLandmark: ClosestLandmark | undefined) => {
    setPreviousLandmark(currentLandmark || null);
    setCurrentLandmark(newLandmark || null);
    setCurrentOpacity(0);
    setPreviousOpacity(1);
  }, [currentLandmark]);

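  // Initialize MediaPipe once on mount: fetch the WASM fileset and the face_landmarker
  // model, and keep the FaceLandmarker instance in a ref so it survives re-renders.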
  useEffect(() => {
    console.log('Initializing MediaPipe...');
    let isMounted = true;

    const initializeMediaPipe = async () => {
      const { FaceLandmarker, FilesetResolver } = vision;

      try {
        console.log('Initializing FilesetResolver...');
        // The WASM fileset is loaded from jsdelivr; pin it to the installed
        // @mediapipe/tasks-vision version if reproducible loads are needed.
        const filesetResolver = await FilesetResolver.forVisionTasks(
          "https://cdn.jsdelivr.net/npm/@mediapipe/tasks-vision/wasm"
        );

        console.log('Creating FaceLandmarker...');
        const faceLandmarker = await FaceLandmarker.createFromOptions(filesetResolver, {
          baseOptions: {
            modelAssetPath: `https://storage.googleapis.com/mediapipe-models/face_landmarker/face_landmarker/float16/1/face_landmarker.task`,
            delegate: "GPU"
          },
          outputFaceBlendshapes: true,
          runningMode: "IMAGE",
          numFaces: 1
        });

        if (isMounted) {
          console.log('FaceLandmarker created successfully.');
          mediaPipeRef.current.faceLandmarker = faceLandmarker;
          setIsMediaPipeReady(true);
        } else {
          // The component unmounted while the model was loading: release it immediately.
          faceLandmarker.close();
        }
      } catch (err) {
        console.error('Error during MediaPipe initialization:', err);
        if (isMounted) {
          setError('Failed to initialize face detection. Please try refreshing the page.');
        }
      }
    };

    initializeMediaPipe();

    return () => {
      isMounted = false;
      if (mediaPipeRef.current.faceLandmarker) {
        mediaPipeRef.current.faceLandmarker.close();
      }
    };
  }, []);

  // Pre-computed center of each landmark group, in normalized image coordinates.
  const [landmarkCenters, setLandmarkCenters] = useState<Record<LandmarkGroup, LandmarkCenter>>({} as Record<LandmarkGroup, LandmarkCenter>);

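  // Average the landmark positions referenced by each FACEMESH_* connection set
  // to get a single representative center per group.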
  const computeLandmarkCenters = useCallback((landmarks: vision.NormalizedLandmark[]) => {
    const centers: Record<LandmarkGroup, LandmarkCenter> = {} as Record<LandmarkGroup, LandmarkCenter>;

    const computeGroupCenter = (group: Readonly<Set<number[]>>): LandmarkCenter => {
      let sumX = 0, sumY = 0, sumZ = 0, count = 0;
      group.forEach(([index]) => {
        if (landmarks[index]) {
          sumX += landmarks[index].x;
          sumY += landmarks[index].y;
          sumZ += landmarks[index].z || 0;
          count++;
        }
      });
      // Guard against an empty group so we never divide by zero.
      if (count === 0) return { x: 0, y: 0, z: 0 };
      return { x: sumX / count, y: sumY / count, z: sumZ / count };
    };

    centers.lips = computeGroupCenter(FACEMESH_LIPS);
    centers.leftEye = computeGroupCenter(FACEMESH_LEFT_EYE);
    centers.leftEyebrow = computeGroupCenter(FACEMESH_LEFT_EYEBROW);
    centers.rightEye = computeGroupCenter(FACEMESH_RIGHT_EYE);
    centers.rightEyebrow = computeGroupCenter(FACEMESH_RIGHT_EYEBROW);
    centers.faceOval = computeGroupCenter(FACEMESH_FACE_OVAL);
    // The 'background' group is anchored at the image center.
    centers.background = { x: 0.5, y: 0.5, z: 0 };

    setLandmarkCenters(centers);
  }, []);

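  // Find the landmark group whose center is closest to the normalized mouse position.
  // If the cursor is farther than 0.05 from every center, fall back to 'background',
  // but keep the offset measured from the face oval center.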
  const findClosestLandmark = useCallback((mouseX: number, mouseY: number, groupFilter?: LandmarkGroup): ClosestLandmark => {
    const defaultLandmark: ClosestLandmark = {
      group: 'background',
      distance: 0,
      vector: {
        x: mouseX,
        y: mouseY,
        z: 0
      }
    };

    if (Object.keys(landmarkCenters).length === 0) {
      console.warn('Landmark centers not computed yet');
      return defaultLandmark;
    }

    let closestGroup: LandmarkGroup | null = null;
    let minDistance = Infinity;
    let closestVector = { x: 0, y: 0, z: 0 };
    let faceOvalDistance = Infinity;
    let faceOvalVector = { x: 0, y: 0, z: 0 };

    Object.entries(landmarkCenters).forEach(([group, center]) => {
      const dx = mouseX - center.x;
      const dy = mouseY - center.y;
      const distance = Math.sqrt(dx * dx + dy * dy);

      // Remember the face oval offset separately; it is the fallback anchor below.
      if (group === 'faceOval') {
        faceOvalDistance = distance;
        faceOvalVector = { x: dx, y: dy, z: 0 };
      }

      // When a filter is provided (e.g. while dragging), only consider that group.
      if (groupFilter && group !== groupFilter) {
        return;
      }

      if (distance < minDistance) {
        minDistance = distance;
        closestGroup = group as LandmarkGroup;
        closestVector = { x: dx, y: dy, z: 0 };
      }
    });

    // Too far from every group: treat it as a background drag, anchored on the face oval.
    if (minDistance > 0.05) {
      closestGroup = 'background';
      minDistance = faceOvalDistance;
      closestVector = faceOvalVector;
    }

    return closestGroup
      ? { group: closestGroup, distance: minDistance, vector: closestVector }
      : defaultLandmark;
  }, [landmarkCenters]);

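  // Run the FaceLandmarker on a data-URL image, store the resulting landmarks and
  // blendshapes, recompute the group centers, and redraw the overlay.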
  const detectFaceLandmarks = useCallback(async (imageDataUrl: string) => {
    if (!isMediaPipeReady) {
      console.log('MediaPipe not ready. Skipping detection.');
      return;
    }

    const faceLandmarker = mediaPipeRef.current.faceLandmarker;
    if (!faceLandmarker) {
      console.error('FaceLandmarker is not initialized.');
      return;
    }

    const drawingUtils = mediaPipeRef.current.drawingUtils;

    // Decode the data URL into an image element before running detection.
    const image = new Image();
    image.src = imageDataUrl;
    await new Promise((resolve) => { image.onload = resolve; });

    const faceLandmarkerResult = faceLandmarker.detect(image);

    setFaceLandmarks(faceLandmarkerResult.faceLandmarks);
    setBlendShapes(faceLandmarkerResult.faceBlendshapes || []);

    if (faceLandmarkerResult.faceLandmarks && faceLandmarkerResult.faceLandmarks[0]) {
      computeLandmarkCenters(faceLandmarkerResult.faceLandmarks[0]);

      // Only draw when a face was actually detected.
      if (canvasRef.current && drawingUtils) {
        drawLandmarks(faceLandmarkerResult.faceLandmarks[0], canvasRef.current, drawingUtils);
      }
    }
  }, [isMediaPipeReady, isDrawingUtilsReady, computeLandmarkCenters]);

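  // Size the canvas to the preview image and draw the highlighted landmark connectors,
  // cross-fading from the previously active group to the current one.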
  const drawLandmarks = useCallback((
    landmarks: vision.NormalizedLandmark[],
    canvas: HTMLCanvasElement,
    drawingUtils: vision.DrawingUtils
  ) => {
    const ctx = canvas.getContext('2d');
    if (!ctx) return;

    ctx.clearRect(0, 0, canvas.width, canvas.height);

    if (canvasRef.current && previewImage) {
      const img = new Image();
      img.onload = () => {
        canvas.width = img.width;
        canvas.height = img.height;

        const drawLandmarkGroup = (landmark: ClosestLandmark | null, opacity: number) => {
          if (!landmark) return;
          const connections = landmarkGroups[landmark.group];
          if (connections) {
            ctx.globalAlpha = opacity;
            drawingUtils.drawConnectors(
              landmarks,
              connections,
              { color: 'orange', lineWidth: 4 }
            );
          }
        };

        drawLandmarkGroup(previousLandmark, previousOpacity);
        drawLandmarkGroup(currentLandmark, currentOpacity);

        ctx.globalAlpha = 1;
      };
      img.src = previewImage;
    }
  }, [previewImage, currentLandmark, previousLandmark, currentOpacity, previousOpacity]);

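  // Redraw the overlay whenever the detected landmarks or the highlight state change.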
  useEffect(() => {
    if (isMediaPipeReady && isDrawingUtilsReady && faceLandmarks.length > 0 && canvasRef.current && mediaPipeRef.current.drawingUtils) {
      drawLandmarks(faceLandmarks[0], canvasRef.current, mediaPipeRef.current.drawingUtils);
    }
  }, [isMediaPipeReady, isDrawingUtilsReady, faceLandmarks, currentLandmark, previousLandmark, currentOpacity, previousOpacity, drawLandmarks]);

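  // Cross-fade animation: step the two highlight opacities toward their targets on each
  // animation frame after the active landmark changes.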
  useEffect(() => {
    let animationFrame: number;
    // Track the values locally: this effect only re-runs when `currentLandmark` changes,
    // so reading the opacity state variables here would always see their initial values.
    let current = 0;
    let previous = 1;

    const animate = () => {
      current = Math.min(current + 0.2, 1);
      previous = Math.max(previous - 0.2, 0);
      setCurrentOpacity(current);
      setPreviousOpacity(previous);

      if (current < 1 || previous > 0) {
        animationFrame = requestAnimationFrame(animate);
      }
    };
    animationFrame = requestAnimationFrame(animate);
    return () => cancelAnimationFrame(animationFrame);
  }, [currentLandmark]);

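  // Callback ref for the overlay canvas: scales the backing store for the device pixel
  // ratio and creates the MediaPipe DrawingUtils bound to its 2D context.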
  const canvasRefCallback = useCallback((node: HTMLCanvasElement | null) => {
    if (node !== null) {
      const ctx = node.getContext('2d');
      if (ctx) {
        // Match the canvas backing store to its display size and the device pixel ratio.
        const pixelRatio = window.devicePixelRatio || 1;
        node.width = node.clientWidth * pixelRatio;
        node.height = node.clientHeight * pixelRatio;
        ctx.scale(pixelRatio, pixelRatio);

        mediaPipeRef.current.drawingUtils = new vision.DrawingUtils(ctx);
        setIsDrawingUtilsReady(true);
      } else {
        console.error('Failed to get 2D context from canvas.');
      }
      canvasRef.current = node;
    }
  }, []);

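  // Re-run landmark detection whenever a new preview image is available and both
  // MediaPipe and the drawing utils are ready.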
  useEffect(() => {
    if (!isMediaPipeReady) {
      console.log('MediaPipe not ready. Skipping landmark detection.');
      return;
    }
    if (!previewImage) {
      console.log('Preview image not ready. Skipping landmark detection.');
      return;
    }
    if (!isDrawingUtilsReady) {
      console.log('DrawingUtils not ready. Skipping landmark detection.');
      return;
    }
    detectFaceLandmarks(previewImage);
  }, [isMediaPipeReady, isDrawingUtilsReady, previewImage, detectFaceLandmarks]);

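  // Translate a drag on a landmark group into FacePoke parameters (eyes, eyebrow,
  // lips "eee"/"woo", head roll/yaw/pitch) and send a modification request. The latest
  // store state is read imperatively so the callback itself can stay stable.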
  const modifyImage = useCallback(({ landmark, vector }: {
    landmark: ClosestLandmark
    vector: { x: number; y: number; z: number }
  }) => {
    const {
      originalImage,
      originalImageHash,
      params: previousParams,
      setParams,
      setError
    } = useMainStore.getState();

    if (!originalImage) {
      console.error('Image file or facePoke not available');
      return;
    }

    const params = {
      ...previousParams
    };

    // Drag vectors are in normalized image coordinates, so they roughly span [-0.5, 0.5].
    const minX = -0.50;
    const maxX = 0.50;
    const minY = -0.50;
    const maxY = 0.50;

    // Linearly remap `value` from [inMin, inMax] to the output range, clamped to it.
    // The output bounds may be given in either order (some of the ranges below are inverted).
    const mapRange = (value: number, inMin: number, inMax: number, outMin: number, outMax: number): number => {
      const mapped = ((value - inMin) * (outMax - outMin)) / (inMax - inMin) + outMin;
      return Math.min(Math.max(outMin, outMax), Math.max(Math.min(outMin, outMax), mapped));
    };

    console.log("modifyImage:", {
      originalImage,
      originalImageHash,
      landmark,
      vector,
      minX,
      maxX,
      minY,
      maxY,
    });

    switch (landmark.group) {
      case 'leftEye':
      case 'rightEye': {
        // Horizontal drag over an eye drives the `eyes` parameter.
        const eyesMin = 210;
        const eyesMax = 5;
        params.eyes = mapRange(vector.x, minX, maxX, eyesMin, eyesMax);
        break;
      }
      case 'leftEyebrow':
      case 'rightEyebrow': {
        // Vertical drag over an eyebrow drives the `eyebrow` parameter.
        const eyebrowMin = -10;
        const eyebrowMax = 15;
        params.eyebrow = mapRange(vector.y, minY, maxY, eyebrowMin, eyebrowMax);
        break;
      }
      case 'lips': {
        // Vertical drag drives the `eee` mouth shape, horizontal drag the `woo` shape.
        const eeeMin = -20;
        const eeeMax = 15;
        params.eee = mapRange(vector.y, minY, maxY, eeeMin, eeeMax);

        const wooMin = -20;
        const wooMax = 15;
        params.woo = mapRange(vector.x, minX, maxX, wooMin, wooMax);
        break;
      }
      case 'faceOval': {
        // Horizontal drag on the face oval drives `rotate_roll`.
        const rollMin = -40;
        const rollMax = 40;
        params.rotate_roll = mapRange(vector.x, minX, maxX, rollMin, rollMax);
        break;
      }
      case 'background': {
        // Dragging the background rotates the whole head: `rotate_yaw` from the
        // horizontal offset, `rotate_pitch` from the vertical offset.
        const yawMin = -40;
        const yawMax = 40;
        params.rotate_yaw = mapRange(-vector.x, minX, maxX, yawMin, yawMax);

        const pitchMin = -40;
        const pitchMax = 40;
        params.rotate_pitch = mapRange(vector.y, minY, maxY, pitchMin, pitchMax);
        break;
      }
      default:
        return;
    }

    // Abort if any parameter is not a finite number.
    for (const [key, value] of Object.entries(params)) {
      if (isNaN(value as any) || !isFinite(value as any)) {
        console.log(`${key} is not a finite number, aborting`);
        return;
      }
    }
    console.log(`PITCH=${params.rotate_pitch || 0}, YAW=${params.rotate_yaw || 0}, ROLL=${params.rotate_roll || 0}`);

    setParams(params);

    try {
      // Send the full image the first time (or when the source image changed);
      // afterwards only the server-side hash is sent.
      if (!lastModifiedImageHashRef.current || lastModifiedImageHashRef.current !== originalImageHash) {
        lastModifiedImageHashRef.current = originalImageHash;
        facePoke.modifyImage(originalImage, null, params);
      } else {
        facePoke.modifyImage(null, lastModifiedImageHashRef.current, params);
      }
    } catch (err) {
      console.error('Failed to modify image:', err);
      setError('Failed to modify image');
    }
  }, []);

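  // Throttle modification requests so at most one is sent per `averageLatency` ms while dragging.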
  const modifyImageWithRateLimit = useThrottledCallback((params: {
    landmark: ClosestLandmark
    vector: { x: number; y: number; z: number }
  }) => {
    modifyImage(params);
  }, [modifyImage], averageLatency);

  const handleMouseEnter = useCallback(() => {
    setIsHovering(true);
  }, []);

  const handleMouseLeave = useCallback(() => {
    setIsHovering(false);
  }, []);

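  // Begin a drag: highlight the group under the cursor and record the starting position
  // in normalized canvas coordinates.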
  const handleMouseDown = useCallback((event: React.MouseEvent<HTMLCanvasElement>) => {
    if (!canvasRef.current) return;

    const rect = canvasRef.current.getBoundingClientRect();
    const x = (event.clientX - rect.left) / rect.width;
    const y = (event.clientY - rect.top) / rect.height;

    const landmark = findClosestLandmark(x, y);
    console.log(`Mouse down on ${landmark.group}`);
    setActiveLandmark(landmark);
    setDragStart({ x, y });
    dragStartRef.current = { x, y };
  }, [findClosestLandmark, setActiveLandmark, setDragStart]);

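  // While dragging, keep modifying the image relative to the group that was grabbed;
  // otherwise just update the hover highlight.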
  const handleMouseMove = useCallback((event: React.MouseEvent<HTMLCanvasElement>) => {
    if (!canvasRef.current) return;

    const rect = canvasRef.current.getBoundingClientRect();
    const x = (event.clientX - rect.left) / rect.width;
    const y = (event.clientY - rect.top) / rect.height;

    if (dragStart && dragStartRef.current) {
      // Stay locked on the group that was grabbed at mouse-down.
      const landmark = findClosestLandmark(x, y, currentLandmark?.group);

      console.log(`Dragging mouse (was over ${currentLandmark?.group || 'nothing'}, now over ${landmark.group})`);

      modifyImageWithRateLimit({
        landmark: currentLandmark || landmark,
        vector: {
          x: x - landmarkCenters[landmark.group].x,
          y: y - landmarkCenters[landmark.group].y,
          z: 0
        }
      });
      setIsDragging(true);
    } else {
      // Not dragging: update the hover highlight when the cursor moves to a new group.
      const landmark = findClosestLandmark(x, y);

      if (!currentLandmark || currentLandmark.group !== landmark.group) {
        setActiveLandmark(landmark);
      }
      setIsHovering(true);
    }
  }, [currentLandmark, dragStart, setIsHovering, setActiveLandmark, setIsDragging, modifyImageWithRateLimit, landmarkCenters, findClosestLandmark]);

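  // End a drag: send one final modification for the release position, then clear
  // the drag state and the highlight.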
  const handleMouseUp = useCallback((event: React.MouseEvent<HTMLCanvasElement>) => {
    if (!canvasRef.current) return;

    const rect = canvasRef.current.getBoundingClientRect();
    const x = (event.clientX - rect.left) / rect.width;
    const y = (event.clientY - rect.top) / rect.height;

    if (dragStart && dragStartRef.current) {
      const landmark = findClosestLandmark(x, y, currentLandmark?.group);

      console.log(`Mouse up (was over ${currentLandmark?.group || 'nothing'}, now over ${landmark.group})`);

      modifyImageWithRateLimit({
        landmark: currentLandmark || landmark,
        vector: {
          x: x - landmarkCenters[landmark.group].x,
          y: y - landmarkCenters[landmark.group].y,
          z: 0
        }
      });
    }

    setIsDragging(false);
    dragStartRef.current = null;
    setActiveLandmark(undefined);
  }, [currentLandmark, dragStart, isDragging, modifyImageWithRateLimit, findClosestLandmark, setActiveLandmark, landmarkCenters, setIsDragging]);

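  // When the server returns a modified image, show it and remember its hash so the
  // next request can reference the cached image instead of re-sending it.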
  useEffect(() => {
    facePoke.setOnModifiedImage((image: string, image_hash: string) => {
      if (image) {
        setPreviewImage(image);
      }
      setOriginalImageHash(image_hash);
      lastModifiedImageHashRef.current = image_hash;
    });
  }, [setPreviewImage, setOriginalImageHash]);

  return {
    canvasRef,
    canvasRefCallback,
    mediaPipeRef,
    faceLandmarks,
    isMediaPipeReady,
    isDrawingUtilsReady,
    blendShapes,

    setFaceLandmarks,
    setBlendShapes,

    handleMouseDown,
    handleMouseUp,
    handleMouseMove,
    handleMouseEnter,
    handleMouseLeave,

    currentLandmark,
    currentOpacity,
  };
}