Hello
I'd like to build a program in JavaScript that detects my body and overlays a T-shirt model on it, basically a virtual fitting room. Why doesn't this code work?
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Virtual Try-On Final</title>
<style>
body { margin: 0; overflow: hidden; }
#webcam {
position: absolute;
transform: scaleX(-1);
opacity: 0.5;
z-index: 1;
}
#output {
position: absolute;
z-index: 2;
}
</style>
</head>
<body>
<video id="webcam" autoplay playsinline></video>
<canvas id="output"></canvas>
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs"></script>
<script src="https://cdn.jsdelivr.net/npm/@tensorflow-models/body-pix"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/r128/three.min.js"></script>
<script src="https://cdn.jsdelivr.net/npm/three@0.128.0/examples/js/loaders/GLTFLoader.js"></script>
<script>
const video = document.getElementById('webcam');
const output = document.getElementById('output');
let net, scene, camera, renderer, shirt;
let bodyPosition = { x: 0, y: 0, width: 0, height: 0 };
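// Start the webcam and size the overlay canvas to the video resolution once metadata is available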
async function setupWebcam() {
try {
const stream = await navigator.mediaDevices.getUserMedia({
video: {
width: { ideal: 640 },
height: { ideal: 480 },
facingMode: 'user'
}
});
video.srcObject = stream;
return new Promise(resolve => {
video.onloadedmetadata = () => {
output.width = video.videoWidth;
output.height = video.videoHeight;
resolve();
};
});
} catch (error) {
console.error('Webcam error:', error);
alert('Please allow webcam access!');
}
}
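// Build the Three.js scene: perspective camera, transparent WebGL renderer on the overlay canvas, and basic lighting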
function initThreeJS() {
scene = new THREE.Scene();
camera = new THREE.PerspectiveCamera(
60,
output.width / output.height,
0.1,
1000
);
camera.position.set(0, 0, 2);
renderer = new THREE.WebGLRenderer({
canvas: output,
alpha: true,
antialias: true
});
renderer.setClearColor(0x000000, 0);
// Lighting
const ambientLight = new THREE.AmbientLight(0xffffff, 1.0);
scene.add(ambientLight);
const directionalLight = new THREE.DirectionalLight(0xffffff, 0.8);
directionalLight.position.set(0, 2, 5);
scene.add(directionalLight);
loadShirtModel();
}
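// Load the T-shirt model; tshirt_model.glb is fetched relative to this page, so the page must be served over http(s) (getUserMedia also needs a secure context), not opened via file://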
async function loadShirtModel() {
const loader = new THREE.GLTFLoader();
try {
const gltf = await loader.loadAsync('tshirt_model.glb');
shirt = gltf.scene;
// Adjust the material so the shirt blends over the video
shirt.traverse(child => {
if (child.isMesh) {
child.material = new THREE.MeshPhongMaterial({
color: 0xffffff,
transparent: true,
opacity: 0.9,
depthWrite: false
});
}
});
shirt.scale.set(0.3, 0.3, 0.3);
scene.add(shirt);
console.log('T-Shirt Modell geladen');
} catch (error) {
console.error('Modellfehler:', error);
addFallbackCube();
}
}
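// Fallback: show a simple red cube if the glTF model cannot be loaded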
function addFallbackCube() {
const geometry = new THREE.BoxGeometry(0.5, 0.5, 0.5);
const material = new THREE.MeshBasicMaterial({ color: 0xff0000 });
shirt = new THREE.Mesh(geometry, material);
scene.add(shirt);
}
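// Compute the bounding box of the person from the BodyPix mask (data[i] === 1 marks a person pixel)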
function calculateBodyPosition(segmentation) {
const data = segmentation.data;
let minX = Infinity, maxX = -Infinity;
let minY = Infinity, maxY = -Infinity;
for (let y = 0; y < segmentation.height; y++) {
for (let x = 0; x < segmentation.width; x++) {
if (data[y * segmentation.width + x] === 1) {
minX = Math.min(minX, x);
maxX = Math.max(maxX, x);
minY = Math.min(minY, y);
maxY = Math.max(maxY, y);
}
}
}
if (minX !== Infinity) {
bodyPosition = {
x: (minX + maxX) / 2,
y: (minY + maxY) / 2,
width: maxX - minX,
height: maxY - minY
};
}
}
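// Position and scale the shirt from the detected bounding box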
function updateShirtPosition() {
if (!shirt) return;
// Map the pixel-space body centre into the camera view (roughly normalised to [-1, 1])
const x = (bodyPosition.x / output.width - 0.5) * 2;
const y = -(bodyPosition.y / output.height - 0.5) * 2;
shirt.position.set(x, y, 0);
// Scale the shirt relative to the detected body height
const scaleFactor = bodyPosition.height / output.height * 2;
shirt.scale.set(scaleFactor, scaleFactor, scaleFactor);
}
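// Main loop: segment the current video frame, update the shirt, render, then schedule the next frame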
async function detectBody() {
try {
// Full-body segmentation; flipHorizontal mirrors the mask so it matches the CSS-mirrored video preview
const segmentation = await net.segmentPerson(video, {
flipHorizontal: true,
segmentationThreshold: 0.7,
internalResolution: 'high',
maxDetections: 1
});
calculateBodyPosition(segmentation);
updateShirtPosition();
// Rendering: the canvas already owns a WebGL context, so getContext('2d') would return null here
// and clearRect would throw. The renderer clears the frame itself (alpha: true, clear alpha 0).
renderer.render(scene, camera);
} catch (error) {
console.error('Detection error:', error);
}
// Keep the loop running even if a single frame fails
requestAnimationFrame(detectBody);
}
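// Bootstrap order matters: webcam first (so the canvas size is known), then the BodyPix model, then the 3D scene and the detection loop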
(async () => {
await setupWebcam();
net = await bodyPix.load({
architecture: 'ResNet50',
outputStride: 32,
quantBytes: 2
});
initThreeJS();
detectBody();
})();
</script>
</body>
</html>
Kind regards