Preface
Live streaming, short video, online meetings, and similar applications have become an ever larger part of everyday life, and with them have come all kinds of rich creative features and novel experiences, many of which are built on AR technology grounded in AI detection and graphics rendering.

As Web technology has matured, implementing AR in the browser has become a real possibility. This article summarizes the key technical points of building such a feature on the Web.

Architecture and Concepts
At a high level, the implementation breaks down into four steps (a sketch of how they fit together follows the list):

1. Access the camera to obtain the video feed
2. Use TensorFlow.js to load the face landmark detection model
3. Generate the FaceMesh
4. Build a triangle mesh from the FaceMesh and apply a UV texture map
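To make the flow concrete, here is a minimal sketch of the whole pipeline. The function names are placeholders for the implementations shown in the sections below, and the requestAnimationFrame loop is an assumed driver, not code from the original article:

// Minimal pipeline sketch; setupWebcam / createModel / recognition
// are implemented in the sections that follow.
async function main() {
    await setupWebcam();      // 1. camera feed
    await createModel();      // 2. load the face landmark model
    const loop = async () => {
        await recognition();  // 3 & 4. estimate the FaceMesh and render it
        requestAnimationFrame(loop);
    };
    loop();
}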
FaceMesh
MediaPipe Face Mesh is a face geometry solution that estimates 468 3D face landmarks in real time, even on mobile devices. It uses machine learning (ML) to infer the 3D surface geometry from a single camera input, with no dedicated depth sensor required. By combining a lightweight model architecture with GPU acceleration throughout the pipeline, it delivers the real-time performance that live experiences demand.
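For orientation, a detection result from the @tensorflow-models/face-landmarks-detection wrapper used later in this article has roughly the following shape (the numbers are made up; with refineLandmarks enabled the model emits 478 keypoints, the 468 mesh points plus 10 iris points):

// Example return value of detector.estimateFaces(video); values are illustrative.
[
  {
    keypoints: [
      { x: 213.4, y: 145.2, z: -8.1, name: "lips" },
      // ... one entry per landmark, in input-pixel coordinates
    ],
    box: { xMin: 150, yMin: 90, xMax: 380, yMax: 360, width: 230, height: 270 }
  }
]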

UVMap
UV coordinates are two-dimensional texture coordinates: U runs horizontally and V vertically. A UV map describes how a 3D object's surface maps onto a 2D image texture; with a UV map, we can paste a flat texture image onto the surface of a 3D object.
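As a minimal illustration of the idea (separate from the face mesh itself), this is how a UV-mapped quad is built in Three.js; 'texture.jpg' is a placeholder path:

import * as THREE from 'three';

// A quad with explicit UV coordinates: each vertex is assigned a (u, v)
// position inside the texture image.
const quadGeometry = new THREE.BufferGeometry();
quadGeometry.setAttribute('position', new THREE.Float32BufferAttribute([
    -1, -1, 0,   1, -1, 0,   1, 1, 0,   -1, 1, 0
], 3));
quadGeometry.setAttribute('uv', new THREE.Float32BufferAttribute([
    0, 0,   1, 0,   1, 1,   0, 1   // bottom-left, bottom-right, top-right, top-left
], 2));
quadGeometry.setIndex([0, 1, 2, 0, 2, 3]); // two triangles
const quad = new THREE.Mesh(quadGeometry, new THREE.MeshBasicMaterial({
    map: new THREE.TextureLoader().load('texture.jpg') // placeholder path
}));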

(Figure: mapping a rectangular texture onto a sphere)
Technical Implementation
Accessing the camera feed
Obtain a stream via navigator.mediaDevices.getUserMedia and attach it to a video element to display the camera feed.

async function setupWebcam() {
    return new Promise((resolve, reject) => {
        const webcamElement = document.getElementById("webcam");
        if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
            reject(new Error("getUserMedia is not supported in this browser"));
            return;
        }
        navigator.mediaDevices.getUserMedia({ video: true })
            .then(stream => {
                // Feed the camera stream into the <video> element and resolve
                // once the first frame is available
                webcamElement.srcObject = stream;
                webcamElement.addEventListener("loadeddata", resolve, false);
            })
            .catch(reject);
    });
}
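Usage is a single await before any detection starts (this assumes a <video id="webcam" autoplay playsinline> element in the page):

// Wait for the first camera frame before running detection
await setupWebcam();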
Face detection
// Assumed imports:
// import * as tf from '@tensorflow/tfjs';
// import * as faceLandmarksDetection from '@tensorflow-models/face-landmarks-detection';

// Create the detection model
createModel() {
    return new Promise(async resolve => {
        await tf.setBackend('webgl')
        const model = faceLandmarksDetection.SupportedModels.MediaPipeFaceMesh;
        const detectorConfig = {
            maxFaces: 1,            // maximum number of faces to detect
            refineLandmarks: true,  // refine the landmark coordinates around the eyes and lips, and emit extra landmarks around the irises
            runtime: 'mediapipe',
            solutionPath: 'https://unpkg.com/@mediapipe/face_mesh', // where the WASM binaries and model files live
        };
        this.model = await faceLandmarksDetection.createDetector(model, detectorConfig);
        resolve(this.model);
    })
},
// Run detection on the current video frame
async recognition() {
    try {
        const video = this.$refs.video;
        const faces = await this.model.estimateFaces(video, {
            flipHorizontal: false, // whether to mirror the input
        });
        if (faces.length > 0) {
            const keypoints = faces[0].keypoints;
            // Convert the keypoints into the [[x, y, z], ...] shape the renderer expects
            this.render3D({
                scaledMesh: keypoints.reduce((acc, pos) => {
                    acc.push([pos.x, pos.y, pos.z])
                    return acc
                }, [])
            });
        } else {
            this.render3D({ scaledMesh: [] })
        }
    } catch (error) {
        console.log(error);
    }
}
Texturing the 3D scene
This step relies on two constant tables: TRIANGULATION, a fixed index list that connects the 468 landmarks into triangles, and UV_COORDS, the canonical texture coordinates for each landmark. Both are typically copied from the TensorFlow.js facemesh demo assets.
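Their shapes look roughly like this (the bodies are elided; the real tables are long):

// TRIANGULATION: flat list of vertex indices, three per triangle
const TRIANGULATION = [/* i0, i1, i2,  i3, i4, i5, ... */];
// UV_COORDS: flat list of texture coordinates, one (u, v) pair per landmark
const UV_COORDS = [/* u0, v0,  u1, v1, ... */];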

// import * as THREE from 'three';

// 3D scene
const scene = new THREE.Scene();
// Add some lighting
scene.add(new THREE.AmbientLight(0xcccccc, 0.4));

// Perspective camera, placed so that the video plane exactly fills the view
const camera = new THREE.PerspectiveCamera(45, 1, 0.1, 2000);
camera.add(new THREE.PointLight(0xffffff, 0.8));
camera.position.x = videoWidth / 2;
camera.position.y = -videoHeight / 2;
// distance = (h / 2) / tan(fov / 2), with the fov converted to radians
camera.position.z = -(videoHeight / 2) / Math.tan(THREE.MathUtils.degToRad(45 / 2));
scene.add(camera);

// Renderer, drawing onto a transparent canvas laid over the video
const renderer = new THREE.WebGLRenderer({
    canvas: document.getElementById("overlay"),
    alpha: true
});

// Create the geometry: connect the 468 face landmarks into a triangle mesh
// in a fixed order (TRIANGULATION) and load UV_COORDS
const geometry = new THREE.BufferGeometry()
geometry.setIndex(TRIANGULATION)
// Mirror the U coordinate so the texture matches the flipped video
geometry.setAttribute('uv', new THREE.Float32BufferAttribute(
    UV_COORDS.map((item, index) => index % 2 ? item : 1 - item), 2))
geometry.computeVertexNormals()

// Create the material
const textureLoader = new THREE.TextureLoader();
const meshImg = this.meshList[meshIndex].src; // URL of the texture image
textureLoader.load(meshImg, texture => {
    texture.encoding = THREE.sRGBEncoding
    texture.anisotropy = 16
    const material = new THREE.MeshBasicMaterial({
        map: texture,
        transparent: true,
        color: new THREE.Color(0xffffff),
        reflectivity: 0.5
    });
    const mesh = new THREE.Mesh(geometry, material)
    scene.add(mesh)
})

// Update the geometry from the face mesh on every frame
function updateGeometry(prediction) {
    const w = canvasWidth;
    const h = canvasHeight;
    const faceMesh = resolveMesh(prediction.scaledMesh, w, h)
    const positionBuffer = faceMesh.reduce((acc, pos) => acc.concat(pos), [])
    geometry.setAttribute('position', new THREE.Float32BufferAttribute(positionBuffer, 3))
    geometry.attributes.position.needsUpdate = true
}

function resolveMesh(faceMesh, vw, vh) {
    // Convert from video pixel coordinates (origin top-left, y down) to
    // scene coordinates centered on the canvas (y up)
    return faceMesh.map(p => [p[0] - vw / 2, vh / 2 - p[1], -p[2]])
}

// Render one frame
function render3D(prediction) {
    if (prediction) {
        updateGeometry(prediction)
    }
    renderer.render(scene, camera)
}

Loading a 3D model
// Load the 3D model
// import { GLTFLoader } from 'three/examples/jsm/loaders/GLTFLoader.js';
const loader = new GLTFLoader();
const Object3D = new THREE.Object3D();
loader.load(modelUrl, (gltf) => {
    const object = gltf.scene
    // Use the bounding box to re-center the model around the origin,
    // with small manual offsets to seat it on the face
    const box = new THREE.Box3().setFromObject(object)
    const size = box.getSize(new THREE.Vector3()).length()
    const center = box.getCenter(new THREE.Vector3())
    object.position.x += (object.position.x - center.x);
    object.position.y += (object.position.y - center.y + 1);
    object.position.z += (object.position.z - center.z - 15);
    Object3D.add(object)
    this.scene.add(Object3D)
})

// Compute the model transform from the face prediction
const position = prediction.midwayBetweenEyes[0]                      // anchor point between the eyes
const scale = this.getScale(prediction.scaledMesh, 234, 454)          // face width, from two landmarks on opposite sides of the face
const rotation = this.getRotation(prediction.scaledMesh, 10, 50, 280) // head pose, from three landmarks
object.position.set(...position)
object.scale.setScalar(scale / 20)
object.scale.x *= -1
object.rotation.setFromRotationMatrix(rotation)
object.rotation.y = -object.rotation.y
object.rotateZ(Math.PI)
object.rotateX(-Math.PI * .05)
if (this.morphTarget) {
    // flipped: the preview is mirrored, so the left/right eye values are swapped
    this.morphTarget['leftEye'] && this.morphTarget['leftEye'](1 - prediction.faceRig.eye.r)
    this.morphTarget['rightEye'] && this.morphTarget['rightEye'](1 - prediction.faceRig.eye.l)
    this.morphTarget['mouth'] && this.morphTarget['mouth'](prediction.faceRig.mouth.shape.A)
}
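The snippet above calls getScale and getRotation, which are not shown in the article, and reads prediction.faceRig for eye and mouth values. The sketch below is one plausible implementation, not the author's original code: scale as the distance between two landmarks, and rotation as an orthonormal basis built from three landmarks. The faceRig values (eye openness, mouth shape) are likewise not produced by the landmark model itself; they would come from a face-rig solver such as Kalidokit run on the same landmarks.

// Hypothetical helpers (assumed, not from the original article)
getScale(mesh, a, b) {
    // Euclidean distance between two landmarks
    const [x1, y1, z1] = mesh[a];
    const [x2, y2, z2] = mesh[b];
    return Math.hypot(x2 - x1, y2 - y1, z2 - z1);
},
getRotation(mesh, top, left, right) {
    const pTop = new THREE.Vector3(...mesh[top]);
    const pLeft = new THREE.Vector3(...mesh[left]);
    const pRight = new THREE.Vector3(...mesh[right]);
    // x axis across the face, y axis from the midpoint between the side
    // landmarks up to the top landmark, z axis as their cross product
    const xAxis = new THREE.Vector3().subVectors(pRight, pLeft).normalize();
    const mid = pLeft.clone().lerp(pRight, 0.5);
    const yAxis = new THREE.Vector3().subVectors(pTop, mid).normalize();
    const zAxis = new THREE.Vector3().crossVectors(xAxis, yAxis).normalize();
    return new THREE.Matrix4().makeBasis(xAxis, yAxis, zAxis);
}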