This article distinguishes between real-time detection and single-picture capture. Real-time monitoring accuracy depends on how many video frames are analyzed (this has not been tested extensively — it depends entirely on the frame rate). Because full facial recognition is practical only on static images, real-time detection here uses only MTCNN detection (plus lightweight face recognition) for speed; the trade-off is a drop in accuracy. For now, only PCs are supported. The next article will combine this with Node.js, using front-end recognition together with back-end detection.

Face-api repository: https://github.com/justadudewhwantstolearn/face-api.js

<!DOCTYPE html>
<html lang="en">

<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Real-time face detection</title>
</head>

<body>
    <div style="position: relative" class="margin">
        <video id="inputVideo" style="width: 500px; margin:auto" autoplay muted></video>
        <canvas id="overlay"></canvas>
    </div>
</body>
<script src="https://cdn.bootcss.com/jquery/3.4.1/jquery.min.js"></script>
<script src="./face-api.js"></script>
<script>

    // Overlay canvas (detection boxes are drawn here) and the webcam video element.
    const canvas = document.getElementById('overlay')
    const context = canvas.getContext('2d')
    const video = document.getElementById('inputVideo')
    let timer // interval handle for the draw/detect loop

    // Kick everything off once the DOM is ready (jQuery is loaded on this page).
    $(function () {
        run()
    })

    /**
     * Load the MTCNN and face-recognition models, then request webcam access,
     * falling back through the vendor-prefixed getUserMedia variants.
     */
    async function run() {
        // load the models
        await faceapi.loadMtcnnModel('/weights')
        await faceapi.loadFaceRecognitionModel('/weights')

        // try to access the user's webcam and stream the images
        // to the video element
        const constraints = { video: { width: 1000, height: 1000 } }
        if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
            // modern standard API
            navigator.mediaDevices.getUserMedia(constraints).then(success).catch(error)
        } else if (navigator.webkitGetUserMedia) {
            // WebKit-core browsers
            navigator.webkitGetUserMedia(constraints, success, error)
        } else if (navigator.mozGetUserMedia) {
            // Firefox
            navigator.mozGetUserMedia(constraints, success, error)
        } else if (navigator.getUserMedia) {
            // legacy API
            navigator.getUserMedia(constraints, success, error)
        }
    }

    /**
     * Success callback for getUserMedia: attach the stream to the video element
     * and start a loop that copies the current frame to the canvas and runs
     * detection roughly 10 times per second.
     * @param {MediaStream} stream - the webcam media stream
     */
    function success(stream) {
        // Set the video stream as the source of the video element.
        // (srcObject replaces the old URL.createObjectURL(stream) approach.)
        video.srcObject = stream
        timer = setInterval(function () {
            // Keep the canvas sized to the displayed video before each draw.
            canvas.width = video.offsetWidth
            canvas.height = video.offsetHeight
            context.drawImage(video, 0, 0, video.offsetWidth, video.offsetHeight)
            onPlay()
        }, 100)
    }

    /**
     * Error callback for getUserMedia.
     * @param {Error} err - the failure reported by the browser
     */
    function error(err) {
        console.log(`Failed to access user media device: ${err.name}. ${err.message}`)
    }

    const mtcnnForwardParams = {
        // Number of scaled versions of the input image passed through the CNN
        // in the first stage: the smaller the number, the shorter the inference
        // time, but the less accurate the detection.
        maxNumScales: 10,
        // Scale factor between the scaled image versions.
        scaleFactor: 0.709,
        // Score thresholds used to filter bounding boxes in stages 1, 2 and 3.
        scoreThresholds: [0.6, 0.7, 0.7],
        // Minimum face size: the higher it is, the faster the processing,
        // but smaller faces will not be detected.
        minFaceSize: 360,
    }

    let mtcnnResults = []

    /**
     * Detect faces in the current video frame with MTCNN and draw the
     * detections and landmarks onto the overlay canvas.
     */
    async function onPlay() {
        mtcnnResults = await faceapi.mtcnn(video, mtcnnForwardParams)
        // Resize the raw results to the on-screen size of the video so the
        // boxes line up with what the user sees.
        const displaySize = { width: video.offsetWidth, height: video.offsetHeight }
        const resizedResults = faceapi.resizeResults(mtcnnResults, displaySize)
        faceapi.draw.drawDetections(canvas, resizedResults)
        faceapi.draw.drawFaceLandmarks(canvas, resizedResults)

        if (!mtcnnResults.length) {
            console.log('No face detected')
        }
    }

</script>

</html>
Copy the code