Frame drawing => pseudo “motion sensing”: playing with canvas using video

Time: 2021-7-31

Written up front

Thanks to everyone working hard on the front end. I hope HelloWorld keeps getting better.

Thanks to the author of “Let's see how clever front-end folks do anti-theft” for sharing.

And thanks that the network, down for a whole day, has finally recovered.

The good stuff

Let's share two pictures first:
1) Wang's anger
[demo image]
2) The programmer's anger
[demo image]

Principle

Turn on the browser camera and draw each video frame onto a canvas. Compare the previous frame with the current frame and work out what percentage of the image differs; if it exceeds a certain percentage, a callback function is triggered.

Example: when I wave my hand, its position differs between the two frames, so the difference makes up a large proportion of the image, which triggers the callback we wrote (a minimal sketch of the diff calculation follows this example) {

    add a jump animation to the water cup
    change the background color of the body

}
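To make that concrete, here is a minimal sketch (an illustration, not the author's code below) of how a difference ratio between two frames could be computed directly from their ImageData. The demo below instead lets the canvas do this work via the 'difference' blend mode and adds a ×3 amplification, so its numbers will differ slightly.

// Minimal sketch: given two same-sized ImageData objects, return how
// different they are as a ratio in [0, 1].
function frameDiffRatio(prev, curr) {
    var count = 0;
    // value the sum would take if every pixel differed by pure white
    var total = prev.width * prev.height * 255 * 3;
    for (var i = 0, l = prev.data.length; i < l; i += 4) {
        // sum absolute per-channel differences (R, G, B; alpha skipped)
        count += Math.abs(curr.data[i]     - prev.data[i]) +
                 Math.abs(curr.data[i + 1] - prev.data[i + 1]) +
                 Math.abs(curr.data[i + 2] - prev.data[i + 2]);
    }
    return count / total; // e.g. fire the callback when this exceeds ~0.13
}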
The “programmer's anger” demo is implemented by borrowing code from “Let's see how clever front-end folks do anti-theft”.
Original author's project: GitHub address
Without modifying the original author's source code, the “angry programmer” code is as follows:

<!DOCTYPE html>
<html>
    <head>
        <meta charset="UTF-8">
        <title>Angry programmer</title>
        
        <style type="text/css">
        
            .room{    
                text-align: center;
                margin: 200px 10px;
                position: relative;
                height: 240px;
                width: 320px;
                display: inline-block;
                
            }
            .room>*{
                float: left;
                position: absolute;
                bottom: 0;
                left: 100px;
            }
            
            .room .beizi{
                width: 50px;
                height: 100px;
                border-radius: 10px;
                background-color: darkolivegreen;
                color: white;
                line-height: 100px;
                    
            }
            .room>span{
                width: 100%;
                left: 80px; color: white;
            }
        
            canvas{
                /*display: none;*/
            }
            
            
        

            @keyframes tiao{0%{transform: translateY(0);}50%{transform: translateY(-100%);}100%{transform: translateY(0);}}
            @-webkit-keyframes tiao{0%{transform: translateY(0);}50%{transform: translateY(-100%);}100%{transform: translateY(0);}}
            
            .tiao{
                animation: tiao 1s ease-in-out .1s;
                -webkit-animation: tiao 1s ease-in-out .1s;
            }

        </style>
    </head>
    <body>
        <div class="room">
            <video autoplay width="320" height="240"></video>
            <div class="beizi">Water cup</div>
            <span>Try tapping on the desk</span>
        </div>
        
        <div class="room" style="display: none;">
            <canvas width="320" height="240"></canvas>
            <span>Interception per frame</span>
        </div>
        
        <div class="room">
            <canvas width="320" height="240"></canvas>
            <span>Contrast difference between the previous frame and this frame</span>
        </div>
        
        <div class="room">
            <canvas width="320" height="240"></canvas>
            <span>A screenshot with a certain amplitude of motion is detected</span>
        </div>
        
        <script>
            var w = 320,
                h = 240;
            
            var video = document.querySelector('video'),
                beizi = document.querySelector(".beizi"),
                canvasList = document.querySelectorAll('canvas'),
                canvas = canvasList[0],        // frame-capture canvas
                canvasForDiff = canvasList[1], // difference canvas
                canvasPhoto = canvasList[2];   // snapshot canvas
            
            
            
            // Camera access (the prefixed getUserMedia API of the time; ie/chrome/firefox)
            var getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia;
            getUserMedia.call(navigator, {video: true}, function(stream) {
                video.src = window.URL.createObjectURL(stream);
                video.play();
            }, function(err) {
                alert('error: ' + err);
            });
            
            //canvas
            var context = canvas.getContext('2d'),
                diffCtx = canvasForDiff.getContext('2d'),
                photo = canvasPhoto.getContext('2d');
            //Set the second canvas blend mode to difference
            diffCtx.globalCompositeOperation = 'difference';
            
            var preFrame,   // Previous frame
                curFrame;   // Current frame
        
            var diffFrame;  // Imagedata for storing difference frames
        
            //Capture and save frame content
            function captureAndSaveFrame(){
                preFrame = curFrame;
                context.drawImage(video, 0, 0, w, h);
                curFrame = canvas.toDataURL();  // Convert to Base64 and save
            }
        
            //Draw Base64 image onto canvas
            function drawImg(src, ctx){
                ctx = ctx || diffCtx;
                var img = new Image();
                img.src = src;
                ctx.drawImage(img, 0, 0, w, h);
            }
        
            //Difference between two frames before and after rendering
            function renderDiff(){
                if(!preFrame || !curFrame) return;
                diffCtx.clearRect(0, 0, w, h);
                drawImg(preFrame);
                drawImg(curFrame);
                diffFrame = diffCtx.getImageData( 0, 0, w, h );  // The imagedata object that captures the difference frame
            }
        
            //Calculation difference
            function calcDiff(){
                if(!diffFrame) return 0;
                var cache = arguments.callee,
                    count = 0;
                cache.total = cache.total || 0; // The sum of the values of all pixels when the entire canvas is white
                for (var i = 0, l = diffFrame.width * diffFrame.height * 4; i < l; i += 4) {
                    count += diffFrame.data[i] + diffFrame.data[i + 1] + diffFrame.data[i + 2];
                    if(!cache.isLoopEver) {  // only accumulate on the first pass
                        cache.total += 255 * 3;   // Single white pixel value
                    }
                }
                cache.isLoopEver = true;
                count *= 3;  // Brightness amplification
                //Returns the proportion of "total pixel value of highlighted part of difference canvas" to "total pixel value of fully lit canvas"
                return Number(count/cache.total).toFixed(2);
            }
            
            
            
            
            var t,d;
            //Timing capture
            function timer(delta){
                d = 0;
                setTimeout(function(){
                    captureAndSaveFrame();
                    renderDiff();
                    setTimeout(function(){
                        if((d=calcDiff())>=0.13){
                            
                            handel();
                        }
                    }, 16.7);
        
                    timer(delta)
                }, delta || 500);
                
                
                
            }
            
            
            function handel(){
                if(t){return}
                photo.drawImage(video, 0, 0, w, h);
                beizi.classList.add('tiao');
                canvasForDiff.classList.add('tiao');
                document.body.style.backgroundColor = (function(){
                    return ['red','yellow','black','blue','green','white'][Math.floor(Math.random()*6)];
                }());
                t = setTimeout(function(){
                    canvasForDiff.classList.remove('tiao');
                    beizi.classList.remove('tiao');
                    t = null;
                },1000);
                
            }
        
            timer();
            
        </script>
    </body>
</html>
  • Q: When I run this in Chrome the page is blank and nothing shows up. Why?
  • A: According to Google's documentation this is a security restriction: the camera cannot be accessed from a non-HTTPS origin. In short, Chrome will not open the camera for an HTML file opened via the file:// protocol; serve the page from a local server instead, for example localhost. (A sketch of the modern replacement API follows this list.)
  • Also mind browser compatibility! The author only wrote for Chrome.
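For reference, navigator.getUserMedia / webkitGetUserMedia and URL.createObjectURL(stream) used above were current at the time but have since been deprecated. A rough modern equivalent, sketched here only (it assumes the same video element from the listing and a secure context such as HTTPS or localhost), looks like this:

// Modern sketch: navigator.mediaDevices.getUserMedia returns a Promise
// and the stream is assigned to srcObject instead of src.
navigator.mediaDevices.getUserMedia({ video: true })
    .then(function (stream) {
        video.srcObject = stream;   // replaces video.src = URL.createObjectURL(stream)
        return video.play();
    })
    .catch(function (err) {
        console.log('camera error: ' + err);
    });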

Come on

How could a programmer stop there? Let's write our own automatic camera!

  • AV.html
<!DOCTYPE html>
<html>
    <head>
        <meta charset="UTF-8">
        <title></title>
    </head>
    <style>
        div{
            text-align: center;
            width: 250px;
            margin: 10px 0;
        }
    </style>
    <body>
        <video></video>
        
        <div>
            <button onclick="paipaipai()">Take a photo manually</button>
            <button onclick="auto()">Take photos automatically</button>
            <button onclick="debug()">Debug mode</button>
        </div>
        
        <img/>
        
        <script src="AV.js"></script>
        <script>
            var img = document.querySelector("img");
            var av = Av({
                // style: {
                //     width: 320,  // video/canvas width
                //     height: 240  // height
                // },
                // deg: 0.12,   // sensitivity: motion amplitude needed to trigger
                // die: 500,    // cooldown: nothing re-triggers within 500ms after an event fires
                // delta: 300,  // grab a video frame every 300ms
                // sw: true,    // switch, on by default
                fn: function (data) {  // callback fired when motion is detected
                    img.src = data;
                }
            });
            
            function paipaipai(){
                av.switchAV(false);
                img.src = av.getCurFrame();
            }
            
            function auto(){
                av.switchAV(true);
            }
            
            function debug(){
                [].map.call(document.querySelectorAll("canvas"),function(c){
                    c.style.display = 'inline';
                });
            }
            
        </script>
    </body>
</html>
  • AV.js
(function() {
    var av = function(option) {
        
        option = option || {};
        this.op = (function(o) {
            for(var i in option) {
                o[i] = option[i];
            }
            return o;
        }({
            style: {
                width: 320,  // video/canvas width
                height: 240  // height
            },
            deg: 0.12,   // sensitivity: motion amplitude needed to trigger
            die: 500,    // cooldown: nothing re-triggers within 500ms after an event fires
            delta: 300,  // grab a video frame every 300ms
            sw: true     // switch, on by default
        }));
        this.initEm();
        this.initMedia();
        this.initCanvas();
        this.switchAV();
    }
    var avpro = av.prototype;

    //Initialize base element
    avpro.initEm = function() {
        //Video element
        this.video = document.querySelector(this.op.el || 'video');
        this.video.setAttribute('autoplay', '');
        this.video.style.objectFit = 'fill';
        //Initialize canvas element
        var canvas = document.createElement("canvas");
        canvas.style.display = 'none';
        canvas.style.backgroundColor = this.video.style.backgroundColor = 'grey';
        canvas.style.width = this.video.style.width = (this.w = canvas.width = this.op.style.width) + 'px';
        canvas.style.height = this.video.style.height = (this.h = canvas.height = this.op.style.height) + 'px';
        
        //Action canvas cloning, mapping video AC action, easy to remember
        var acCanvas = canvas.cloneNode(true);
        //Comparing canvas clones, comparing differences BW indicates that black and white are easy to remember 
        var bwCanvas = canvas.cloneNode(true);
        //Clean up and release resources
        canvas = null;
        //Add to page
        document.body.appendChild(acCanvas);
        document.body.appendChild(bwCanvas);
        
        this.canvas = acCanvas;
        this.acCanvas = acCanvas.getContext('2d');
        this.bwCanvas = bwCanvas.getContext('2d');
    }

    //Initialize camera
    avpro.initMedia = function() {
        var tv = this.video;
        // Prefixed getUserMedia API of the time (ie/chrome)
        var getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia;
        getUserMedia.call(navigator, {
            video: true
        }, function(se) {
            tv.src = window.URL.createObjectURL(se);
            tv.play();
        }, function(err) {
            console.log('err:' + err);
        });
    }

    //Initialize canvas, frames
    avpro.initCanvas = function() {
        //Set the second canvas blend mode to difference
        this.bwCanvas.globalCompositeOperation = 'difference';
        //Previous frame current frame difference frame
        this.preFrame = this.curFrame = this.diffFrame = null;
    }

    //Start AV
    avpro.startAv = function() {
        var tv = this;
        var call = arguments.callee;
        tv.zt = setTimeout(function() {
            tv.avSaveFrame();
            tv.renderDiff();
            setTimeout(function() {
                if(tv.calcDiff() >= tv.op.deg) {
                    //Trigger event
                    tv.handel();
                }
            }, 16.7);
            call.call(tv);
        },tv.op.delta);

    }
    
    //Setting switches and callback functions
    avpro.switchAV = function(sw,fn){
        if(sw == undefined ? this.op.sw:sw){
            this.startAv();
        }else{
            this.stopAv();
        }
        fn && (this.op.fn = fn);
    }
    
    avpro.stopAv = function(){
        this.zt && clearTimeout(this.zt);
    }
    
    //Trigger event
    avpro.handel = function() {
        var tv = this;
        if(tv.t) {
            return;
        }
        tv.op.fn && tv.op.fn(tv.curFrame);  // pass the current frame (Base64) to the user callback
        tv.t = setTimeout(function() {
            tv.t = null;
        },tv.op.die);
        
    }
    
    avpro.getCurFrame = function(){
        return this.curFrame;
    }

    //Capture and save frames
    avpro.avSaveFrame = function() {
        //Frame replacement
        this.preFrame = this.curFrame;
        this.acCanvas.drawImage(this.video, 0, 0, this.w, this.h);
        //Go to Base64 and save the current frame
        this.curFrame = this.canvas.toDataURL();
    }

    //Draw Base64 image onto canvas
    avpro.drawImg = function(src, ctx) {
        ctx = ctx || this.bwCanvas;
        var img = new Image();
        img.src = src;
        ctx.drawImage(img, 0, 0 ,this.w, this.h);
        
    }

    //Difference between two frames before and after rendering
    avpro.renderDiff = function() {
        if(!this.preFrame || !this.curFrame) return;
        this.bwCanvas.clearRect(0, 0, this.w, this.h);
        this.drawImg(this.preFrame);
        this.drawImg(this.curFrame);
        //The imagedata object that captures the difference frame
        this.diffFrame = this.bwCanvas.getImageData(0, 0, this.w, this.h);
    }

    //Calculation difference
    avpro.calcDiff = function() {
        if(!this.diffFrame) return 0;
        var cache = arguments.callee,
            count = 0;
        cache.total = cache.total || 0; // The sum of the values of all pixels when the entire canvas is white
        for(var i = 0, l = this.diffFrame.width * this.diffFrame.height * 4; i < l; i += 4) {
            count += this.diffFrame.data[i] + this.diffFrame.data[i + 1] + this.diffFrame.data[i + 2];
            if(!cache.isLoopEver) {  // only accumulate on the first pass
                cache.total += 255 * 3; // Single white pixel value
            }
        }
        cache.isLoopEver = true;
        count *= 3; // Brightness amplification
        //Returns the proportion of "total pixel value of highlighted part of difference canvas" to "total pixel value of fully lit canvas"
        return Number(count / cache.total).toFixed(2);
    }

    var nav = null;
    window.Av = function(op) {
        return nav || (nav = new av(op));
    };

}())
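One small caveat about the library above: calcDiff caches its all-white total on arguments.callee, which is not allowed in strict mode (or inside ES modules). A possible alternative, sketched here rather than taken from the original author, keeps the cache in a closure and produces the same ratio; the function name calcDiffRatio and the diffFrame parameter are just illustrative.

// Sketch: the same calculation as calcDiff above, written as a plain function
// so the cached total lives in a closure instead of arguments.callee.
var calcDiffRatio = (function () {
    var total = 0;  // sum of all RGB channel values if the whole canvas were white
    return function (diffFrame) {
        if (!diffFrame) return 0;
        if (!total) total = diffFrame.width * diffFrame.height * 255 * 3;
        var count = 0;
        for (var i = 0, l = diffFrame.data.length; i < l; i += 4) {
            count += diffFrame.data[i] + diffFrame.data[i + 1] + diffFrame.data[i + 2];
        }
        // keep the original ×3 amplification and two-decimal rounding
        return Number((count * 3 / total).toFixed(2));
    };
}());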

Last words

Before writing, I felt I was about to produce an unparalleled article.
After finishing, I'm left with a wry smile: it reads like a plain running account.

Finally, I hope Baoqiang can get better.
