WebCodecs mix-blend-mode

_clai · Published 2024-05-06

WebCodecs mix-blend-mode: the screen blend mode
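
The demo below fetches fire.mp4, demuxes it with mp4box.js (loaded as mp4box.min.js), and decodes the extracted samples with a WebCodecs VideoDecoder. Each decoded VideoFrame is turned into an ImageBitmap and cached; a draw loop then paints a background photo onto a canvas and composites the cached frames on top with globalCompositeOperation = 'screen', the canvas counterpart of CSS mix-blend-mode: screen. Screen blending never darkens the layer underneath (black pixels leave it untouched), so a fire clip shot against a dark background composites cleanly: only the flames appear over the photo.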


<!DOCTYPE html>
<html lang="en">
  <head>
    <meta charset="UTF-8" />
    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
    <title>Document</title>
    <style>
      .bg,
      .video,
      .canvas {
        width: 200px;
        height: auto;
        object-fit: contain;
        box-sizing: border-box;
      }
    </style>
  </head>
  <body>
    <img src="./school_overcast-s.jpg" alt="" class="bg" />
    <video src="./fire.mp4" controls autoplay loop muted class="video"></video>
    <canvas class="canvas"></canvas>

    <script src="./mp4box.min.js"></script>
    <script>
      const imgBg = document.querySelector('.bg');
      /** @type {HTMLVideoElement} */
      const video = document.querySelector('.video');
      /** @type {HTMLCanvasElement} */
      const canvas = document.querySelector('.canvas');

      /** @type {CanvasRenderingContext2D} */
      const ctx = canvas.getContext('2d');

      const mp4box = MP4Box.createFile();
      // console.log("mp4box => ", mp4box)

      // The video track to decode, and its decoder
      let videoTrack = null,
        videoDecoder = null;
      // The sequence of decoded video frames
      const videoFrames = [];

      let nbSampleTotal = 0,
        countSample = 0;

      mp4box.onError = (e) => {
        console.error('Error:', e);
      };
      mp4box.onReady = (info) => {
        console.log('Info:', info);
        videoTrack = info.videoTracks[0];

        if (videoTrack) {
          // Extract samples from the track with the given track id
          mp4box.setExtractionOptions(videoTrack.id, 'video', {
            // Number of samples delivered per onSamples callback
            nbSamples: 100,
          });
        }

        // Set up the video decoder
        videoDecoder = new VideoDecoder({
          async output(videoFrame) {
            // console.log('videoFrame => ', videoFrame);

            const img = await createImageBitmap(videoFrame);
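            // Keep a drawable copy and release the VideoFrame right away;
            // decoders can stall if too many frames are left open.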
            videoFrames.push({
              img,
              duration: videoFrame.duration,
              timestamp: videoFrame.timestamp,
            });
            videoFrame.close();
          },
          error(err) {
            console.log('videoDecoder error => ', err);
          },
        });

        nbSampleTotal = videoTrack.nb_samples;

        videoDecoder.configure({
          codec: videoTrack.codec,
          codedWidth: videoTrack.track_width,
          codedHeight: videoTrack.track_height,
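          // Codec-specific configuration record (avcC / hvcC / vpcC), see getExtraData()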
          description: getExtraData(),
        });

        mp4box.start();
      };
      mp4box.onSamples = (trackId, ref, samples) => {
        // console.log('Samples:', trackId, ref, samples);
        // samples: the extracted sample data
        if (videoTrack.id === trackId) {
          mp4box.stop();

          countSample += samples.length;

          for (const { is_sync, duration, data, cts } of samples) {
            const type = is_sync ? 'key' : 'delta';
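            // Sync samples are independently decodable key frames; the rest are delta frames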

            const chunk = new EncodedVideoChunk({
              type,
              timestamp: cts,
              duration,
              data,
            });

            videoDecoder.decode(chunk);
          }

          if (countSample === nbSampleTotal) {
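            // Every sample has been queued; flush so the decoder emits its remaining frames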
            videoDecoder.flush();
          }
        }
      };

      /**
       * Builds the `description` field for VideoDecoder.configure()
       * from the codec-specific box in the MP4 (avcC, hvcC or vpcC).
       */
      function getExtraData() {
        const entry = mp4box.moov.traks[0].mdia.minf.stbl.stsd.entries[0];

        const box = entry.avcC ?? entry.hvcC ?? entry.vpcC;
        if (box) {
          const stream = new DataStream(undefined, 0, DataStream.BIG_ENDIAN);

          box.write(stream);
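          // write() serializes the whole box; drop the 8-byte header (size + type)
          // and keep only the raw codec configuration record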
          return new Uint8Array(stream.buffer.slice(8));
        }
      }

      let index = 0;
      /**
       * Draws the next cached frame composited over the background image;
       * scheduled with video.requestVideoFrameCallback.
       */
      function drawFrame() {
        // Decoding may still be in progress when the first callback fires.
        if (videoFrames.length === 0) {
          video.requestVideoFrameCallback(drawFrame);
          return;
        }

        const { img } = videoFrames[index];
        ctx.clearRect(0, 0, canvas.width, canvas.height);

        ctx.globalCompositeOperation = 'source-over';
        // Draw the background image
        ctx.drawImage(imgBg, 0, 0, canvas.width, canvas.height);

        // Composite the video frame with the screen blend mode
        ctx.globalCompositeOperation = 'screen';
        ctx.drawImage(img, 0, 0, canvas.width, canvas.height);

        index++;

        if (index === videoFrames.length) {
          index = 0;
        }

        video.requestVideoFrameCallback(drawFrame);
      }

      fetch('./fire.mp4')
        .then((res) => res.arrayBuffer())
        .then((buffer) => {
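          // mp4box.js expects each appended ArrayBuffer to carry its byte offset in the file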
          buffer.fileStart = 0;

          mp4box.appendBuffer(buffer);
          mp4box.flush();
        });

      video.addEventListener('loadedmetadata', () => {
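        // Match the canvas to the video's intrinsic size, then start the per-frame draw loop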
        canvas.width = video.videoWidth;
        canvas.height = video.videoHeight;

        video.requestVideoFrameCallback(drawFrame);
      });
    </script>
  </body>
</html>
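
One caveat: drawFrame above advances index by one cached frame per presented video frame and wraps around at the end, so the canvas can drift if the decoder output and the video element's playback ever get out of step. A possible refinement, sketched below rather than taken from the original demo, is to pick the cached frame by the element's current media time. The sketch assumes the stored timestamp values are the cts values in the track's timescale (mp4box.js exposes it as videoTrack.timescale) and that metadata.mediaTime from requestVideoFrameCallback is in seconds.

function frameAt(mediaTimeSeconds) {
  // Convert seconds into the track's timescale so it is comparable with cts.
  const target = mediaTimeSeconds * videoTrack.timescale;

  // VideoDecoder outputs frames in presentation order, so a linear scan for
  // the last frame at or before the target time is enough for a demo.
  let candidate = videoFrames[0];
  for (const frame of videoFrames) {
    if (frame.timestamp <= target) candidate = frame;
    else break;
  }
  return candidate;
}

// Usage: have drawFrame accept the callback's (now, metadata) arguments and
// draw frameAt(metadata.mediaTime).img instead of videoFrames[index].img.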