
Motion Blur


Building on the previous tutorial, where the rendered output was stored to a texture - we can store a few past frames to create a motion blur effect. Averaging these stored textures creates a `blur` effect if the object is moving around.


Quad is flying around, with motion blur being shown - we show the motion blur on the right side, and without on the left - just so you can compare.


Functions Used: setVertexBuffer(), setIndexBuffer(), drawIndexed(), createBuffer(), getMappedRange(), getContext(), requestAdapter(), getPreferredCanvasFormat(), createCommandEncoder(), beginRenderPass(), setPipeline(), draw(), end(), submit(), getCurrentTexture(), createView(), createShaderModule()

The scene is regularly rendered to an offscreen texture; this texture is then copied and saved (keeping the 4 most recent copies). A separate 'offscreen' rendering pass draws a full-screen quad that takes the average of the 4 stored textures.
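
In outline, the history works as a small ring buffer. A minimal sketch of the idea (illustrative names - the full implementation is in the `fullscreenquad` object below):

// Sketch: keep the last 4 rendered frames in a ring buffer of textures.
// 'frameIndex' wraps around so the oldest copy is the one overwritten.
let frameIndex = 0;
function saveFrame( device, sceneTexture, pastFrames, presentationSize )
{
    frameIndex = (frameIndex + 1) % pastFrames.length;
    const encoder = device.createCommandEncoder();
    // GPU-side copy of the freshly rendered frame into the oldest slot
    encoder.copyTextureToTexture( { texture: sceneTexture },
                                  { texture: pastFrames[frameIndex] },
                                  { width:  presentationSize[0],
                                    height: presentationSize[1],
                                    depthOrArrayLayers: 1 } );
    device.queue.submit( [encoder.finish()] );
}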


The offscreen quad samples the 4 stored textures and renders full screen. Just to emphasize the difference - the left side of the screen is drawn without motion blur and the right side with it. We also draw a small black line down the middle of the screen.

This is all done in the fragment shader - using the UV coordinates. As it's a full screen quad - the uv coordinates go from (0,0) to (1,1) for the full screen so we can easily calculate the middle.

fullscreenquad = function()
{
const s = 1.0;
this.positions = new Float32Array([ -s,  s,  0,
                                    -s, -s,  0,
                                     s, -s,  0,
                                     s,  s,  0 ]);

this.indices = new Uint32Array([ 0,1,2,   2,3,0 ]);

this.uvs = new Float32Array([ 0,0,  1,0,  1,1,  0,1 ]);

this.create = async function( device, presentationFormat, presentationSize )
{
this.positionBuffer = device.createBuffer({
  size:  this.positions.byteLength,
  usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST
});

this.uvBuffer = device.createBuffer({
  size:  this.uvs.byteLength,
  usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST
});

this.indicesBuffer = device.createBuffer({
  size:  this.indices.byteLength,
  usage: GPUBufferUsage.INDEX | GPUBufferUsage.COPY_DST
});

device.queue.writeBuffer(this.positionBuffer, 0, this.positions);
device.queue.writeBuffer(this.indicesBuffer,  0, this.indices  );
device.queue.writeBuffer(this.uvBuffer,       0, this.uvs      );

// var vertWGSL = document.getElementById('vertex.wgsl').innerHTML;
// var fragWGSL = document.getElementById('fragment.wgsl').innerHTML;

var vertWGSL = `
struct VSOut {
    @builtin(position) Position: vec4<f32>,
    @location(0)       uvs     : vec2<f32>,
};

@vertex 
fn main(@location(0) inPos  : vec3<f32>,
        @location(1) uvs    : vec2<f32>) -> VSOut
{
  var vsOut: VSOut;
  vsOut.Position = vec4<f32>( inPos, 1.0);
  vsOut.uvs      = uvs;
  return vsOut;
}
`;
  
var fragWGSL = `
@group(0) @binding(0) var mySampler: sampler;
@group(0) @binding(1) var myTexture0: texture_2d<f32>;
@group(0) @binding(2) var myTexture1: texture_2d<f32>;
@group(0) @binding(3) var myTexture2: texture_2d<f32>;
@group(0) @binding(4) var myTexture3: texture_2d<f32>;

@fragment
fn main( @location(0) uvs : vec2<f32> ) -> @location(0) vec4<f32> 
{
    let texCol0 = textureSample(myTexture0, mySampler, uvs ).xyz;
    let texCol1 = textureSample(myTexture1, mySampler, uvs ).xyz;
    let texCol2 = textureSample(myTexture2, mySampler, uvs ).xyz;
    let texCol3 = textureSample(myTexture3, mySampler, uvs ).xyz;
    
    let texCol = texCol0*0.25 + texCol1*0.25 + texCol2*0.25 + texCol3*0.25;
    
    // draw a small black line down the middle of the screen.
    if ( uvs.y >0.499 && uvs.y < 0.50 )
    {
        return vec4<f32>(0,0,0,1.0);
    }
    // only draw half the screen using motion blur to compare with and without
    if ( uvs.y < 0.5 )
    {
        return vec4<f32>(texCol0, 1.0);
    }
    
    return vec4<f32>(texCol, 1.0);
}
`;

// ----------------------------------------------------------------

let textureSampler = device.createSampler({
     minFilter: "linear",
     magFilter: "linear",
});

this.basicTextures = [];
for (let bs=0; bs<4; bs++)
{
    let basicTexture = device.createTexture({
        size: [presentationSize[0], presentationSize[1], 1],
        format: presentationFormat, // "bgra8unorm",
        //usage:  GPUTextureUsage.COPY_DST | GPUTextureUsage.TEXTURE_BINDING
        usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST | GPUTextureUsage.TEXTURE_BINDING
    });
    this.basicTextures.push( basicTexture );
}

// ----------------------------------------------------------------

this.sceneUniformBindGroupLayout = device.createBindGroupLayout({
  entries: [
    { binding: 0, visibility: GPUShaderStage.FRAGMENT, sampler: { type: "filtering" } },
    { binding: 1, visibility: GPUShaderStage.FRAGMENT, texture: { sampleType: "float", viewDimension: "2d" } },
    { binding: 2, visibility: GPUShaderStage.FRAGMENT, texture: { sampleType: "float", viewDimension: "2d" } },
    { binding: 3, visibility: GPUShaderStage.FRAGMENT, texture: { sampleType: "float", viewDimension: "2d" } },
    { binding: 4, visibility: GPUShaderStage.FRAGMENT, texture: { sampleType: "float", viewDimension: "2d" } },
  ]
});

this.uniformBindGroup = device.createBindGroup({
  layout:   this.sceneUniformBindGroupLayout,
  entries: [
    { binding: 0, resource: textureSampler },
    { binding: 1, resource: this.basicTextures[0].createView() },
    { binding: 2, resource: this.basicTextures[1].createView() },
    { binding: 3, resource: this.basicTextures[2].createView() },
    { binding: 4, resource: this.basicTextures[3].createView() },
   ],
});
  
// ----------------------------------------------------------------

this.pipeline = device.createRenderPipeline({
    layout: device.createPipelineLayout({ bindGroupLayouts: [this.sceneUniformBindGroupLayout] }),
    vertex:   {  module    : device.createShaderModule({ code: vertWGSL }),
                 entryPoint: 'main',
                 buffers   : [ { arrayStride: 12, attributes: [{ shaderLocation: 0, format: "float32x3", offset: 0 }] },
                               { arrayStride: 8,  attributes: [{ shaderLocation: 1, format: "float32x2", offset: 0 }] },
                             ]},
    fragment: {  module    : device.createShaderModule({ code: fragWGSL }),
                 entryPoint: 'main',
                 targets   : [{ format: presentationFormat }] },
    primitive: { topology  : 'triangle-list',
                 frontFace : "ccw",
                 cullMode  : 'none',
                 stripIndexFormat: undefined },
});

}
// end create

// ----------------------------------------------------------------
  
this.frameIndex = 0;

this.copyTexture = async function( device, texFrame, presentationSize )
{
  // copy the texture to the next slot in the basicTextures ring buffer
  this.frameIndex++;
  this.frameIndex = this.frameIndex % 4;

  const commandEncoder = device.createCommandEncoder();

  // copyTextureToTexture(source, destination, copySize)
  commandEncoder.copyTextureToTexture({ texture: texFrame },
                                      { texture: this.basicTextures[this.frameIndex] },
                                      { width:  presentationSize[0],
                                        height: presentationSize[1],
                                        depthOrArrayLayers: 1 } );

  // Submit GPU commands.
  const gpuCommands = commandEncoder.finish();
  await device.queue.submit([gpuCommands]);
}
  
// ---------------------------------------------------------------- 
  
this.draw = async function( device, context )
{
  // GPURenderPassDescriptor
  this.renderPassDescriptor = {
        colorAttachments: [{
             view:       undefined, // assigned later each frame
             loadOp:     "clear",
             clearValue: { r: 0.2, g: 0.2, b: 0.2, a: 1.0 },
             storeOp:    'store' }],
  };

  // --------------------------------------------------
  this.renderPassDescriptor.colorAttachments[0].view = context.getCurrentTexture().createView();

  const commandEncoder = device.createCommandEncoder();

  const renderPass = commandEncoder.beginRenderPass(this.renderPassDescriptor);
  renderPass.setPipeline(this.pipeline);
  renderPass.setBindGroup(0, this.uniformBindGroup);
  renderPass.setVertexBuffer(0, this.positionBuffer);
  renderPass.setVertexBuffer(1, this.uvBuffer);
  renderPass.setIndexBuffer(this.indicesBuffer, 'uint32');
  renderPass.drawIndexed(6, 1, 0, 0);
  renderPass.end();
  device.queue.submit([commandEncoder.finish()]);
}
// end draw(..)
  

}// end fullscreenquad


The body of the code that initializes the WebGPU API and draws the simple scene (a textured square moving around):


// Load matrix library dynamically (on-the-fly)
let matprom = await fetch( 'https://cdnjs.cloudflare.com/ajax/libs/gl-matrix/2.6.0/gl-matrix-min.js' );
let mattex  = await matprom.text();
var script  = document.createElement('script');
script.type = 'text/javascript';
script.innerHTML = mattex;
document.head.appendChild(script);

// -------------
let canvas = document.createElement('canvas');
document.body.appendChild( canvas ); canvas.height = canvas.width = 512;

const context = canvas.getContext('webgpu');
const adapter = await navigator.gpu.requestAdapter();
const device  = await adapter.requestDevice();
const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
context.configure({ device: device, format: presentationFormat });

const presentationSize = [ canvas.width, canvas.height ];

async function loadTexture( fileName = "https://webgpulab.xbdev.net/var/images/test512.png" )
{
  console.log('loading image:', fileName );
  // Load image
  const img = document.createElement("img");
  img.src = fileName;

  await Promise.all([
    img.decode()
  ]);

  let imgWidth  = img.width;
  let imgHeight = img.height;

  const imageCanvas = document.createElement('canvas');
  imageCanvas.width  = imgWidth;
  imageCanvas.height = imgHeight;
  const imageCanvasContext = imageCanvas.getContext('2d');
  imageCanvasContext.drawImage(img, 0, 0, imgWidth, imgHeight);
  const imageData = imageCanvasContext.getImageData(0, 0, imgWidth, imgHeight);
  let textureData = imageData.data;
  console.log('textureData.byteLength:', textureData.byteLength );

  // Create a texture and a sampler using WebGPU
  const sampler = device.createSampler({
    minFilter: "linear",
    magFilter: "linear"
  });

  const basicTexture = device.createTexture({
    size: [imgWidth, imgHeight, 1],
    format: "rgba8unorm",
    usage: GPUTextureUsage.COPY_DST | GPUTextureUsage.TEXTURE_BINDING
  });

  device.queue.writeTexture(
      { texture: basicTexture },
      textureData,
      { bytesPerRow: imgWidth * 4 }, // 4 bytes per rgba8 pixel
      [ imgWidth, imgHeight, 1 ]
  );
  return { w: imgWidth, h: imgHeight, s: sampler, t: basicTexture };
}
// end loadTexture(..)

function createTexturedSquare( device )
{
  const s = 0.7;
  let positionVertex = new Float32Array([
     s,    s,   0.0,
    -s,    s,   0.0,
     s,   -s,   0.0,
    -s,   -s,   0.0
  ]);
  const vBuffer = device.createBuffer({ size:  positionVertex.byteLength,
                                        usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST });
  device.queue.writeBuffer(vBuffer, 0, positionVertex);

  let uvVertex = new Float32Array([
     1.0,   0.0,
     0.0,   0.0,
     1.0,   1.0,
     0.0,   1.0,
  ]);
  const uvBuffer = device.createBuffer({ size:  uvVertex.byteLength,
                                         usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST });
  device.queue.writeBuffer(uvBuffer, 0, uvVertex);

  // return the vertex and texture buffers
  return { v: vBuffer, t: uvBuffer };
}

function createMatrixUniform( matrixUniformBuffer=0, camx=0, camy=0, camz=2 )
{
  // Create the matrix in Javascript (using matrix library)
  const projectionMatrix     = mat4.create();
  const viewMatrix           = mat4.create();
  const viewProjectionMatrix = mat4.create();

  mat4.perspective(projectionMatrix, Math.PI / 2, canvas.width / canvas.height, 0.001, 500.0);
  mat4.lookAt(viewMatrix, [camx, camy, camz], [0, 0, 0], [0, 1, 0]);
  mat4.multiply(viewProjectionMatrix, projectionMatrix, viewMatrix);

  // Create a buffer using WebGPU API (copy matrix into it)
  if ( matrixUniformBuffer == 0 )
  matrixUniformBuffer = device.createBuffer({
     size:  viewProjectionMatrix.byteLength,
     usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST
  });
  device.queue.writeBuffer(matrixUniformBuffer, 0, viewProjectionMatrix );

  return { matrixUniformBuffer: matrixUniformBuffer, viewMatrix: viewMatrix, projectionMatrix: projectionMatrix };
}

let shaderWGSL = `
@group(0) @binding(0) var<uniform> viewProjectionMatrix : mat4x4<f32>;

struct vsout {
    @builtin(position) Position: vec4<f32>,
    @location(0)       uvs     : vec2<f32>
};

@vertex 
fn vsmain(@location(0) pos : vec3<f32>,
          @location(1) uvs : vec2<f32>) -> vsout
{
    var r:vsout;
    r.Position = viewProjectionMatrix * vec4<f32>(pos, 1.0);
    r.uvs      = uvs;
    return r;
}

@group(0) @binding(1) var mySampler: sampler;
@group(0) @binding(2) var myTexture: texture_2d<f32>;

@fragment 
fn psmain(@location(0) uvs: vec2<f32>) -> @location(0) vec4<f32> 
{
    var texCol = textureSample(myTexture, mySampler, uvs );
    return vec4<f32>( texCol.xyz, 0.5 );
    //return vec4<f32>(1.0, 0.0, 0.5, 1.0);
}
`;

const textureData         = await loadTexture();
const squareBuffer        = createTexturedSquare( device );
const matrixUniformBuffer = createMatrixUniform().matrixUniformBuffer;
const shaderModule        = device.createShaderModule({ code: shaderWGSL });

// Define the layout information for the shader (uniforms)
const sceneUniformBindGroupLayout = device.createBindGroupLayout({
  entries: [{ binding: 0, visibility: GPUShaderStage.VERTEX,   buffer:  { type: "uniform" }   },
            { binding: 1, visibility: GPUShaderStage.FRAGMENT, sampler: { type: "filtering" } },
            { binding: 2, visibility: GPUShaderStage.FRAGMENT, texture: { sampleType: "float", viewDimension: "2d" } },
           ]
});

const sceneUniformBindGroup = device.createBindGroup({
  layout: sceneUniformBindGroupLayout,
  entries: [{ binding: 0, resource: { buffer: matrixUniformBuffer } },
            { binding: 1, resource: textureData.s              },
            { binding: 2, resource: textureData.t.createView() },
           ]
});

const pipeline = device.createRenderPipeline({
  layout: device.createPipelineLayout({ bindGroupLayouts: [sceneUniformBindGroupLayout] }),
  vertex:    { module: shaderModule, entryPoint: 'vsmain',
               buffers: [
                        { arrayStride: 4*3, attributes: [ { shaderLocation: 0, offset: 0, format: 'float32x3' } ] },
                        { arrayStride: 4*2, attributes: [ { shaderLocation: 1, offset: 0, format: 'float32x2' } ] }
                        ]
             },
  fragment:  { module: shaderModule, entryPoint: 'psmain',
               targets: [ { format: presentationFormat } ]
             },
  primitive: { topology: 'triangle-strip' },
});

// Graphics buffer texture render targets
const screenTexture0 = device.createTexture({
    size: presentationSize,
    // usage: 0x10|0x04, //  GPUTextureUsage.RENDER_ATTACHMENT|GPUTextureUsage.TEXTURE_BINDING,
    usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.TEXTURE_BINDING | GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST,
    format: presentationFormat // 'bgra8unorm',
});

const screenTexureView0 = screenTexture0.createView();


let fullscreen = new fullscreenquad();
await fullscreen.create(device, presentationFormat, presentationSize);


let counter = 0.0;

async function draw()
{
  counter += 0.1;
  let mats = createMatrixUniform( matrixUniformBuffer, 1, Math.sin( counter ), Math.cos( counter*0.1 ) );

  {
  const commandEncoder = device.createCommandEncoder();
  const renderPassDescriptor = { // GPURenderPassDescriptor
        colorAttachments: [ { view: screenTexureView0, loadOp: "clear", clearValue: [0.0, 0.8, 0.8, 1], storeOp: 'store' },
                          ]};
  const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);
  passEncoder.setViewport(0.0, 0.0,                    // x, y
                          canvas.width, canvas.height, // width, height
                          0, 1);                       // minDepth, maxDepth
  passEncoder.setPipeline(pipeline);
  passEncoder.setVertexBuffer(0, squareBuffer.v);
  passEncoder.setVertexBuffer(1, squareBuffer.t);
  passEncoder.setBindGroup(0, sceneUniformBindGroup);
  passEncoder.draw(4, 1, 0, 0);
  passEncoder.end();
  await device.queue.submit([commandEncoder.finish()]);
  }

  // save the rendered frame into the 4-texture history
  await fullscreen.copyTexture( device, screenTexture0, presentationSize );

  // ------------------------------------------
  // composite the stored frames (motion blur) to the canvas
  fullscreen.draw( device, context );

  requestAnimationFrame(draw);
}
draw();




The method of using past renders (stored to textures) to create a motion blur effect is an accumulation buffer technique. Each frame is rendered with a lower opacity and added to the previous frames in the accumulation buffer. The result is a blurred image that represents the motion of the scene. The advantage of this method is that it is simple and easy to implement; the disadvantage is that it requires a lot of memory and bandwidth, and it can produce ghosting artifacts if the motion is too fast or irregular.
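
A minimal WGSL sketch of that accumulation blend (hypothetical bindings - 'accumTexture' holding the running history and 'frameTexture' the new frame; this tutorial stores discrete frames instead):

@group(0) @binding(0) var mySampler    : sampler;
@group(0) @binding(1) var accumTexture : texture_2d<f32>;
@group(0) @binding(2) var frameTexture : texture_2d<f32>;

@fragment
fn main( @location(0) uvs : vec2<f32> ) -> @location(0) vec4<f32>
{
    let history = textureSample(accumTexture, mySampler, uvs).xyz;
    let current = textureSample(frameTexture, mySampler, uvs).xyz;
    // low weight for the new frame - older frames fade out exponentially
    return vec4<f32>( mix(history, current, 0.25), 1.0 );
}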


Things to Try


• Add another object to the scene which is not moving (so you can confirm only the moving objects have 'motion blur')
• Try to refactor the offscreen code so it's more flexible (i.e., define a constant to say how many past textures to store)
• Try to keep track of which texture is the most recent/oldest in the fragment shader and scale the textures non-linearly (a more recent texture has higher priority than older ones) - see the sketch after this list
• Try mixing colors - so the old textures are 'gray scale' - or converted to red/blue - to create a psychedelic effect
• Use texture transforms - so past echoes can be twisted, rotated and stretched slightly (even if not moving - mixed with colors so it's mesmerizing)
• Edge detection - determine which pixels have changed - but focus on ones which are on an edge (i.e., one side has changed and the other hasn't - then add some color emphasis)
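
For the non-linear weighting idea above, a possible drop-in replacement for the equal 0.25 weights in the fragment shader (assuming texCol0..texCol3 have been reordered newest-to-oldest, e.g., by passing the current frame index in a uniform; the weights just need to sum to 1):

// heavier weight on the most recent frame, fading off for older ones
let texCol = texCol0*0.4 + texCol1*0.3 + texCol2*0.2 + texCol3*0.1;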



Resources and Links


• WebGPU API Example [LINK]

This is not the only way to create motion blur - you could also use a velocity buffer (store the velocity of the object in the render-target pixel). The velocity is calculated by comparing the positions of the vertices in the previous and current frames, using a vertex shader. The velocity is then used to sample the previous frame in the direction and magnitude of the movement, using a fragment shader. The result is a blurred image that follows the motion of the scene. The advantage of this method is that it is more efficient and flexible than the accumulation buffer; the disadvantage is that it requires more shader calculations and it can produce noise or artifacts if the velocity is inaccurate or inconsistent.
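
A minimal sketch of the velocity-sampling half of that approach in WGSL (hypothetical bindings - 'velocityTexture' is assumed to hold per-pixel screen-space motion written by an earlier pass, which this tutorial does not implement):

@group(0) @binding(0) var mySampler       : sampler;
@group(0) @binding(1) var sceneTexture    : texture_2d<f32>;
@group(0) @binding(2) var velocityTexture : texture_2d<f32>;

@fragment
fn main( @location(0) uvs : vec2<f32> ) -> @location(0) vec4<f32>
{
    // per-pixel motion vector (current uv - previous uv)
    let velocity = textureSample(velocityTexture, mySampler, uvs).xy;
    var col = vec3<f32>(0.0);
    // take a few taps backwards along the motion vector and average them
    for (var i = 0; i < 8; i++)
    {
        col += textureSample(sceneTexture, mySampler, uvs - velocity * (f32(i) / 8.0)).xyz;
    }
    return vec4<f32>( col / 8.0, 1.0 );
}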



Copyright (c) 2002-2025 xbdev.net - All rights reserved.
Designated articles, tutorials and software are the property of their respective owners.