 

WebGPU 'Compute'..

Compute, Algorithms, and Code.....

 


Image Filtering


It is very easy to take an image and apply filtering effects. As shown here, we can blur, sharpen, or run edge detection; even better, the results can be combined in different ways.


Image filtering - blurring, sharpening, edge detection, and combinations of them.


Functions Used: requestAdapter(), requestDevice(), getPreferredCanvasFormat(), createShaderModule(), createComputePipeline(), createBindGroupLayout(), createBindGroup(), createCommandEncoder(), beginComputePass(), setPipeline(), setBindGroup(), dispatchWorkgroups(), end(), copyTextureToTexture(), finish(), submit(), getCurrentTexture(), createView(), writeTexture(), writeBuffer()
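

All three effects in the complete code below follow the same pattern: a 3x3 convolution, where each output pixel is a weighted sum of its 3x3 neighbourhood and the kernel supplies the weights. As a minimal sketch (assuming the same myTexture0 binding used in the shader below; the helper name convolve3x3 is made up for illustration), the general form in WGSL is:

// General 3x3 convolution pattern (sketch) - 'weights' is whichever 3x3
// kernel you want to apply (blur, sharpen, Sobel, ...)
fn convolve3x3(weights : array<array<f32, 3>, 3>,
               coords  : vec2<i32>,
               dims    : vec2<i32>) -> vec4<f32> {
    var result = vec4<f32>(0.0);
    for (var i = -1; i <= 1; i++) {
        for (var j = -1; j <= 1; j++) {
            // Clamp so border pixels sample inside the texture
            let sampleCoords = clamp(coords + vec2<i32>(i, j), vec2<i32>(0), dims - vec2<i32>(1));
            result += textureLoad(myTexture0, sampleCoords, 0) * weights[i + 1][j + 1];
        }
    }
    return result;
}

Swapping in a different kernel is all it takes to change the effect.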





Complete Code


let div = document.createElement('div');
document.body.appendChild( div );
div.style['font-size'] = '20pt';
function log()
{
  console.log( ...arguments );
  let args = [...arguments].join(' ');
  div.innerHTML += args + '<br><br>';
}

log('WebGPU Compute Example');

async function loadTexture( fileName = "https://webgpulab.xbdev.net/var/images/test512.png",
                            width = 512, height = 512 ) {
  console.log('loading image:', fileName);
  // Load image
  const img = document.createElement("img");
  img.crossOrigin = "anonymous"; // allow getImageData() on images served from another origin
  img.src = fileName;

  await img.decode();

  const originalWidth  = img.width;
  const originalHeight = img.height;

  const imageCanvas  = document.createElement('canvas');
  imageCanvas.width  = width;
  imageCanvas.height = height;
  const imageCanvasContext = imageCanvas.getContext('2d');

  // Draw the image onto the canvas, resizing it to the specified width and height
  imageCanvasContext.drawImage(img, 0, 0, width, height);

  const imageData   = imageCanvasContext.getImageData(0, 0, width, height);
  const textureData = imageData.data;
  console.log('textureData.byteLength:', textureData.byteLength);

  const basicTexture = device.createTexture({
    size:   [width, height, 1],
    format: "rgba8unorm",
    usage:  GPUTextureUsage.COPY_DST | GPUTextureUsage.TEXTURE_BINDING
  });

  device.queue.writeTexture(
    { texture: basicTexture },
    textureData,
    { bytesPerRow: width * 4 },   // 4 bytes per rgba8unorm pixel
    [width, height, 1]
  );

  return { w: width, h: height, t: basicTexture };
}


if (!navigator.gpu) { log("WebGPU is not supported (or is it disabled? flags/settings)"); throw new Error("WebGPU not supported"); }

const adapter = await navigator.gpu.requestAdapter();
const device  = await adapter.requestDevice();

const imgWidth  = 512;
const imgHeight = imgWidth;

// ----------------------------------------------------------

const texture0 = await loadTexture( 'https://webgpulab.xbdev.net/var/images/test512.png', imgWidth );
//const texture1 = await loadTexture( 'https://webgpulab.xbdev.net/var/images/avatar.png', imgWidth );

// ----------------------------------------------------------

// Basic canvas which will be used to display the output from the compute shader

let canvasa = document.createElement('canvas');
document.body.appendChild( canvasa ); canvasa.height = canvasa.width = imgWidth;
const context = canvasa.getContext('webgpu');
const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
console.log('presentationFormat:', presentationFormat );

context.configure({ device: device,
                    usage:  GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST,
                    format: "rgba8unorm" /*presentationFormat*/ });

let canvasTexture = context.getCurrentTexture();

// ----------------------------------------------------------

// Output texture - the output from the compute shader is written to this texture,
// then copied to the 'canvas' - so it needs to be the same size as the output
// canvas
const texture1 = device.createTexture({
  size:   [imgWidth, imgHeight, 1],
  format: "rgba8unorm",
  usage:  GPUTextureUsage.COPY_DST | GPUTextureUsage.COPY_SRC | GPUTextureUsage.TEXTURE_BINDING | GPUTextureUsage.STORAGE_BINDING
});

// ----------------------------------------------------------

const timerUniformBuffer = device.createBuffer({
  size:  4,
  usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST
});

const timestep = new Float32Array( [0.0] );

device.queue.writeBuffer( timerUniformBuffer, 0, timestep );

// ----------------------------------------------------------

const GCOMPUTE = GPUShaderStage.COMPUTE;

// Bind group layout and bind group
const bindGroupLayout = device.createBindGroupLayout({
  entries: [ { binding: 0, visibility: GCOMPUTE, texture: { sampleType: "float" } },
             { binding: 1, visibility: GCOMPUTE, buffer:  { type: "uniform" } },
             { binding: 2, visibility: GCOMPUTE, storageTexture: { format: "rgba8unorm", access: "write-only", viewDimension: "2d" } }
           ]
});

const bindGroup = device.createBindGroup({
    layout: bindGroupLayout,
    entries: [ { binding: 0, resource: texture0.t.createView() },
               { binding: 1, resource: { buffer: timerUniformBuffer } },
               { binding: 2, resource: texture1.createView() }
    ]
});

// Compute shader code
const computeShader = ` 
@group(0) @binding(0) var myTexture0:  texture_2d<f32>; // input texture image
@group(0) @binding(1) var<uniform>     mytimer   : f32; // timer increments each frame
@group(0) @binding(2) var myTexture1:  texture_storage_2d<rgba8unorm, write>; // output image


@compute @workgroup_size(8, 8)
fn main(@builtin(global_invocation_id) globalId      : vec3<u32>,
        @builtin(local_invocation_id)  localId       : vec3<u32>,
        @builtin(workgroup_id)         workgroupId   : vec3<u32>,
        @builtin(num_workgroups)       workgroupSize : vec3<u32>
        ) 
{
    var imgWidth  = f32( ${imgWidth}  );
    var imgHeight = f32( ${imgHeight} );

    var coords = vec2<f32>( f32(globalId.x), f32(globalId.y) );
    var uv = coords / vec2<f32>(imgWidth, imgHeight); // normalize coordinates to 0.0 - 1.0 range

    // -------------------------------------------
    
    var blurColor = vec4<f32>(0.0);
    {
    // Gaussian Blur Kernel (3x3)
    let blurKernel: array<array<f32, 3>, 3> = array<array<f32, 3>, 3>(
        array<f32, 3>(0.0625, 0.125, 0.0625),
        array<f32, 3>(0.125, 0.25, 0.125),
        array<f32, 3>(0.0625, 0.125, 0.0625)
    );

    for (var i = -1; i <= 1; i++) {
        for (var j = -1; j <= 1; j++) {
            var sampleUv = uv + vec2<f32>(f32(i) / imgWidth, f32(j) / imgHeight);
            sampleUv = clamp(sampleUv, vec2<f32>(0.0), vec2<f32>(1.0));
            blurColor += textureLoad(myTexture0, vec2<i32>(sampleUv * vec2<f32>(imgWidth, imgHeight)), 0) * blurKernel[i + 1][j + 1];
        }
    }
    }
    
    // -------------------------------------------
    
    var sharpenColor = vec4<f32>(0.0);
    {
    // Sharpening Kernel (3x3)
    let sharpenKernel: array<array<f32, 3>, 3> = array<array<f32, 3>, 3>(
        array<f32, 3>(-1.0, -1.0, -1.0),
        array<f32, 3>(-1.0,  9.0, -1.0),
        array<f32, 3>(-1.0, -1.0, -1.0)
    );

    for (var i = -1; i <= 1; i++) {
        for (var j = -1; j <= 1; j++) {
            var sampleUv = uv + vec2<f32>(f32(i) / imgWidth, f32(j) / imgHeight);
            sampleUv = clamp(sampleUv, vec2<f32>(0.0), vec2<f32>(1.0));
            sharpenColor += textureLoad(myTexture0, vec2<i32>(sampleUv * vec2<f32>(imgWidth, imgHeight)), 0) * sharpenKernel[i + 1][j + 1];
        }
    }
    }

    var edgeColor = vec4<f32>(0.0);
    {
    // Sobel Edge Detection (3x3)
    let sobelKernelX: array<array<f32, 3>, 3> = array<array<f32, 3>, 3>(
        array<f32, 3>(-1.0, 0.0, 1.0),
        array<f32, 3>(-2.0, 0.0, 2.0),
        array<f32, 3>(-1.0, 0.0, 1.0)
    );

    let sobelKernelY: array<array<f32, 3>, 3> = array<array<f32, 3>, 3>(
        array<f32, 3>(-1.0, -2.0, -1.0),
        array<f32, 3>(0.0,  0.0,  0.0),
        array<f32, 3>(1.0,  2.0,  1.0)
    );

    var edgeColorX = vec4<f32>(0.0);
    var edgeColorY = vec4<f32>(0.0);
    for (var i = -1; i <= 1; i++) {
        for (var j = -1; j <= 1; j++) {
            var sampleUv = uv + vec2<f32>(f32(i) / imgWidth, f32(j) / imgHeight);
            sampleUv = clamp(sampleUv, vec2<f32>(0.0), vec2<f32>(1.0));
            var sampleColor = textureLoad(myTexture0, vec2<i32>(sampleUv * vec2<f32>(imgWidth, imgHeight)), 0);
            edgeColorX += sampleColor * sobelKernelX[i + 1][j + 1];
            edgeColorY += sampleColor * sobelKernelY[i + 1][j + 1];
        }
    }

    edgeColor = sqrt(edgeColorX * edgeColorX + edgeColorY * edgeColorY);
    }
    
    // -------------------------------------------
    
    var finalColor = vec4<f32>(0);
    
    // 4 corners of the output image - each with a different effect
    if      ( uv.x < 0.5 && uv.y < 0.5 ) { finalColor = blurColor;    } // top left blur
    else if ( uv.x > 0.5 && uv.y < 0.5 ) { finalColor = sharpenColor; } // top right sharpen
    else if ( uv.x < 0.5 && uv.y > 0.5 ) { finalColor = edgeColor;    } // bottom left edge
    else    { finalColor = blurColor * 0.3 + sharpenColor * 0.4 + edgeColor * 0.3; } // bottom right combine effects

    // Store the result in the output texture
    textureStore(myTexture1, vec2<i32>(globalId.xy), finalColor);
    
    

}
`;

// Pipeline setup
const computePipeline = device.createComputePipeline({
    layout:  device.createPipelineLayout({ bindGroupLayouts: [bindGroupLayout] }),
    compute: { module:     device.createShaderModule({ code: computeShader }),
               entryPoint: "main" }
});


async function frame()
{
  // Command submission
  const commandEncoder = device.createCommandEncoder();
  const passEncoder = commandEncoder.beginComputePass();
  passEncoder.setPipeline(computePipeline);
  passEncoder.setBindGroup(0, bindGroup);
  // One 8x8 workgroup per 8x8 block of pixels (matches @workgroup_size(8, 8))
  passEncoder.dispatchWorkgroups( imgWidth/8, imgWidth/8 );
  passEncoder.end();

  canvasTexture = context.getCurrentTexture();

  commandEncoder.copyTextureToTexture( { texture: texture1 },
                                       { texture: canvasTexture },
                                       { width: imgWidth, height: imgHeight, depthOrArrayLayers: 1 } );

  // Submit GPU commands.
  const gpuCommands = commandEncoder.finish();
  device.queue.submit([gpuCommands]);

  timestep[0] = timestep[0] + 0.01;
  device.queue.writeBuffer( timerUniformBuffer, 0, timestep );

  requestAnimationFrame(frame);
}

frame();
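

One thing to notice: the mytimer uniform is bound and updated every frame, but the shader above never actually reads it. A quick way to put it to use (a sketch, not part of the original demo) is to replace the quadrant if/else selection at the end of the shader with a time-based blend, e.g. fading between the untouched image and the blurred version:

    // Sketch: animate with the bound 'mytimer' uniform - mix between the
    // original sample and the blurred result on a slow sine wave
    let original = textureLoad(myTexture0, vec2<i32>(coords), 0);
    let t = 0.5 + 0.5 * sin(mytimer);   // oscillates between 0.0 and 1.0
    finalColor = mix(original, blurColor, t);

Since timestep[0] increases by 0.01 each frame, the blend cycles smoothly over a few seconds.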





Things to Try


• Modify the coefficients for the filtering effects
• Try other kernel filters (an emboss kernel sketch is given below)
• Create multiple filter functions and connect them together (combine filters)
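

For the second suggestion, one kernel worth trying is an emboss filter; written in the same style as the kernels in the shader above, it would look something like:

    // Emboss kernel (3x3) - a drop-in alternative to the kernels above
    let embossKernel: array<array<f32, 3>, 3> = array<array<f32, 3>, 3>(
        array<f32, 3>(-2.0, -1.0, 0.0),
        array<f32, 3>(-1.0,  1.0, 1.0),
        array<f32, 3>( 0.0,  1.0, 2.0)
    );

Accumulate it over the 3x3 neighbourhood exactly as the blur and sharpen kernels are, and the output takes on a raised, stamped-metal look.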











Resources and Links


• WebGPU Lab Demo [LINK]













 