www.xbdev.net
xbdev - software development
Thursday February 19, 2026
Home | Contact | Support | WebGPU Graphics and Compute ... | WebGPU.. Games, Tutorials, Demos, Projects, and Code.....
     
 

WebGPU..

Games, Tutorials, Demos, Projects, and Code.....

 


Multi-Texturing


We're now starting to open Pandora's box of textures! As multiple textures mean you can combine textures - both loaded ones and generated ones - you can also mix transforms so the different textures are manipulated in different ways.

These textures are eventually combined to create an infinite number of possibilities. For example, loading in a brick surface and mixing in dirt and scratches on top.


Simple multitexturing example - combining two textures together to create a new result.
Simple multitexturing example - combining two textures together to create a new result.


Functions Used: getContext(), requestAdapter(), getPreferredCanvasFormat(), createCommandEncoder(), beginRenderPass(), setPipeline(), draw(), end(), submit(), getCurrentTexture(), createView(), createShaderModule()

The simple fragment shader below has two textures (myTexture0 and myTexture1). We use the first texture as the main color and the second texture contains some scratches and noise that we want to overlay.

We scale and clamp the noise texture and use only one of the components - the texture is black and white (so we only need the red component).


// Fragment-shader resources: one shared sampler, the base colour texture,
// a timer uniform (unused here, available for animated effects), and the
// scratch/noise overlay texture.
@group(0) @binding(1) var mySampler: sampler;
@group(0) @binding(2) var myTexture0: texture_2d<f32>;
@group(0) @binding(3) var<uniform> myTimer  : f32;
@group(0) @binding(4) var myTexture1: texture_2d<f32>;

@fragment 
fn psmain(@location(0) uv: vec2<f32>) -> @location(0) vec4<f32> 
{    
    // Sample both textures with the same uv coordinates.
    var texCol0 = textureSample(myTexture0, mySampler, uv ).xyz;
    
    var texCol1 = textureSample(myTexture1, mySampler, uv ).xyz;
    
    // The overlay is greyscale, so only the red channel is needed; scale it
    // up a little and clamp back into [0,1] before modulating the base colour.
    var col = texCol0 * clamp(texCol1.r*1.5, 0.0, 1.0);
    
    return vec4<f32>( col, 1.0 ); // set alpha to 1.0 so there isn't any transparency
}



Example shows the original texture and a scratch texture which are combined to produce a scratched old texture.
Example shows the original texture and a scratch texture which are combined to produce a scratched old texture.



Textures can come in many formats - for 32bit rgba and single 8bit gray scale - this can be accommodated for in the file and the shader so your loading and rendering is more efficient (just default to 32bit so it's easier for the examples).


The full code for the example is given below.

// Load matrix library on dynamically (on-the-fly) - gl-matrix supplies the
// `mat4` helpers used by createMatrixUniform().
const matprom = await fetch( 'https://cdnjs.cloudflare.com/ajax/libs/gl-matrix/2.6.0/gl-matrix-min.js' );
const mattex  = await matprom.text();
const script  = document.createElement('script');
script.type      = 'text/javascript';
script.innerHTML = mattex;
document.head.appendChild(script); 

// ------------- create a square canvas to render into
const canvas = document.createElement('canvas');
document.body.appendChild( canvas ); canvas.height = canvas.width = 512;

// Standard WebGPU bootstrap: context -> adapter -> device -> configure
// the swap chain with the platform's preferred canvas format.
const context = canvas.getContext('webgpu');
const adapter = await navigator.gpu.requestAdapter();
const device  = await adapter.requestDevice();
const presentationFormat = navigator.gpu.getPreferredCanvasFormat(); 
context.configure({ device: device, format: presentationFormat });

/**
 * Load an image from a URL and upload it to a new WebGPU texture.
 * @param {string} fileName - image URL (defaults to a test image)
 * @returns {Promise<{w:number,h:number,s:GPUSampler,t:GPUTexture}>}
 *          width, height, a linear sampler and the rgba8unorm texture.
 */
async function loadTexture( fileName = "https://webgpulab.xbdev.net/var/images/test512.png" )
{
  console.log('loading image:', fileName );

  // Load image - decode() resolves once the pixel data is ready.
  const img = document.createElement("img");
  img.src = fileName;
  await img.decode();

  const imgWidth  = img.width;
  const imgHeight = img.height;

  // Draw the image onto a 2d canvas so the raw RGBA bytes can be read back.
  const imageCanvas  = document.createElement('canvas');
  imageCanvas.width  = imgWidth;
  imageCanvas.height = imgHeight;
  const imageCanvasContext = imageCanvas.getContext('2d');
  imageCanvasContext.drawImage(img, 0, 0, imgWidth, imgHeight);
  const imageData   = imageCanvasContext.getImageData(0, 0, imgWidth, imgHeight);
  const textureData = imageData.data;
  console.log('textureData.byteLength:', textureData.byteLength );

  // Create a texture and a sampler using WebGPU
  const sampler = device.createSampler({
    minFilter: "linear",
    magFilter: "linear"  
  });

  const basicTexture = device.createTexture({
    size:   [imgWidth, imgHeight, 1],
    format: "rgba8unorm",
    usage:  GPUTextureUsage.COPY_DST | GPUTextureUsage.TEXTURE_BINDING
  });

  // NOTE: rgba8unorm is 4 bytes per texel, so bytesPerRow must be width*4
  // (writeTexture is queued immediately; it returns no promise to await).
  device.queue.writeTexture(
      { texture: basicTexture },
      textureData,
      { bytesPerRow: imgWidth * 4 },
      [ imgWidth, imgHeight, 1 ]
  );
  return { w: imgWidth, h: imgHeight, s: sampler, t: basicTexture };
}
// end loadTexture(..)

/**
 * Build vertex buffers for a unit quad rendered as a 4-vertex triangle-strip.
 * @param {GPUDevice} device - device used to allocate and fill the buffers
 * @returns {{v:GPUBuffer, t:GPUBuffer}} position buffer (xyz) and uv buffer.
 */
function createTexturedSquare( device )
{
  // Four xyz positions, ordered for a triangle-strip.
  const positionVertex = new Float32Array([
     1.0,    1.0,   0.0,
    -1.0,    1.0,   0.0,
     1.0,   -1.0,   0.0,
    -1.0,   -1.0,   0.0
  ]);
  const vBuffer = device.createBuffer({ size:  positionVertex.byteLength,
                                        usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST });
  device.queue.writeBuffer(vBuffer, 0, positionVertex);
  
  // Matching texture coordinates (one uv pair per vertex).
  const uvVertex = new Float32Array([
     1.0,   0.0,
     0.0,   0.0,
     1.0,   1.0,
     0.0,   1.0,
  ]);
  const uvBuffer = device.createBuffer({ size:  uvVertex.byteLength,
                                         usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST });
  device.queue.writeBuffer(uvBuffer, 0, uvVertex);
  
  // return the vertex and texture buffers
  return { v: vBuffer, t: uvBuffer };
}

/**
 * Build a view-projection matrix on the CPU (gl-matrix) and upload it to a
 * new uniform buffer.
 * @returns {GPUBuffer} uniform buffer holding the 4x4 view-projection matrix.
 */
function createMatrixUniform( )
{
  // Create the matrix in Javascript (using matrix library)
  const projectionMatrix     = mat4.create();
  const viewMatrix           = mat4.create();
  const viewProjectionMatrix = mat4.create();
  
  // 90-degree fov, canvas aspect ratio, near/far of 0.001/500.
  mat4.perspective(projectionMatrix, Math.PI / 2, canvas.width / canvas.height, 0.001, 500.0);
  // Camera at z=1.5 looking at the origin, y-up.
  mat4.lookAt(viewMatrix, [0, 0, 1.5], [0, 0, 0], [0, 1, 0]);
  mat4.multiply(viewProjectionMatrix, projectionMatrix, viewMatrix);
  
  // Create a buffer using WebGPU API (copy matrix into it)
  const matrixUniformBuffer = device.createBuffer({
     size:  viewProjectionMatrix.byteLength,
     usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST
  });
  device.queue.writeBuffer(matrixUniformBuffer, 0, viewProjectionMatrix );

  return matrixUniformBuffer;
}

// Create a timer for the shader - a single f32 uniform, rewritten each frame
// in draw(); myTimer stays `let`-free mutable via its Float32Array contents.
const myTimer = new Float32Array( [0.0] );
const timerBuffer = device.createBuffer({ size:  myTimer.byteLength,
                                          usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST });


// WGSL source for both pipeline stages: the vertex stage transforms the quad
// by the view-projection matrix and passes uvs through; the fragment stage
// modulates myTexture0 by the (scaled, clamped) red channel of myTexture1
// to overlay the scratch/noise mask. Bindings match the bind-group layout
// created below (0: matrix, 1: sampler, 2/4: textures, 3: timer).
let shaderWGSL = `
@group(0) @binding(0) var<uniform> viewProjectionmMatrix : mat4x4<f32>;

struct vsout {
    @builtin(position) Position: vec4<f32>,
    @location(0)       uvs     : vec2<f32>
};

@vertex 
fn vsmain(@location(0) pos : vec3<f32>,
          @location(1) uvs : vec2<f32>) -> vsout

    var r:vsout;
    r.Position = viewProjectionmMatrix * vec4<f32>(pos, 1.0);
    r.uvs      = uvs;
    return r;
}

@group(0) @binding(1) var mySampler: sampler;
@group(0) @binding(2) var myTexture0: texture_2d<f32>;
@group(0) @binding(3) var<uniform> myTimer  : f32;
@group(0) @binding(4) var myTexture1: texture_2d<f32>;

@fragment 
fn psmain(@location(0) uv: vec2<f32>) -> @location(0) vec4<f32> 
{    

    var texCol0 = textureSample(myTexture0, mySampler, uv ).xyz;
    
    var texCol1 = textureSample(myTexture1, mySampler, uv ).xyz;
    
    var col = texCol0 * clamp(texCol1.r*1.5, 0.0, 1.0);
    
    return vec4<f32>( col, 1.0 ); // set alpha to 1.0 so there isn't any transparncy

}
`;

// Load both textures: the base colour image and the scratch/crack overlay.
const texture0 = await loadTexture( 'https://webgpulab.xbdev.net/var/images/test512.png' );
const texture1 = await loadTexture( 'https://webgpulab.xbdev.net/var/images/cracks.jpg');

// Geometry, camera matrix and compiled shader module for the pipeline below.
const squareBuffer        = createTexturedSquare( device );
const matrixUniformBuffer = createMatrixUniform();
const shaderModule        = device.createShaderModule({ code: shaderWGSL });

// Define the layout information for the shader (uniforms)
// Binding numbers mirror the @binding(..) declarations in the WGSL source:
// 0 matrix (vertex), 1 sampler, 2 base texture, 3 timer, 4 overlay texture.
const sceneUniformBindGroupLayout = device.createBindGroupLayout({
  entries: [{ binding: 0, visibility: GPUShaderStage.VERTEX,   buffer:  { type: "uniform" }      },
            { binding: 1, visibility: GPUShaderStage.FRAGMENT, sampler: { type: "filtering"  } },
            { binding: 2, visibility: GPUShaderStage.FRAGMENT, texture: { sampleType: "float", viewDimension: "2d"} },
            { binding: 3, visibility: GPUShaderStage.FRAGMENT, buffer:  { type: "uniform" }      },
            { binding: 4, visibility: GPUShaderStage.FRAGMENT, texture: { sampleType: "float", viewDimension: "2d"} },
           ]
});

// Bind the actual resources to the layout slots declared above.
// Both textures share texture0's sampler (binding 1).
const sceneUniformBindGroup = device.createBindGroup({
  layout: sceneUniformBindGroupLayout,
  entries: [{ binding: 0, resource: { buffer: matrixUniformBuffer }    },
            { binding: 1, resource: texture0.s                  },
            { binding: 2, resource: texture0.t.createView()     },
            { binding: 3, resource: { buffer: timerBuffer  }    },
            { binding: 4, resource: texture1.t.createView()     },
           ]
});

// Render pipeline: two vertex buffers (positions xyz, uvs) feeding vsmain,
// psmain writing to the swap-chain format, drawn as a triangle-strip quad.
const pipeline = device.createRenderPipeline({
  layout: device.createPipelineLayout({ bindGroupLayouts: [sceneUniformBindGroupLayout] }),
  vertex:      {   module: shaderModule, entryPoint: 'vsmain',
                   buffers: [
                            { arrayStride: 4*3, attributes: [ {shaderLocation: 0, offset: 0, format: 'float32x3' } ] },
                            { arrayStride: 4*2, attributes: [ {shaderLocation: 1, offset: 0, format: 'float32x2' } ] }
                            ]
               },
  fragment:    {   module: shaderModule, entryPoint: 'psmain',
                   targets: [ { format: presentationFormat } ]
               }, 
  primitive:   {   topology: 'triangle-strip' },
});

/**
 * Per-frame render: advance the timer uniform, encode a single render pass
 * that clears to light grey and draws the textured quad, then re-schedule
 * itself via requestAnimationFrame.
 */
function draw() 
{
  // Advance and upload the timer (available to the fragment shader).
  myTimer[0] = myTimer[0] + 0.1;
  device.queue.writeBuffer(timerBuffer, 0, myTimer );
  
  const commandEncoder = device.createCommandEncoder();
  const renderPassDescriptor = { // GPURenderPassDescriptor 
             colorAttachments: [ { view:       context.getCurrentTexture().createView(),
                                   loadOp:     "clear",
                                   clearValue: [0.8, 0.8, 0.8, 1], // clear screen to color/rgba
                                   storeOp:    'store' } ]           };
  const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);
  // setViewport requires all six arguments - minDepth/maxDepth of 0..1.
  passEncoder.setViewport(0.0,  0.0,                   // x, y
                          canvas.width, canvas.height, // width, height
                          0, 1);                       // minDepth, maxDepth                  
  passEncoder.setPipeline(pipeline);
  passEncoder.setVertexBuffer(0, squareBuffer.v);
  passEncoder.setVertexBuffer(1, squareBuffer.t);
  passEncoder.setBindGroup(0, sceneUniformBindGroup);
  passEncoder.draw(4, 1, 0, 0); // 4 vertices, 1 instance (triangle-strip quad)
  passEncoder.end();
  device.queue.submit([commandEncoder.finish()]);
  requestAnimationFrame(draw);
}
draw();
console.log('done...');


The example just combines two textures in the simplest way - as discussed in previous tutorials - textures can be manipulated in many different ways (texture transforms) so the combined result can be very unique.


Things to Try


• Adding a 3rd texture
• Use transforms to manipulate the uv coordinates for one of the textures (rotated and stretched)
• Modify the texture `sampler` so it supports texture repeating and mirroring (so combined textures can be tiled instead of just clamping to the last pixel)



Resources and Links


• WebGPU Lab Example [LINK]

























































WebGPU by Example: Fractals, Image Effects, Ray-Tracing, Procedural Geometry, 2D/3D, Particles, Simulations WebGPU Compute graphics and animations using the webgpu api 12 week course kenwright learn webgpu api kenwright programming compute and graphics applications with html5 and webgpu api kenwright real-time 3d graphics with webgpu kenwright webgpu api develompent a quick start guide kenwright webgpu by example 2022 kenwright webgpu gems kenwright webgpu interactive compute and graphics visualization cookbook kenwright wgsl webgpu shading language cookbook kenwright wgsl webgpugems shading language cookbook kenwright



 
Advert (Support Website)

 
 Visitor:
Copyright (c) 2002-2025 xbdev.net - All rights reserved.
Designated articles, tutorials and software are the property of their respective owners.