Render to Texture


A simple example of rendering to an offscreen texture, then using that texture when rendering the scene. The example builds on the texture-on-a-square code.


Render to texture - drawing the square on the texture from another angle.


Functions Used: setVertexBuffer(), setIndexBuffer(), drawIndexed(), createBuffer(), getMappedRange(), getContext(), requestAdapter(), getPreferredCanvasFormat(), createCommandEncoder(), beginRenderPass(), setPipeline(), draw(), end(), submit(), getCurrentTexture(), createView(), createShaderModule()

To make the example more interesting, we render the offscreen pass using a different camera position; for the on-screen pass we set the camera back to facing forwards.

You could use this type of technique to render mirrors or views from different perspectives - move the camera to that location, render to a texture, then draw that texture onto a surface in the scene (think of a car's rear-view mirror letting you see behind you).
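As a rough illustration of the mirror idea (not part of the example code below), a rear view can be built with the same gl-matrix calls the example already loads; the names camPos and camDir here are placeholders:

// Hypothetical sketch: build a 'rear-view mirror' view-projection matrix
// using gl-matrix. camPos/camDir are placeholder inputs, not variables
// from the example below.
function rearViewMatrix( camPos, camDir )
{
  const proj = mat4.create();
  const view = mat4.create();
  const vp   = mat4.create();
  mat4.perspective(proj, Math.PI / 2, canvas.width / canvas.height, 0.001, 500.0);
  // Look from the camera position back the way we came (negated direction)
  const behind = [ camPos[0] - camDir[0], camPos[1] - camDir[1], camPos[2] - camDir[2] ];
  mat4.lookAt(view, camPos, behind, [0, 1, 0]);
  mat4.multiply(vp, proj, view);
  return vp; // write this into the uniform buffer before the offscreen pass
}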

You need to add a few things to render to an offscreen texture:

1. Create an offscreen texture (a normal texture, but with extra usage flags so it can be used as a render target).
2. Create another bind group - one linked to the offscreen texture.
3. Create another pipeline which uses the offscreen bind group and texture.

When you render offscreen, just make sure you set that pipeline and bind group.

Finally, when you build the render pass descriptor, set the colorAttachments to use the offscreen texture's view (not the texture from the context for the current screen).
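Condensed, the key additions look like this (a minimal sketch using a hypothetical offscreenTexture; the full listing below shows the same pieces in context):

// 1. Offscreen texture - note the extra RENDER_ATTACHMENT usage flag
const offscreenTexture = device.createTexture({
    size:   [canvas.width, canvas.height],
    format: presentationFormat,
    usage:  GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.TEXTURE_BINDING
});

// 2./3. A bind group (and matching pipeline) that samples the offscreen texture
// ...

// Render pass that draws INTO the offscreen texture instead of the canvas
const renderPassDescriptor = {
    colorAttachments: [{ view:       offscreenTexture.createView(),
                         loadOp:     'clear',
                         clearValue: [0, 0, 0, 1],
                         storeOp:    'store' }]
};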

// Load matrix library dynamically (on-the-fly)
let matprom = await fetch( 'https://cdnjs.cloudflare.com/ajax/libs/gl-matrix/2.6.0/gl-matrix-min.js' );
let mattex  = await matprom.text();
var script  = document.createElement('script');
script.type = 'text/javascript';
script.innerHTML = mattex;
document.head.appendChild(script); 

// -------------
let canvas = document.createElement('canvas');
document.body.appendChild( canvas ); canvas.height = canvas.width = 512;

const context = canvas.getContext('webgpu');
const adapter = await navigator.gpu.requestAdapter();
const device  = await adapter.requestDevice();
const presentationFormat = navigator.gpu.getPreferredCanvasFormat(); 
context.configure({ device: device, format: presentationFormat });

const presentationSize = [ canvas.width, canvas.height ];

async function loadTexture( fileName = "https://webgpulab.xbdev.net/var/images/test512.png" )
{
  console.log('loading image:', fileName );
  // Load image 
  const img = document.createElement("img");
  img.src = fileName;

  await Promise.all([
    img.decode()
  ]);

  let imgWidth  = img.width;
  let imgHeight = img.height;

  // Draw the image onto a 2d canvas so the raw RGBA pixels can be read back
  const imageCanvas = document.createElement('canvas');
  imageCanvas.width  = imgWidth;
  imageCanvas.height = imgHeight;
  const imageCanvasContext = imageCanvas.getContext('2d');
  imageCanvasContext.drawImage(img, 0, 0, imgWidth, imgHeight);
  const imageData = imageCanvasContext.getImageData(0, 0, imgWidth, imgHeight);
  let textureData = imageData.data;
  console.log('textureData.byteLength:', textureData.byteLength );

  // Create a texture and a sampler using WebGPU
  const sampler = device.createSampler({
    minFilter: "linear",
    magFilter: "linear"  
  });

  const basicTexture = device.createTexture({
    size: [imgWidth, imgHeight, 1],
    format: "rgba8unorm",
    usage: GPUTextureUsage.COPY_DST | GPUTextureUsage.TEXTURE_BINDING
  });

  // Upload the pixels (rgba8unorm is 4 bytes per pixel)
  device.queue.writeTexture(
      { texture: basicTexture },
      textureData,
      { bytesPerRow: imgWidth * 4 },
      [ imgWidth, imgHeight ]
  );
  return { w: imgWidth, h: imgHeight, s: sampler, t: basicTexture };
}
// end loadTexture(..)
// end loadTexture(..)

function createTexturedSquare( device )
{
  const s = 0.7;
  let positionVertex = new Float32Array([
     s,    s,   0.0,
    -s,    s,   0.0,
     s,   -s,   0.0,
    -s,   -s,   0.0
  ]);
  const vBuffer = device.createBuffer({ size:  positionVertex.byteLength,
                                        usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST });
  device.queue.writeBuffer(vBuffer, 0, positionVertex);
  
  let uvVertex = new Float32Array([
     1.0,   0.0,
     0.0,   0.0,
     1.0,   1.0,
     0.0,   1.0,
  ]);
  const uvBuffer = device.createBuffer({ size:  uvVertex.byteLength,
                                         usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST });
  device.queue.writeBuffer(uvBuffer, 0, uvVertex);
  
  // return the vertex and texture buffers
  return { v: vBuffer, t: uvBuffer };
}

function createMatrixUniform( matrixUniformBuffer = 0, camx = 0, camy = 0, camz = 2 )
{
  // camz default: camera pulled back so the square at z=0 is visible
  // (the exact default distance is an assumption here)

  // Create the matrix in Javascript (using matrix library)
  const projectionMatrix     = mat4.create();
  const viewMatrix           = mat4.create();
  const viewProjectionMatrix = mat4.create();
  
  mat4.perspective(projectionMatrix, Math.PI / 2, canvas.width / canvas.height, 0.001, 500.0);
  mat4.lookAt(viewMatrix, [camx, camy, camz],  [0, 0, 0], [0, 1, 0]);
  mat4.multiply(viewProjectionMatrix, projectionMatrix, viewMatrix);
  
  // Create a buffer using WebGPU API (copy matrix into it)
  if ( matrixUniformBuffer == 0 )
  matrixUniformBuffer = device.createBuffer({
     size:  viewProjectionMatrix.byteLength,
     usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST
  });
  device.queue.writeBuffer(matrixUniformBuffer, 0, viewProjectionMatrix );

  return matrixUniformBuffer;
}

let shaderWGSL = `
@group(0) @binding(0) var<uniform> viewProjectionMatrix : mat4x4<f32>;

struct vsout {
    @builtin(position) Position: vec4<f32>,
    @location(0)       uvs     : vec2<f32>
};

@vertex 
fn vsmain(@location(0) pos : vec3<f32>,
          @location(1) uvs : vec2<f32>) -> vsout
{
    var r:vsout;
    r.Position = viewProjectionMatrix * vec4<f32>(pos, 1.0);
    r.uvs      = uvs;
    return r;
}

@group(0) @binding(1) var mySampler: sampler;
@group(0) @binding(2) var myTexture: texture_2d<f32>;

@fragment 
fn psmain(@location(0) uvs: vec2<f32>) -> @location(0) vec4<f32> 
{
    var texCol = textureSample(myTexture, mySampler, uvs );
    return vec4<f32>( texCol.xyz, 0.5 );
    //return vec4<f32>(1.0, 0.0, 0.5, 1.0);
}
`;

const textureData         = await loadTexture( );
const squareBuffer        = createTexturedSquare( device );
const matrixUniformBuffer = createMatrixUniform();
const shaderModule        = device.createShaderModule({ code: shaderWGSL });

// Define the layout information for the shader (uniforms)
const sceneUniformBindGroupLayout = device.createBindGroupLayout({
  entries: [{ binding: 0, visibility: GPUShaderStage.VERTEX,   buffer:  { type: "uniform" }   },
            { binding: 1, visibility: GPUShaderStage.FRAGMENT, sampler: { type: "filtering" } },
            { binding: 2, visibility: GPUShaderStage.FRAGMENT, texture: { sampleType: "float", viewDimension: "2d" } },
           ]
});

const sceneUniformBindGroup = device.createBindGroup({
  layout: sceneUniformBindGroupLayout,
  entries: [{ binding: 0, resource: { buffer: matrixUniformBuffer } },
            { binding: 1, resource: textureData.s                   },
            { binding: 2, resource: textureData.t.createView()      },
           ]
});

const pipeline = device.createRenderPipeline({
  layout: device.createPipelineLayout({bindGroupLayouts: [sceneUniformBindGroupLayout]}),
  vertex:      {   module: shaderModule, entryPoint: 'vsmain',
                   buffers: [
                            { arrayStride: 4*3, attributes: [ {shaderLocation: 0, offset: 0, format: 'float32x3' } ] },
                            { arrayStride: 4*2, attributes: [ {shaderLocation: 1, offset: 0, format: 'float32x2' } ] }
                            ]
               },
  fragment:    {   module: shaderModule, entryPoint: 'psmain',
                   targets: [ { format: presentationFormat } ]
               }, 
  primitive:   {   topology: 'triangle-strip' },
});

// Graphics buffer texture render targets
const screenTexture0 = device.createTexture({
    size:   presentationSize,
    usage:  GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.TEXTURE_BINDING, // 0x10 | 0x04
    format: presentationFormat // 'bgra8unorm'
});

const screenTextureView0 = screenTexture0.createView();

const sceneUniformBindGroup1 = device.createBindGroup({
  layout: sceneUniformBindGroupLayout,
  entries: [{ binding: 0, resource: { buffer: matrixUniformBuffer } },
            { binding: 1, resource: textureData.s                   },
            { binding: 2, resource: screenTextureView0              },
           ]
});

const pipeline1 = device.createRenderPipeline({
  layout: device.createPipelineLayout({bindGroupLayouts: [sceneUniformBindGroupLayout]}),
  vertex:      {   module: shaderModule, entryPoint: 'vsmain',
                   buffers: [
                            { arrayStride: 4*3, attributes: [ {shaderLocation: 0, offset: 0, format: 'float32x3' } ] },
                            { arrayStride: 4*2, attributes: [ {shaderLocation: 1, offset: 0, format: 'float32x2' } ] }
                            ]
               },
  fragment:    {   module: shaderModule, entryPoint: 'psmain',
                   targets: [ { format: presentationFormat } ]
               }, 
  primitive:   {   topology: 'triangle-strip' },
});





function draw() 
{
  // First pass: render the square into the offscreen texture
  // (camera offset to the side, so the quad is seen from another angle)
  {
  createMatrixUniform( matrixUniformBuffer, 1, 1 );
    
  const commandEncoder = device.createCommandEncoder();
  const renderPassDescriptor =  { // GPURenderPassDescriptor 
        colorAttachments: [ { view: screenTextureView0,  loadOp: "clear", clearValue: [0.0, 0.8, 0.8, 1],  storeOp: 'store' },
                          ]};
  const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);
  passEncoder.setViewport(0.0,  0.0,                    // x, y
                          canvas.width, canvas.height,  // width, height
                          0, 1);                        // minDepth, maxDepth                  
  passEncoder.setPipeline(pipeline);
  passEncoder.setVertexBuffer(0, squareBuffer.v);
  passEncoder.setVertexBuffer(1, squareBuffer.t);
  passEncoder.setBindGroup(0, sceneUniformBindGroup);
  passEncoder.draw(4, 1, 0, 0);
  passEncoder.end();
  device.queue.submit([commandEncoder.finish()]);
  }
  
  // ------------------------------------------
  
  // Second pass: render to the canvas, sampling the offscreen texture
  // (camera reset to the default forward-facing view)
  {
  createMatrixUniform(matrixUniformBuffer);
    
  const contextView = context.getCurrentTexture().createView();
  const commandEncoder = device.createCommandEncoder();
  const renderPassDescriptor =  { // GPURenderPassDescriptor 
        colorAttachments: [ { view: contextView, loadOp: "clear", clearValue: [0.8, 0.8, 0.8, 1],  storeOp: 'store' }
                          ]};
  const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);
  passEncoder.setViewport(0.0,  0.0,                    // x, y
                          canvas.width, canvas.height,  // width, height
                          0, 1);                        // minDepth, maxDepth                  
  passEncoder.setPipeline(pipeline1);
  passEncoder.setVertexBuffer(0, squareBuffer.v);
  passEncoder.setVertexBuffer(1, squareBuffer.t);
  passEncoder.setBindGroup(0, sceneUniformBindGroup1);
  passEncoder.draw(4, 1, 0, 0);
  passEncoder.end();
  device.queue.submit([commandEncoder.finish()]);
  }
  
  //requestAnimationFrame(frame);
}
draw();




Things to Try


• Animate the camera for the first (offscreen) pass
• Develop a more complex scene to visualize the offscreen rendering concept
• Move around the scene as normal, but render a small quad in the top-left corner with a 'rear' view of the camera (what's behind) - using an offscreen prerender pass first
• Add multiple render-to-texture passes (you can render to more than one texture)
• Render different information to textures - for example, store the depth, color, and position information each in its own texture, then draw them on the screen in a second pass (see the sketch after this list)
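For the last two ideas, a single render pass can write to several offscreen textures at once. A minimal sketch, assuming colorTexture and positionTexture are created like screenTexture0 above (with RENDER_ATTACHMENT | TEXTURE_BINDING usage):

// Hypothetical sketch: one pass writing to two offscreen textures
const gbufferPassDescriptor = {
    colorAttachments: [
        { view: colorTexture.createView(),    loadOp: 'clear', clearValue: [0,0,0,1], storeOp: 'store' },
        { view: positionTexture.createView(), loadOp: 'clear', clearValue: [0,0,0,1], storeOp: 'store' }
    ]
};
// The matching pipeline lists one entry per target, e.g.:
//   fragment: { ..., targets: [ { format: 'rgba8unorm' }, { format: 'rgba16float' } ] }
// and the fragment shader returns a struct with @location(0) and
// @location(1) outputs - one per color attachment.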



Resources and Links


• WebGPU Lab Example [LINK]

• Deferred Renderer Example (uses render-to-texture techniques) [LINK]



























 