Render to Texture
Simple example of rendering to an offscreen texture, then using that texture to render in the scene. The example builds on the texture-on-a-square code.
Render to texture - drawing the square on the texture from another angle.
Functions Used: getContext(), requestAdapter(), requestDevice(), getPreferredCanvasFormat(), configure(), createSampler(), createTexture(), writeTexture(), createBuffer(), writeBuffer(), createShaderModule(), createBindGroupLayout(), createBindGroup(), createPipelineLayout(), createRenderPipeline(), createCommandEncoder(), beginRenderPass(), setViewport(), setPipeline(), setVertexBuffer(), setBindGroup(), draw(), end(), submit(), getCurrentTexture(), createView()
To make the example more interesting, we render the offscreen pass using a different camera position, then for the on-screen rendering we set the camera back to facing forwards.
You could use this type of technique to render mirrors or views from other perspectives - move the camera to that location, render to a texture, then render that texture to the surface (think of a car mirror that lets you see behind you).
You need to add a few things to render to an offscreen texture:
1. create an offscreen texture (a normal texture, but with extra usage flags so it can be used as a render target)
2. another bind group - which is linked to the offscreen texture
3. create another pipeline which uses the offscreen bind group and texture
When you render offscreen, just make sure you set the matching pipeline and bind group.
Finally, when you build the render pass descriptor, set the colorAttachments to use a view of the offscreen texture (not the current texture of the canvas context) - as sketched below.
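A minimal sketch of these two key pieces (the names offscreenTexture and offscreenPassDescriptor are illustrative - the full listing below uses screenTexture0):

// 1. Offscreen texture - TEXTURE_BINDING so it can be sampled later,
//    RENDER_ATTACHMENT so it can be drawn into as a render target
const offscreenTexture = device.createTexture({
  size:   [canvas.width, canvas.height],
  format: presentationFormat,
  usage:  GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.TEXTURE_BINDING
});
const offscreenView = offscreenTexture.createView();

// 2. Render pass that targets the offscreen texture instead of the canvas
const offscreenPassDescriptor = {
  colorAttachments: [{
    view:       offscreenView, // not context.getCurrentTexture().createView()
    loadOp:     'clear',
    clearValue: [0.0, 0.8, 0.8, 1.0],
    storeOp:    'store'
  }]
};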
// Load the gl-matrix library dynamically (on-the-fly)
let matprom = await fetch('https://cdnjs.cloudflare.com/ajax/libs/gl-matrix/2.6.0/gl-matrix-min.js');
let mattex  = await matprom.text();
var script  = document.createElement('script');
script.type = 'text/javascript';
script.innerHTML = mattex;
document.head.appendChild(script);

// -------------

// Create the canvas and set up the WebGPU device and context
let canvas = document.createElement('canvas');
document.body.appendChild(canvas);
canvas.height = canvas.width = 512;

const context = canvas.getContext('webgpu');
const adapter = await navigator.gpu.requestAdapter();
const device  = await adapter.requestDevice();

const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
context.configure({ device: device, format: presentationFormat });
const presentationSize = [canvas.width, canvas.height];

async function loadTexture(fileName = "https://webgpulab.xbdev.net/var/images/test512.png") {
  console.log('loading image:', fileName);

  // Load the image and draw it to a 2d canvas to get at the raw pixels
  const img = document.createElement("img");
  img.src = fileName;
  await Promise.all([img.decode()]);
  let imgWidth  = img.width;
  let imgHeight = img.height;

  const imageCanvas = document.createElement('canvas');
  imageCanvas.width  = imgWidth;
  imageCanvas.height = imgHeight;
  const imageCanvasContext = imageCanvas.getContext('2d');
  imageCanvasContext.drawImage(img, 0, 0, imgWidth, imgHeight);
  const imageData = imageCanvasContext.getImageData(0, 0, imgWidth, imgHeight);
  let textureData = imageData.data;
  console.log('textureData.byteLength:', textureData.byteLength);

  // Create a texture and a sampler using WebGPU
  const sampler = device.createSampler({ minFilter: "linear", magFilter: "linear" });
  const basicTexture = device.createTexture({
    size:   [imgWidth, imgHeight, 1],
    format: "rgba8unorm",
    usage:  GPUTextureUsage.COPY_DST | GPUTextureUsage.TEXTURE_BINDING
  });
  device.queue.writeTexture({ texture: basicTexture },
                            textureData,
                            { bytesPerRow: imgWidth * 4 },
                            [imgWidth, imgHeight, 1]);

  return { w: imgWidth, h: imgHeight, s: sampler, t: basicTexture };
} // end loadTexture(..)

function createTexturedSquare(device) {
  // Four vertices for a triangle-strip square
  const s = 0.7;
  let positionVertex = new Float32Array([
     s,  s, 0.0,
    -s,  s, 0.0,
     s, -s, 0.0,
    -s, -s, 0.0
  ]);
  const vBuffer = device.createBuffer({
    size:  positionVertex.byteLength,
    usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST
  });
  device.queue.writeBuffer(vBuffer, 0, positionVertex);

  let uvVertex = new Float32Array([
    1.0, 0.0,
    0.0, 0.0,
    1.0, 1.0,
    0.0, 1.0
  ]);
  const uvBuffer = device.createBuffer({
    size:  uvVertex.byteLength,
    usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST
  });
  device.queue.writeBuffer(uvBuffer, 0, uvVertex);

  // return the vertex and texture-coordinate buffers
  return { v: vBuffer, t: uvBuffer };
}

function createMatrixUniform(matrixUniformBuffer = 0, camx = 0, camy = 0, camz = 1) {
  // Create the view-projection matrix in JavaScript (using gl-matrix)
  const projectionMatrix     = mat4.create();
  const viewMatrix           = mat4.create();
  const viewProjectionMatrix = mat4.create();
  mat4.perspective(projectionMatrix, Math.PI / 2, canvas.width / canvas.height, 0.001, 500.0);
  mat4.lookAt(viewMatrix, [camx, camy, camz], [0, 0, 0], [0, 1, 0]);
  mat4.multiply(viewProjectionMatrix, projectionMatrix, viewMatrix);

  // Create a buffer using the WebGPU API (copy the matrix into it)
  if (matrixUniformBuffer == 0)
    matrixUniformBuffer = device.createBuffer({
      size:  viewProjectionMatrix.byteLength,
      usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST
    });
  device.queue.writeBuffer(matrixUniformBuffer, 0, viewProjectionMatrix);
  return matrixUniformBuffer;
}

let shaderWGSL = `
@group(0) @binding(0) var<uniform> viewProjectionMatrix : mat4x4<f32>;

struct vsout {
  @builtin(position) Position : vec4<f32>,
  @location(0)       uvs      : vec2<f32>
};

@vertex
fn vsmain(@location(0) pos : vec3<f32>,
          @location(1) uvs : vec2<f32>) -> vsout {
  var r : vsout;
  r.Position = viewProjectionMatrix * vec4<f32>(pos, 1.0);
  r.uvs      = uvs;
  return r;
}

@group(0) @binding(1) var mySampler : sampler;
@group(0) @binding(2) var myTexture : texture_2d<f32>;

@fragment
fn psmain(@location(0) uvs : vec2<f32>) -> @location(0) vec4<f32> {
  var texCol = textureSample(myTexture, mySampler, uvs);
  return vec4<f32>(texCol.xyz, 0.5);
  //return vec4<f32>(1.0, 0.0, 0.5, 1.0);
}`;

const textureData         = await loadTexture();
const squareBuffer        = createTexturedSquare(device);
const matrixUniformBuffer = createMatrixUniform();

const shaderModule = device.createShaderModule({ code: shaderWGSL });

// Define the layout information for the shader (uniforms)
const sceneUniformBindGroupLayout = device.createBindGroupLayout({
  entries: [
    { binding: 0, visibility: GPUShaderStage.VERTEX,   buffer:  { type: "uniform" } },
    { binding: 1, visibility: GPUShaderStage.FRAGMENT, sampler: { type: "filtering" } },
    { binding: 2, visibility: GPUShaderStage.FRAGMENT, texture: { sampleType: "float", viewDimension: "2d" } }
  ]
});

// Bind group for the offscreen pass - samples the loaded image texture
const sceneUniformBindGroup = device.createBindGroup({
  layout: sceneUniformBindGroupLayout,
  entries: [
    { binding: 0, resource: { buffer: matrixUniformBuffer } },
    { binding: 1, resource: textureData.s },
    { binding: 2, resource: textureData.t.createView() }
  ]
});

const pipeline = device.createRenderPipeline({
  layout: device.createPipelineLayout({ bindGroupLayouts: [sceneUniformBindGroupLayout] }),
  vertex: {
    module: shaderModule,
    entryPoint: 'vsmain',
    buffers: [
      { arrayStride: 4 * 3, attributes: [{ shaderLocation: 0, offset: 0, format: 'float32x3' }] },
      { arrayStride: 4 * 2, attributes: [{ shaderLocation: 1, offset: 0, format: 'float32x2' }] }
    ]
  },
  fragment: {
    module: shaderModule,
    entryPoint: 'psmain',
    targets: [{ format: presentationFormat }]
  },
  primitive: { topology: 'triangle-strip' }
});

// Offscreen render-target texture (RENDER_ATTACHMENT so we can draw into it,
// TEXTURE_BINDING so we can sample it in the second pass)
const screenTexture0 = device.createTexture({
  size:   presentationSize,
  usage:  GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.TEXTURE_BINDING, // 0x10 | 0x04
  format: presentationFormat // 'bgra8unorm'
});
const screenTextureView0 = screenTexture0.createView();

// Bind group for the on-screen pass - samples the offscreen texture
const sceneUniformBindGroup1 = device.createBindGroup({
  layout: sceneUniformBindGroupLayout,
  entries: [
    { binding: 0, resource: { buffer: matrixUniformBuffer } },
    { binding: 1, resource: textureData.s },
    { binding: 2, resource: screenTextureView0 }
  ]
});

const pipeline1 = device.createRenderPipeline({
  layout: device.createPipelineLayout({ bindGroupLayouts: [sceneUniformBindGroupLayout] }),
  vertex: {
    module: shaderModule,
    entryPoint: 'vsmain',
    buffers: [
      { arrayStride: 4 * 3, attributes: [{ shaderLocation: 0, offset: 0, format: 'float32x3' }] },
      { arrayStride: 4 * 2, attributes: [{ shaderLocation: 1, offset: 0, format: 'float32x2' }] }
    ]
  },
  fragment: {
    module: shaderModule,
    entryPoint: 'psmain',
    targets: [{ format: presentationFormat }]
  },
  primitive: { topology: 'triangle-strip' }
});

function draw() {
  // Pass 1: draw the textured square into the offscreen texture,
  // viewed from a different camera position (1,1,1)
  {
    createMatrixUniform(matrixUniformBuffer, 1, 1, 1);

    const commandEncoder = device.createCommandEncoder();
    const renderPassDescriptor = { // GPURenderPassDescriptor
      colorAttachments: [{
        view:       screenTextureView0,
        loadOp:     "clear",
        clearValue: [0.0, 0.8, 0.8, 1],
        storeOp:    'store'
      }]
    };
    const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);
    passEncoder.setViewport(0.0, 0.0,                     // x, y
                            canvas.width, canvas.height,  // width, height
                            0, 1);                        // minDepth, maxDepth
    passEncoder.setPipeline(pipeline);
    passEncoder.setVertexBuffer(0, squareBuffer.v);
    passEncoder.setVertexBuffer(1, squareBuffer.t);
    passEncoder.setBindGroup(0, sceneUniformBindGroup);
    passEncoder.draw(4, 1, 0, 0);
    passEncoder.end();
    device.queue.submit([commandEncoder.finish()]);
  }

  // ------------------------------------------

  // Pass 2: draw the square to the canvas, textured with the offscreen
  // result, with the camera set back to facing forwards (0,0,1)
  {
    createMatrixUniform(matrixUniformBuffer);

    const contextView    = context.getCurrentTexture().createView();
    const commandEncoder = device.createCommandEncoder();
    const renderPassDescriptor = { // GPURenderPassDescriptor
      colorAttachments: [{
        view:       contextView,
        loadOp:     "clear",
        clearValue: [0.8, 0.8, 0.8, 1],
        storeOp:    'store'
      }]
    };
    const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);
    passEncoder.setViewport(0.0, 0.0,                     // x, y
                            canvas.width, canvas.height,  // width, height
                            0, 1);                        // minDepth, maxDepth
    passEncoder.setPipeline(pipeline1);
    passEncoder.setVertexBuffer(0, squareBuffer.v);
    passEncoder.setVertexBuffer(1, squareBuffer.t);
    passEncoder.setBindGroup(0, sceneUniformBindGroup1);
    passEncoder.draw(4, 1, 0, 0);
    passEncoder.end();
    device.queue.submit([commandEncoder.finish()]);
  }

  //requestAnimationFrame(frame);
}
draw();
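Note that pipeline and pipeline1 are built from identical descriptors here, since both passes render to a presentationFormat target - what actually distinguishes the two passes is the bind group (sceneUniformBindGroup1 samples the offscreen texture screenTexture0 instead of the loaded image) and the colorAttachments view they render into.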
Things to Try
• Animate the camera for the first pass (offscreen) - see the sketch after this list
• Develop a more complex scene to visualize the offscreen rendering concept
• Move around the scene as normal, but render a small quad in the top-left corner with a 'rear' view of the camera (what's behind you) - using an offscreen prerender pass first
• Add multiple render to texture passes (you can render to more than one texture)
• Render different information to textures - for example, store the depth information, color information, and positions - all in textures, then render them on the screen in a second pass
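For the first suggestion, a minimal sketch - it assumes draw() is modified so the offscreen pass takes the camera position as parameters (the draw(camx, camy, camz) signature is hypothetical, replacing the hard-coded createMatrixUniform(matrixUniformBuffer, 1, 1, 1) call):

// Animate the offscreen camera by orbiting it around the square;
// the on-screen pass still resets the camera to face forwards
let time = 0;
function frame() {
  time += 0.01;
  draw(Math.sin(time), 1, Math.cos(time)); // hypothetical draw(camx, camy, camz)
  requestAnimationFrame(frame);
}
requestAnimationFrame(frame);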
Resources and Links
• WebGPU Lab Example [LINK]
• Deferred Renderer Example (uses the render-to-texture technique) [LINK]