Steganography (Hiding Images inside Images)
An image is made up of pixels, and each color channel of a pixel is typically stored as 8 bits. What happens if we put something else in the lower few bits? For example, another image?
Steganography of an image within another image using pixel-level encoding.
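Before looking at the full WebGPU listing, the core trick is plain bit manipulation. The sketch below is a minimal CPU-side illustration (plain JavaScript, with the 3-bit split and the sample values chosen purely for illustration): it hides the top bits of one 8-bit channel value in the low bits of another, then recovers an approximation of the hidden value.

// Hide the top `bits` bits of `hidden` inside the low bits of `cover` (both 0-255)
function encodeChannel(cover, hidden, bits = 3) {
    const coverMask = (0xff << bits) & 0xff;  // keep the high bits of the cover value
    const hiddenTop = hidden >> (8 - bits);   // take the high bits of the hidden value
    return (cover & coverMask) | hiddenTop;
}

// Recover the hidden value (approximately - its low bits are gone for good)
function decodeChannel(encoded, bits = 3) {
    const mask = 0xff >> (8 - bits);
    return (encoded & mask) << (8 - bits);
}

console.log(encodeChannel(200, 100, 3)); // cover barely changes: 200 -> 203
console.log(decodeChannel(203, 3));      // hidden comes back as 96 (close to 100)

The more low bits we give to the hidden image, the better it survives and the more visibly the cover image degrades; the compute shader below does exactly this per color channel on the GPU.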
Functions Used: requestAdapter(), requestDevice(), getPreferredCanvasFormat(), getCurrentTexture(), createTexture(), writeTexture(), createBindGroupLayout(), createBindGroup(), createView(), createShaderModule(), createComputePipeline(), createCommandEncoder(), beginComputePass(), setPipeline(), setBindGroup(), dispatchWorkgroups(), end(), copyTextureToTexture(), finish(), submit()
Complete Code
let div = document.createElement('div');
document.body.appendChild(div);
div.style['font-size'] = '20pt';

function log(s) {
    console.log(s);
    let args = [...arguments].join(' ');
    div.innerHTML += args + '<br><br>';
}

log('WebGPU Compute Example');

async function loadTexture(fileName = "https://webgpulab.xbdev.net/var/images/test512.png",
                           width = 512, height = 512) {
    console.log('loading image:', fileName);

    // Load image
    const img = document.createElement("img");
    img.src = fileName;
    await img.decode();

    const originalWidth  = img.width;
    const originalHeight = img.height;

    const imageCanvas = document.createElement('canvas');
    imageCanvas.width  = width;
    imageCanvas.height = height;
    const imageCanvasContext = imageCanvas.getContext('2d');

    // Draw the image onto the canvas, resizing it to the specified width and height
    imageCanvasContext.drawImage(img, 0, 0, width, height);
    const imageData = imageCanvasContext.getImageData(0, 0, width, height);
    const textureData = imageData.data;
    console.log('textureData.byteLength:', textureData.byteLength);

    const basicTexture = device.createTexture({
        size:   [width, height, 1],
        format: "rgba8unorm",
        usage:  GPUTextureUsage.COPY_DST | GPUTextureUsage.TEXTURE_BINDING
    });

    device.queue.writeTexture(
        { texture: basicTexture },
        textureData,
        { bytesPerRow: width * 4 },
        [width, height, 1]
    );

    return { w: width, h: height, t: basicTexture };
}

if (!navigator.gpu) {
    log("WebGPU is not supported (or is it disabled? flags/settings)");
    return;
}

const adapter = await navigator.gpu.requestAdapter();
const device  = await adapter.requestDevice();

const imgWidth  = 512;
const imgHeight = imgWidth;

// ----------------------------------------------------------

const texture0 = await loadTexture('https://webgpulab.xbdev.net/var/images/test512.png', imgWidth);
const texture1 = await loadTexture('https://webgpulab.xbdev.net/var/images/avatar.png',  imgWidth);

// ----------------------------------------------------------

// Basic canvas which will be used to display the output from the compute shader
let canvasa = document.createElement('canvas');
document.body.appendChild(canvasa);
canvasa.height = canvasa.width = imgWidth;

const context = canvasa.getContext('webgpu');

const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
console.log('presentationFormat:', presentationFormat);

context.configure({
    device: device,
    usage:  GPUTextureUsage.RENDER_ATTACHMENT |
            GPUTextureUsage.COPY_SRC |
            GPUTextureUsage.COPY_DST,
    format: "rgba8unorm" /*presentationFormat*/
});

let canvasTexture = context.getCurrentTexture();

// ----------------------------------------------------------

// Output texture - output from the compute shader written to this texture
// Copy this texture to the 'canvas' - needs to be the same size as the output
// canvas size
const texture2 = device.createTexture({
    size:   [imgWidth, imgHeight, 1],
    format: "rgba8unorm",
    usage:  GPUTextureUsage.COPY_DST |
            GPUTextureUsage.COPY_SRC |
            GPUTextureUsage.TEXTURE_BINDING |
            GPUTextureUsage.STORAGE_BINDING
});

// ----------------------------------------------------------

const GCOMPUTE = GPUShaderStage.COMPUTE;

// Bind group layout and bind group
const bindGroupLayout = device.createBindGroupLayout({
    entries: [
        { binding: 0, visibility: GCOMPUTE, texture: { sampleType: "float" } },
        { binding: 1, visibility: GCOMPUTE, texture: { sampleType: "float" } },
        { binding: 2, visibility: GCOMPUTE, storageTexture: { format: "rgba8unorm",
                                                              access: "write-only",
                                                              viewDimension: "2d" } }
    ]
});

const bindGroup = device.createBindGroup({
    layout: bindGroupLayout,
    entries: [
        { binding: 0, resource: texture0.t.createView() },
        { binding: 1, resource: texture1.t.createView() },
        { binding: 2, resource: texture2.createView()   }
    ]
});

// Compute shader code
const computeShader = `
@group(0) @binding(0) var myTexture0: texture_2d<f32>;
@group(0) @binding(1) var myTexture1: texture_2d<f32>;
@group(0) @binding(2) var myTexture2: texture_storage_2d<rgba8unorm, write>;

fn encode( uv:vec2<f32>, tex0:texture_2d<f32>, tex1:texture_2d<f32>, numBitsShift:u32 ) -> vec3<f32>
{
    // Sample color from the first texture
    var texCol0 = textureLoad( tex0, vec2<i32>( i32(uv.x*${imgWidth}), i32(uv.y*${imgHeight}) ), 0 );
    // Sample color from the second texture
    var texCol1 = textureLoad( tex1, vec2<i32>( i32(uv.x*${imgWidth}), i32(uv.y*${imgHeight}) ), 0 );

    // 0-1.0 to 0-255
    var encodedR = u32( floor(texCol0.r * 255.0) );
    var encodedG = u32( floor(texCol0.g * 255.0) );
    var encodedB = u32( floor(texCol0.b * 255.0) );

    // Mask keeping only the top (8 - numBitsShift) bits of the cover image
    let hexvalue0:u32 = u32( 0xff & (0xff << numBitsShift) );
    encodedR = encodedR & hexvalue0;
    encodedG = encodedG & hexvalue0;
    encodedB = encodedB & hexvalue0;

    // Extract bits from texCol1 and merge them into the low bits of the encoded values
    let hexvalue1:u32 = u32( 0xff >> (8 - numBitsShift) );
    encodedR = encodedR | ((u32(floor(texCol1.r * 255.0)) >> (numBitsShift-1) ) & hexvalue1);
    encodedG = encodedG | ((u32(floor(texCol1.g * 255.0)) >> (numBitsShift-1) ) & hexvalue1);
    encodedB = encodedB | ((u32(floor(texCol1.b * 255.0)) >> (numBitsShift-1) ) & hexvalue1);

    // create color with encoded data
    var encodedRf = f32(encodedR) / 255.0;
    var encodedGf = f32(encodedG) / 255.0;
    var encodedBf = f32(encodedB) / 255.0;

    // Combine the encoded colors
    var col = vec3<f32>(encodedRf, encodedGf, encodedBf);
    return col;
}

fn decode( colEncoded: vec3<f32>, numBitsShift:u32 ) -> vec3<f32>
{
    // Convert color channels to 0-255 range
    var encodedR = u32(colEncoded.r * 255.0);
    var encodedG = u32(colEncoded.g * 255.0);
    var encodedB = u32(colEncoded.b * 255.0);

    // Extract the last numBitsShift bits of each color channel
    let hexvalue1 = u32(0xff >> (8 - numBitsShift));
    var hiddenR = (encodedR & hexvalue1) << (8 - numBitsShift);
    var hiddenG = (encodedG & hexvalue1) << (8 - numBitsShift);
    var hiddenB = (encodedB & hexvalue1) << (8 - numBitsShift);

    // Convert back to 0-1.0 range
    var hiddenRf = f32(hiddenR) / 256;
    var hiddenGf = f32(hiddenG) / 256;
    var hiddenBf = f32(hiddenB) / 256;

    // Combine the extracted color channels
    var hiddenColor = vec3<f32>(hiddenRf, hiddenGf, hiddenBf);
    return hiddenColor;
}

@compute @workgroup_size(8, 8)
fn main(@builtin(global_invocation_id) globalId      : vec3<u32>,
        @builtin(local_invocation_id)  localId       : vec3<u32>,
        @builtin(workgroup_id)         workgroupId   : vec3<u32>,
        @builtin(num_workgroups)       workgroupSize : vec3<u32> )
{
    var coords = vec2<f32>( f32(globalId.x), f32(globalId.y) ) * 3.0;

    var uv = vec2<f32>( f32(globalId.x), f32(globalId.y) ); // uvs * 2.0 - 1.0;
    uv = uv / ${imgWidth};  // 0.0 - 1.0
    //uv = uv * 2.0 - 1.0;  // -1 to 1.0

    let numBitsShift:u32 = 5;

    let ecol = encode( uv, myTexture0, myTexture1, numBitsShift );

    // only show encoded image for the 'left' side
    var color = vec4<f32>(ecol, 1.0); // set alpha to 1.0 so there isn't any transparency

    if ( uv.x > 0.5 )
    {
        // decode right side of the image - see what it looks like
        let dcol = decode( ecol, numBitsShift );
        color = vec4<f32>(dcol, 1.0);
    }

    textureStore(myTexture2, vec2<i32>( i32(globalId.x), i32(globalId.y) ), color );
}
`;

// Pipeline setup
const computePipeline = device.createComputePipeline({
    layout: device.createPipelineLayout({ bindGroupLayouts: [bindGroupLayout] }),
    compute: {
        module:     device.createShaderModule({ code: computeShader }),
        entryPoint: "main"
    }
});

// Commands submission
const commandEncoder = device.createCommandEncoder();
const passEncoder = commandEncoder.beginComputePass();
passEncoder.setPipeline(computePipeline);
passEncoder.setBindGroup(0, bindGroup);
passEncoder.dispatchWorkgroups(imgWidth / 8, imgWidth / 8); // one workgroup per 8x8 pixel tile
passEncoder.end();

// Copy the compute result into the canvas texture for display
commandEncoder.copyTextureToTexture(
    { texture: texture2 },
    { texture: canvasTexture },
    { width: imgWidth, height: imgHeight, depthOrArrayLayers: 1 }
);

// Submit GPU commands.
const gpuCommands = commandEncoder.finish();
device.queue.submit([gpuCommands]);

console.log('all good...');
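The demo decodes the right-hand half of the image directly in the shader, but the encoded pixels can also be pulled back to the CPU and decoded there. The following is a rough sketch of that extra step, not part of the original demo; it reuses device, texture2, imgWidth, imgHeight and the same numBitsShift value (5) from the listing above, and assumes it runs after the main command buffer has been submitted.

// Copy the encoded texture into a mappable buffer
const bytesPerRow = imgWidth * 4; // 2048 bytes - already a multiple of 256, as required
const readBuffer = device.createBuffer({
    size:  bytesPerRow * imgHeight,
    usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.MAP_READ
});

const copyEncoder = device.createCommandEncoder();
copyEncoder.copyTextureToBuffer(
    { texture: texture2 },
    { buffer: readBuffer, bytesPerRow: bytesPerRow },
    [imgWidth, imgHeight, 1]
);
device.queue.submit([copyEncoder.finish()]);

// Map the buffer and decode the hidden bits on the CPU
await readBuffer.mapAsync(GPUMapMode.READ);
const pixels = new Uint8Array(readBuffer.getMappedRange());

const numBitsShift = 5;                    // must match the value used in the shader
const mask = 0xff >> (8 - numBitsShift);
const decoded = pixels.map(v => (v & mask) << (8 - numBitsShift));
// 'decoded' now holds the recovered hidden image as RGBA bytes
// (note the alpha bytes are remapped too, so reset them before displaying)
readBuffer.unmap();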
Things to Try
• Try varying the number of bits (see how much, or how little, the main image is affected)
• Write a small webpage that encodes and decodes a .png image (download or upload a .png and it will reveal the hidden image, or let you create one and download it) - a CPU-only decoding sketch is given after this list
• Try modifying the encoded image so the hidden data isn't so easy to 'see' - transform it first (e.g., adjust its histogram)
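For the second suggestion, decoding does not need WebGPU at all, since reading the low bits is cheap on the CPU. The sketch below is one possible starting point (the file input id 'pngInput' and the 5-bit split are assumptions, not part of the original demo); it relies on .png being lossless, so the low bits survive the round trip through the file.

function decodePNGOnCanvas(img, numBits = 5) {
    // Draw the encoded PNG onto a 2D canvas so the pixel bytes can be read back
    const canvas  = document.createElement('canvas');
    canvas.width  = img.width;
    canvas.height = img.height;
    const ctx = canvas.getContext('2d');
    ctx.drawImage(img, 0, 0);

    const imageData = ctx.getImageData(0, 0, canvas.width, canvas.height);
    const data = imageData.data;
    const mask = 0xff >> (8 - numBits);
    for (let i = 0; i < data.length; i += 4) {
        // Promote the hidden low bits of each color channel back to the high bits
        data[i]     = (data[i]     & mask) << (8 - numBits);
        data[i + 1] = (data[i + 1] & mask) << (8 - numBits);
        data[i + 2] = (data[i + 2] & mask) << (8 - numBits);
        data[i + 3] = 255; // keep the result fully opaque
    }
    ctx.putImageData(imageData, 0, 0);
    document.body.appendChild(canvas); // show the recovered hidden image
}

// Usage with a hypothetical <input type="file" id="pngInput"> element:
document.getElementById('pngInput').onchange = async (e) => {
    const img = new Image();
    img.src = URL.createObjectURL(e.target.files[0]);
    await img.decode();
    decodePNGOnCanvas(img, 5);
};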
Resources and Links
• WebGPU Lab Demo [LINK]