Masking and Ortho


Render a transparent image with masking, so that certain pixels can be 'discarded' and you can see through parts of it. In this tutorial we create a full-screen image which is drawn first; selected pixels are then discarded so you can see through to the scene behind it. Behind the overlay we draw a pair of cubes using an orthographic projection matrix (rather than a perspective one), so objects do not get smaller as they move further away.



Masked overlay image with a set of nested cubes (using transparency) rendered in the background using an ortho projection (fixed size with distance).


Functions Used: setVertexBuffer(), setIndexBuffer(), drawIndexed(), createBuffer(), getMappedRange(), getContext(), requestAdapter(), getPreferredCanvasFormat(), createCommandEncoder(), beginRenderPass(), setPipeline(), draw(), end(), submit(), getCurrentTexture(), createView(), createShaderModule()
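To make the 'fixed size with distance' behaviour concrete, here is a minimal sketch contrasting the two projection setups - it simply uses the gl-matrix library that the demo loads later, and the os half-extent of 3 is the same illustrative value the full listing uses:

// Perspective: objects shrink as they move away from the camera.
// perspective(out, fovy, aspect, near, far)
let projMat = mat4.create();
mat4.perspective(projMat, Math.PI / 2, canvas.width / canvas.height, 0.1, 100.0);

// Orthographic: a box-shaped view volume, so objects keep the same
// on-screen size regardless of distance (what this demo uses).
// ortho(out, left, right, bottom, top, near, far)
let os = 3; // half-extent of the view volume (illustrative)
mat4.ortho(projMat, -os, os, -os, os, 0.01, 5.0);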


The background image (full screen) is split into its own function object (easier to manage) and can be removed/added as needed. Just be sure it is drawn first.
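As a rough usage sketch (the names match the full listing below), the intended pattern is:

let scope = new background();
await scope.create(device, presentationFormat);

// each frame: draw the background/overlay first (its render pass clears the attachments),
// then draw the rest of the scene with loadOp 'load' so it shows through the discarded pixels
scope.draw(device, context, depthTexture, viewMatrix, projectionMatrix);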

background = function()
{

console.log('background.js');


const s = 1.0;
this.positions = new Float32Array([-s,  s,  0,
                                    -s, -s,  0,
                                     s, -s,  0,
                                     s,  s,  0 ]);

this.indices = new Uint32Array([ 0,1,2,    2,3,0 ]);

this.colors  = new Float32Array([ 0,1,0,  0,1,0,  0,1,0,  0,1,0 ]);

this.normals = new Float32Array([ 0,1,0,  0,1,0,  0,1,0,  0,1,0 ]);

this.uvs  = new Float32Array([ 0,0,  1,0,  1,1,  0,1 ]);

this.timer = new Float32Array([0.0]);

this.create = async function(device, presentationFormat)
{
this.positionBuffer = device.createBuffer({
  size:  this.positions.byteLength,
  usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST
});

this.colorBuffer = device.createBuffer({
  size:  this.colors.byteLength,
  usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST
});

this.normalBuffer = device.createBuffer({
  size:  this.normals.byteLength,
  usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST
});

this.uvBuffer = device.createBuffer({
  size:  this.uvs.byteLength,
  usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST
});

this.indicesBuffer = device.createBuffer({
  size:  this.indices.byteLength,
  usage: GPUBufferUsage.INDEX | GPUBufferUsage.COPY_DST
});

this.timerBuffer = device.createBuffer({
  size:  4, // single float
  usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST
});

device.queue.writeBuffer(this.positionBuffer, 0, this.positions);
device.queue.writeBuffer(this.colorBuffer,    0, this.colors   );
device.queue.writeBuffer(this.normalBuffer,   0, this.normals  );
device.queue.writeBuffer(this.indicesBuffer,  0, this.indices  );
device.queue.writeBuffer(this.uvBuffer,       0, this.uvs      );
device.queue.writeBuffer(this.timerBuffer,    0, this.timer    );

// var vertWGSL = document.getElementById('vertex.wgsl').innerHTML;
// var fragWGSL = document.getElementById('fragment.wgsl').innerHTML;

var vertWGSL = `
struct Uniforms {
  viewMatrix : mat4x4<f32>,
  projMatrix : mat4x4<f32>,
};
@binding(0) @group(0) var<uniform> uniforms : Uniforms;

struct VSOut {
    @builtin(position) Position: vec4<f32>,
    @location(0)       color   : vec3<f32>,
    @location(1)       normal  : vec3<f32>,
    @location(2)       uvs     : vec2<f32>,
};

@vertex 
fn main(@location(0) inPos  : vec3<f32>,
        @location(1) color  : vec3<f32>,
        @location(2) normal : vec3<f32>,
        @location(3) uvs    : vec2<f32>) -> VSOut
{
  var vsOut: VSOut;
  //vsOut.Position = uniforms.projMatrix * uniforms.viewMatrix * vec4<f32>( inPos, 1.0);
  vsOut.Position = vec4<f32>( inPos, 1.0);
  vsOut.uvs      = uvs;
  vsOut.color    = color;
  vsOut.normal   = (uniforms.viewMatrix * vec4<f32>(normal, 0.0)).xyz;
  return vsOut;
}
`;
  
var fragWGSL = `
@group(0) @binding(1) var mySampler: sampler;
@group(0) @binding(2) var myTexture: texture_2d<f32>;
@group(0) @binding(3) var <uniform> myTimer:   f32;

@fragment
fn main(@location(0) inColor: vec3<f32>,
        @location(1) normal : vec3<f32>,
        @location(2) uvs    : vec2<f32>) -> @location(0) vec4<f32> 
{
    let scrolluv = uvs;
    
    // return vec4 (rgba)
    let texCol = textureSample(myTexture, mySampler, scrolluv );
    
    if ( texCol.r > 0.5 )
    {
        discard;
    }
    

    // hard code reference direction (z direction)    
    let dir   = vec3<f32>(0.0, 0.0, 1.0);

    // scale brightness based on the normal vs the reference direction
    let illum = abs( dot( dir, normal ) );

    // use the texture for the color (illum could be used to scale it)
    return vec4<f32>( texCol.xyz, 1.0 );
}
`;

// ----------------------------------------------------------------

let textureSampler = device.createSampler({
     minFilter: "linear",
     magFilter: "linear",

     addressModeU: "repeat",
     addressModeV: "repeat",
     addressModeW: "repeat"
});

const img = document.createElement("img");
img.src = 'https://webgpulab.xbdev.net/var/images/gunscope.jpg';
await img.decode();

const basicTexture = device.createTexture({
    size: [img.width, img.height, 1],
    format: presentationFormat, // "bgra8unorm",
    usage:  GPUTextureUsage.COPY_DST | GPUTextureUsage.TEXTURE_BINDING
});

const imageCanvas = document.createElement('canvas');
imageCanvas.width  = img.width;
imageCanvas.height = img.height;
const imageCanvasContext = imageCanvas.getContext('2d');
imageCanvasContext.drawImage(img, 0, 0, imageCanvas.width, imageCanvas.height);
const imageData = imageCanvasContext.getImageData(0, 0, imageCanvas.width, imageCanvas.height);
let textureData = new Uint8Array(img.width * img.height * 4);
for (let x=0; x<img.width * img.height * 4; x++)
{
   textureData[x] = imageData.data[x];
}

device.queue.writeTexture( { texture: basicTexture },
            textureData,
            {   offset     :  0,
                bytesPerRow:  img.width * 4,
                rowsPerImage: img.height
             },
            [ img.width,  img.height,  1 ]   );


// ----------------------------------------------------------------

// dynamic world transforms (animate/rotate the shape)
//const rotation   = [0, 0, 0];
//let rotateXMat   = mat4.create();
//let rotateYMat   = mat4.create();
//let rotateZMat   = mat4.create();


//projectionMatrix     = mat4.create();
//const viewMatrix           = mat4.create();
//viewProjectionMatrix = mat4.create();
//modelMatrix          = mat4.create();

//mat4.perspective(projectionMatrix, Math.PI / 2, canvas.width / canvas.height, 0.1, 10.0)
//mat4.lookAt(viewMatrix, [0, 1.4, 1], [0, 0, 0], [0, 1, 0]);
//mat4.multiply(viewProjectionMatrix, projectionMatrix, viewMatrix);

this.vertexUniformBuffer = device.createBuffer({
  size: 128,
  usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST
});

//device.queue.writeBuffer(this.vertexUniformBuffer,   0,  viewMatrix );
//device.queue.writeBuffer(this.vertexUniformBuffer,   64, projectionMatrix          );

// ----------------------------------------------------------------

this.sceneUniformBindGroupLayout = device.createBindGroupLayout({
  entries: [
    { binding: 0, visibility: GPUShaderStage.VERTEX,   buffer:  { type: "uniform"   } },
    { binding: 1, visibility: GPUShaderStage.FRAGMENT, sampler: { type: "filtering" } },
    { binding: 2, visibility: GPUShaderStage.FRAGMENT, texture: { sampleType: "float",
                                                                  viewDimension: "2d" } },
    { binding: 3, visibility: GPUShaderStage.FRAGMENT, buffer:  { type: "uniform"   } },
  ]
});

this.uniformBindGroup = device.createBindGroup({
  layout:   this.sceneUniformBindGroupLayout,
  entries: [
    { binding: 0, resource: { buffer: this.vertexUniformBuffer } },
    { binding: 1, resource: textureSampler },
    { binding: 2, resource: basicTexture.createView() },
    { binding: 3, resource: { buffer: this.timerBuffer        } }
   ],
});

// ----------------------------------------------------------------

//const depthTexture = device.createTexture({
//  size   : presentationSize,
//  format : 'depth24plus',
//  usage  : GPUTextureUsage.RENDER_ATTACHMENT,
//});

// ----------------------------------------------------------------


this.pipeline = device.createRenderPipeline({
    layout: device.createPipelineLayout({ bindGroupLayouts: [this.sceneUniformBindGroupLayout] }),
    vertex:   {  module    : device.createShaderModule({
                             code: vertWGSL }),
                 entryPoint: 'main',
                 buffers   : [ { arrayStride: 12, attributes: [{ shaderLocation: 0,
                                                                 format: "float32x3",
                                                                 offset: 0  }]        },
                               { arrayStride: 12, attributes: [{ shaderLocation: 1,
                                                                 format: "float32x3",
                                                                 offset: 0  }]        },
                               { arrayStride: 12, attributes: [{ shaderLocation: 2,
                                                                 format: "float32x3",
                                                                 offset: 0  }]        },
                               { arrayStride: 8,  attributes: [{ shaderLocation: 3,
                                                                 format: "float32x2",
                                                                 offset: 0  }]        }
                             ]},
    fragment: {  module    : device.createShaderModule({
                             code: fragWGSL }),
                 entryPoint: 'main',
                 targets   : [{ format: presentationFormat }] },
    primitive: { topology  : 'triangle-list',
                 frontFace : "ccw",
                 cullMode  : 'none',
                 stripIndexFormat: undefined },
    depthStencil: {
                 depthWriteEnabled: true,
                 depthCompare     : 'less',
                 format           : 'depth24plus' }
});

}
// end create



this.draw = async function( device, context, depthTexture, viewMatrix, projectionMatrix )
{

  // GPURenderPassDescriptor
  this.renderPassDescriptor = {
        colorAttachments:  [{
             view      : undefined, // assigned later each frame
             loadOp    : "clear",
             clearValue: { r: 0.2, g: 0.2, b: 0.2, a: 1.0 },
             storeOp   : 'store' }],
        depthStencilAttachment: {
             view: depthTexture.createView(),
             depthLoadOp:     "clear",
             depthClearValue: 1.0,
             depthStoreOp:    'store',
             // stencilLoadValue: 0,
             // stencilStoreOp: 'store'
          }
  };
  
  
  // --------------------------------------------------
  // Update uniform buffers

  device.queue.writeBuffer(this.vertexUniformBuffer, 0, viewMatrix );

  this.timer[0] = this.timer[0] + 0.01;
  device.queue.writeBuffer(this.timerBuffer, 0, this.timer );

  device.queue.writeBuffer(this.vertexUniformBuffer, 64, projectionMatrix);

  // --------------------------------------------------
  this.renderPassDescriptor.colorAttachments[0].view = context.getCurrentTexture().createView();

  const commandEncoder = device.createCommandEncoder();

  const renderPass = commandEncoder.beginRenderPass(this.renderPassDescriptor);
  renderPass.setPipeline(this.pipeline);
  renderPass.setBindGroup(0, this.uniformBindGroup);
  renderPass.setVertexBuffer(0, this.positionBuffer);
  renderPass.setVertexBuffer(1, this.colorBuffer);
  renderPass.setVertexBuffer(2, this.normalBuffer);
  renderPass.setVertexBuffer(3, this.uvBuffer);
  renderPass.setIndexBuffer(this.indicesBuffer, 'uint32');
  renderPass.drawIndexed(6, 1, 0, 0);
  renderPass.end();
  device.queue.submit([commandEncoder.finish()]);

}
// end draw(..)
  
}// end background



The main body of the code handles everything else - it sets up WebGPU, creates the nested cubes, and runs the render loop.

// Load the matrix library dynamically (on-the-fly)
let matprom = await fetch( 'https://cdnjs.cloudflare.com/ajax/libs/gl-matrix/2.6.0/gl-matrix-min.js' );
let mattex  = await matprom.text();
var script  = document.createElement('script');
script.type = 'text/javascript';
script.innerHTML = mattex;
document.head.appendChild(script);


const canvas = document.createElement('canvas');
document.body.appendChild( canvas );
canvas.width = canvas.height = 500;

const gpu = navigator.gpu;
console.log( 'navigator.gpu:', gpu );

const adapter = await gpu.requestAdapter();
const device  = await adapter.requestDevice();
const context = canvas.getContext('webgpu');

const presentationFormat = navigator.gpu.getPreferredCanvasFormat();  // context.getPreferredFormat(adapter) - no longer supported
context.configure({
  device,
  format: presentationFormat,
  alphaMode: 'premultiplied',  /* IMPORTANT - vs the default 'opaque' */
});

////////////////////////////////////////
// Create vertex buffers and load data
////////////////////////////////////////

function createCube()
{
    // unit cube
    const cubeVertexArray = new Float32Array([
      // position,    color
       1, -1,  1,   1, 0, 0,
      -1, -1,  1,   1, 0, 0,
      -1, -1, -1,   1, 0, 0,
       1, -1, -1,   1, 0, 0,
       1, -1,  1,   1, 0, 0,
      -1, -1, -1,   1, 0, 0,

       1,  1,  1,   0, 1, 0,
       1, -1,  1,   0, 1, 0,
       1, -1, -1,   0, 1, 0,
       1,  1, -1,   0, 1, 0,
       1,  1,  1,   0, 1, 0,
       1, -1, -1,   0, 1, 0,

      -1,  1,  1,   0, 0, 1,
       1,  1,  1,   0, 0, 1,
       1,  1, -1,   0, 0, 1,
      -1,  1, -1,   0, 0, 1,
      -1,  1,  1,   0, 0, 1,
       1,  1, -1,   0, 0, 1,

      -1, -1,  1,   1, 1, 0,
      -1,  1,  1,   1, 1, 0,
      -1,  1, -1,   1, 1, 0,
      -1, -1, -1,   1, 1, 0,
      -1, -1,  1,   1, 1, 0,
      -1,  1, -1,   1, 1, 0,

       1,  1,  1,   1, 0, 1,
      -1,  1,  1,   1, 0, 1,
      -1, -1,  1,   1, 0, 1,
      -1, -1,  1,   1, 0, 1,
       1, -1,  1,   1, 0, 1,
       1,  1,  1,   1, 0, 1,

       1, -1, -1,   0, 1, 1,
      -1, -1, -1,   0, 1, 1,
      -1,  1, -1,   0, 1, 1,
       1,  1, -1,   0, 1, 1,
       1, -1, -1,   0, 1, 1,
      -1,  1, -1,   0, 1, 1
    ]);

    const gpuBuffer = device.createBuffer({ size:  cubeVertexArray.byteLength,
                                            usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST });

    device.queue.writeBuffer(gpuBuffer, 0, cubeVertexArray);

    return { buffer: gpuBuffer, numVertices: 36, stride: 6*4 };
}

// --------------------------------------------------------------------------

let basicVertWGSL = `
@group(0) @binding(0) var<uniform> timer : f32;

struct Transforms {
    model      : mat4x4<f32>,
    view       : mat4x4<f32>,
    projection : mat4x4<f32>,
};
@group(0) @binding(1) var<uniform> transforms : Transforms;

struct VertexOutput {
  @builtin(position) Position : vec4<f32>,
  @location(0) fragColor      : vec3<f32>
};

@vertex
fn main(@location(0) position : vec3<f32>,
        @location(1) color    : vec3<f32>) -> VertexOutput {
        
  var mvp = transforms.projection * transforms.view * transforms.model;
        
  var output : VertexOutput;
  output.Position     = mvp * vec4<f32>(position, 1.0);
  output.fragColor    = color;
  return output;
}
`;

let basicPixelWGSL = `
@group(0) @binding(2) var<uniform> alpha : f32;

@fragment
fn main(@location(0) fragColor:   vec3<f32>) -> @location(0) vec4<f32> {
 
  return vec4<f32>(fragColor, alpha);
  
  // if you want a 'constant' color for the shape
  // return vec4<f32>(1.0, 0.0, 0.0, 1.0);
}
`;

// ----------------------------------------------------------------

function buildMatrix( p, r, s ) // position, rotation, scale
{
    // if not set fall back to default values
    if (!s) s = {x:1, y:1, z:1};
    if (!r) r = {x:0, y:0, z:0};
    if (!p) p = {x:0, y:0, z:0};

    // Create the matrix in Javascript (using the matrix library)
    const modelMatrix = mat4.create();

    // create the model transform with a translation, rotation and scale
    let translateMat = mat4.create();   mat4.fromTranslation( translateMat, Object.values(p) );
    let rotateXMat   = mat4.create();   mat4.fromXRotation( rotateXMat, r.x );
    let rotateYMat   = mat4.create();   mat4.fromYRotation( rotateYMat, r.y );
    let rotateZMat   = mat4.create();   mat4.fromZRotation( rotateZMat, r.z );
    let scaleMat     = mat4.create();   mat4.fromScaling( scaleMat, Object.values(s) );

    mat4.multiply(modelMatrix, modelMatrix, translateMat);
    mat4.multiply(modelMatrix, modelMatrix, rotateXMat);
    mat4.multiply(modelMatrix, modelMatrix, rotateYMat);
    mat4.multiply(modelMatrix, modelMatrix, rotateZMat);
    mat4.multiply(modelMatrix, modelMatrix, scaleMat);
    return modelMatrix;
}

// build a model matrix (scale, rotate and position it wherever we want)
let modelMatrix = buildMatrix();

// setup the projection
let projectionMatrix = mat4.create();
// perspective(out, fovy, aspect, near, far)
mat4.perspective(projectionMatrix, Math.PI / 2, canvas.width / canvas.height, 0.001, 500.0);
// ortho(out, left, right, bottom, top, near, far) - overwrites the perspective version above
let os = 3;
mat4.ortho(projectionMatrix, -os, os, -os, os, 0.01, 5.0);


// default camera `lookat`
let viewMatrix = mat4.create();
mat4.lookAt(viewMatrix, [0, 0, -4], [0, 0, 0], [0, 1, 0]);


let mvpUniformBuffer = device.createBuffer({
  size: 64*3,
  usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST
});

device.queue.writeBuffer(mvpUniformBuffer,      0,      modelMatrix);
device.queue.writeBuffer(mvpUniformBuffer,      64,     viewMatrix);
device.queue.writeBuffer(mvpUniformBuffer,      128,    projectionMatrix);

// ----------------------------------------------------------------

const timerUniformBuffer = device.createBuffer({ size:  4, // single float for the timer
                                                 usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST });

let timeData = new Float32Array( [0.0] );
device.queue.writeBuffer(timerUniformBuffer, 0, timeData );

// ----------------------------------------------------------------

const alphaUniformBuffer = device.createBuffer({ size:  4, // single float for the alpha value
                                                 usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST });

let alphaData = new Float32Array( [1.0] );
device.queue.writeBuffer(alphaUniformBuffer, 0, alphaData );

// ----------------------------------------------------------------

const sceneUniformBindGroupLayout = device.createBindGroupLayout({
  entries: [ { binding: 0, visibility: GPUShaderStage.VERTEX,   buffer: { type: "uniform" } },
             { binding: 1, visibility: GPUShaderStage.VERTEX,   buffer: { type: "uniform" } },
             { binding: 2, visibility: GPUShaderStage.FRAGMENT, buffer: { type: "uniform" } },
           ]
});

const sceneUniformBindGroup = device.createBindGroup({
  layout: sceneUniformBindGroupLayout,
  entries: [ {  binding: 0, resource: { buffer: timerUniformBuffer  } },
             {  binding: 1, resource: { buffer: mvpUniformBuffer    } },
             {  binding: 2, resource: { buffer: alphaUniformBuffer  } },
           ]
});

// ----------------------------------------------------------------

let cubeData = createCube();

// ----------------------------------------------------------------

const pipeline = device.createRenderPipeline({
  layout: device.createPipelineLayout({ bindGroupLayouts: [sceneUniformBindGroupLayout] }),
  vertex: {
    module: device.createShaderModule({
      code: basicVertWGSL
    }),
    entryPoint: "main",
    buffers: [ { arrayStride: cubeData.stride,
                 attributes: [ { shaderLocation: 0, offset: 0,   format: 'float32x3' },  // position
                               { shaderLocation: 1, offset: 3*4, format: 'float32x3' }   // color
             ] } ]
  },
  fragment: {
    module: device.createShaderModule({ code: basicPixelWGSL }),
    entryPoint: "main",
    targets: [{ format: presentationFormat,
                blend: { color: { srcFactor: 'src-alpha', dstFactor: "one-minus-src-alpha", operation: "add" },
                         alpha: { srcFactor: 'one',       dstFactor: "one",                 operation: "add" }  }
              }]
  },
  primitive: {
    topology: "triangle-list",
    cullMode: 'back'
  },
  depthStencil: {
    format: "depth24plus",
    depthWriteEnabled: true,
    depthCompare: "less"
  }
});

    
const depthTexture = device.createTexture({
  size: [canvas.width, canvas.height, 1],
  format: "depth24plus",
  usage:  GPUTextureUsage.RENDER_ATTACHMENT
});


let scope = new background();
await scope.create(device, presentationFormat);


let rotation = {x:0, y:0, z:0};

function draw() {

  scope.draw(device, context, depthTexture, viewMatrix, projectionMatrix);

  // update uniform buffer
  timeData[0] += 0.005;
  device.queue.writeBuffer(timerUniformBuffer, 0, timeData);

  // Draw 2 cubes - one inside the other
  for (let k=0; k<2; k++)
  {
      // update the rotation of the local cube
      rotation.x += 0.02;
      rotation.y += 0.03;
      rotation.z += 0.01;

      let scale = {x:1, y:1, z:1};
      if ( k==0 )
      {
         alphaData[0] = 1.0;
         scale.x = scale.y = scale.z = 0.5;
      }
      if ( k==1 )
      {
         alphaData[0] = 0.2;
         scale.x = scale.y = scale.z = 1.0;
      }

      device.queue.writeBuffer(alphaUniformBuffer, 0, alphaData );


      modelMatrix = buildMatrix( null, rotation, scale );
      device.queue.writeBuffer(mvpUniformBuffer, 0, modelMatrix);
    
    
      const renderPassDescription = {
          colorAttachments: [{
            view: context.getCurrentTexture().createView(),
            loadOp: 'load', // ( k==0 ? "clear" : "load" ) - the clear is done by the background, which is drawn first
            clearValue: [0.9, 0.9, 0.9, 1], // clear screen color
            storeOp: 'store'
          }],
          depthStencilAttachment: {
            view: depthTexture.createView(),
            depthLoadOp:  'load', // ( k==0 ? "clear" : "load" ) - the clear is done by the background, which is drawn first
            depthClearValue: 1,
            depthStoreOp: "store",
          }
      };

      renderPassDescription.colorAttachments[0].view = context.getCurrentTexture().createView();

      const commandEncoder = device.createCommandEncoder();
      const renderPass = commandEncoder.beginRenderPass(renderPassDescription);

      renderPass.setPipeline(pipeline);
      renderPass.setVertexBuffer(0, cubeData.buffer);
      renderPass.setBindGroup(0, sceneUniformBindGroup);
      renderPass.draw(cubeData.numVertices, 1, 0, 0);
      renderPass.end();

      device.queue.submit([commandEncoder.finish()]);
  }

  
requestAnimationFrame(draw);
};

draw();




Resources and Links


• WebGPU Lab Example []















