How Does Photoshop Blend Two Images Together

How does Photoshop blend two images together?

Photoshop blends two images together by performing a blend operation on each pixel in image A against its corresponding pixel in image B. Each pixel is a color consisting of multiple channels. Assuming we are working with RGB pixels, the channels in each pixel would be red, green and blue. To blend two pixels we blend their respective channels.
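
For instance, a per-channel multiply blend over two interleaved 8-bit RGB buffers might look like the following minimal sketch (the buffer layout and the function name are illustrative assumptions, not Photoshop's actual code):

// A minimal sketch: multiply-blend two interleaved 8-bit RGB buffers channel by channel.
typedef unsigned char uint8;

void BlendMultiplyRGB(uint8 *Target, const uint8 *ImageA, const uint8 *ImageB, int PixelCount)
{
    for (int i = 0; i < PixelCount * 3; i++)
    {
        // The same per-channel operation is applied to R, G and B independently.
        Target[i] = (uint8)((ImageA[i] * ImageB[i]) / 255);
    }
}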

The blend operation that occurs for each blend mode in Photoshop can be summed up in the following macros:

#define ChannelBlend_Normal(A,B)     ((uint8)(A))
#define ChannelBlend_Lighten(A,B) ((uint8)((B > A) ? B:A))
#define ChannelBlend_Darken(A,B) ((uint8)((B > A) ? A:B))
#define ChannelBlend_Multiply(A,B) ((uint8)((A * B) / 255))
#define ChannelBlend_Average(A,B) ((uint8)((A + B) / 2))
#define ChannelBlend_Add(A,B) ((uint8)(min(255, (A + B))))
#define ChannelBlend_Subtract(A,B) ((uint8)((A + B < 255) ? 0:(A + B - 255)))
#define ChannelBlend_Difference(A,B) ((uint8)(abs(A - B)))
#define ChannelBlend_Negation(A,B) ((uint8)(255 - abs(255 - A - B)))
#define ChannelBlend_Screen(A,B) ((uint8)(255 - (((255 - A) * (255 - B)) >> 8)))
#define ChannelBlend_Exclusion(A,B) ((uint8)(A + B - 2 * A * B / 255))
#define ChannelBlend_Overlay(A,B) ((uint8)((B < 128) ? (2 * A * B / 255):(255 - 2 * (255 - A) * (255 - B) / 255)))
#define ChannelBlend_SoftLight(A,B) ((uint8)((B < 128)?(2*((A>>1)+64))*((float)B/255):(255-(2*(255-((A>>1)+64))*(float)(255-B)/255))))
#define ChannelBlend_HardLight(A,B) (ChannelBlend_Overlay(B,A))
#define ChannelBlend_ColorDodge(A,B) ((uint8)((B == 255) ? B:min(255, ((A << 8 ) / (255 - B)))))
#define ChannelBlend_ColorBurn(A,B) ((uint8)((B == 0) ? B:max(0, (255 - ((255 - A) << 8 ) / B))))
#define ChannelBlend_LinearDodge(A,B)(ChannelBlend_Add(A,B))
#define ChannelBlend_LinearBurn(A,B) (ChannelBlend_Subtract(A,B))
#define ChannelBlend_LinearLight(A,B) ((uint8)((B < 128)?ChannelBlend_LinearBurn(A,(2 * B)):ChannelBlend_LinearDodge(A,(2 * (B - 128)))))
#define ChannelBlend_VividLight(A,B) ((uint8)((B < 128)?ChannelBlend_ColorBurn(A,(2 * B)):ChannelBlend_ColorDodge(A,(2 * (B - 128)))))
#define ChannelBlend_PinLight(A,B) ((uint8)((B < 128)?ChannelBlend_Darken(A,(2 * B)):ChannelBlend_Lighten(A,(2 * (B - 128)))))
#define ChannelBlend_HardMix(A,B) ((uint8)((ChannelBlend_VividLight(A,B) < 128) ? 0:255))
#define ChannelBlend_Reflect(A,B) ((uint8)((B == 255) ? B:min(255, (A * A / (255 - B)))))
#define ChannelBlend_Glow(A,B) (ChannelBlend_Reflect(B,A))
#define ChannelBlend_Phoenix(A,B) ((uint8)(min(A,B) - max(A,B) + 255))
#define ChannelBlend_Alpha(A,B,O) ((uint8)(O * A + (1 - O) * B))
#define ChannelBlend_AlphaF(A,B,F,O) (ChannelBlend_Alpha(F(A,B),A,O))

To blend a single RGB pixel you would do the following:

ImageTColorR = ChannelBlend_Glow(ImageAColorR, ImageBColorR);
ImageTColorG = ChannelBlend_Glow(ImageAColorG, ImageBColorG);
ImageTColorB = ChannelBlend_Glow(ImageAColorB, ImageBColorB);

ImageTColor = RGB(ImageTColorR, ImageTColorG, ImageTColorB);

If we wanted to perform a blend operation with a particular opacity, say 50%:

ImageTColorR = ChannelBlend_AlphaF(ImageAColorR, ImageBColorR, ChannelBlend_Subtract, 0.5F);
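
Expanding that macro by hand, with illustrative channel values, shows what happens: the blended result is mixed back with channel A at the given opacity.

uint8 A = 200, B = 100;                                 // example values for one channel
uint8 Blended = ChannelBlend_Subtract(A, B);            // 200 + 100 - 255 = 45
uint8 Result  = ChannelBlend_Alpha(Blended, A, 0.5F);   // 0.5 * 45 + 0.5 * 200 = 122 (truncated)
// ChannelBlend_AlphaF(A, B, ChannelBlend_Subtract, 0.5F) expands to exactly this.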

If we have pointers to the image data for images A, B, and T (our target), we can simplify blending all three channels with this macro:

#define ColorBlend_Buffer(T,A,B,M)      (T)[0] = ChannelBlend_##M((A)[0], (B)[0]), \
                                        (T)[1] = ChannelBlend_##M((A)[1], (B)[1]), \
                                        (T)[2] = ChannelBlend_##M((A)[2], (B)[2])

From it we can derive the following RGB color blend macros:

#define ColorBlend_Normal(T,A,B)        (ColorBlend_Buffer(T,A,B,Normal))
#define ColorBlend_Lighten(T,A,B) (ColorBlend_Buffer(T,A,B,Lighten))
#define ColorBlend_Darken(T,A,B) (ColorBlend_Buffer(T,A,B,Darken))
#define ColorBlend_Multiply(T,A,B) (ColorBlend_Buffer(T,A,B,Multiply))
#define ColorBlend_Average(T,A,B) (ColorBlend_Buffer(T,A,B,Average))
#define ColorBlend_Add(T,A,B) (ColorBlend_Buffer(T,A,B,Add))
#define ColorBlend_Subtract(T,A,B) (ColorBlend_Buffer(T,A,B,Subtract))
#define ColorBlend_Difference(T,A,B) (ColorBlend_Buffer(T,A,B,Difference))
#define ColorBlend_Negation(T,A,B) (ColorBlend_Buffer(T,A,B,Negation))
#define ColorBlend_Screen(T,A,B) (ColorBlend_Buffer(T,A,B,Screen))
#define ColorBlend_Exclusion(T,A,B) (ColorBlend_Buffer(T,A,B,Exclusion))
#define ColorBlend_Overlay(T,A,B) (ColorBlend_Buffer(T,A,B,Overlay))
#define ColorBlend_SoftLight(T,A,B) (ColorBlend_Buffer(T,A,B,SoftLight))
#define ColorBlend_HardLight(T,A,B) (ColorBlend_Buffer(T,A,B,HardLight))
#define ColorBlend_ColorDodge(T,A,B) (ColorBlend_Buffer(T,A,B,ColorDodge))
#define ColorBlend_ColorBurn(T,A,B) (ColorBlend_Buffer(T,A,B,ColorBurn))
#define ColorBlend_LinearDodge(T,A,B) (ColorBlend_Buffer(T,A,B,LinearDodge))
#define ColorBlend_LinearBurn(T,A,B) (ColorBlend_Buffer(T,A,B,LinearBurn))
#define ColorBlend_LinearLight(T,A,B) (ColorBlend_Buffer(T,A,B,LinearLight))
#define ColorBlend_VividLight(T,A,B) (ColorBlend_Buffer(T,A,B,VividLight))
#define ColorBlend_PinLight(T,A,B) (ColorBlend_Buffer(T,A,B,PinLight))
#define ColorBlend_HardMix(T,A,B) (ColorBlend_Buffer(T,A,B,HardMix))
#define ColorBlend_Reflect(T,A,B) (ColorBlend_Buffer(T,A,B,Reflect))
#define ColorBlend_Glow(T,A,B) (ColorBlend_Buffer(T,A,B,Glow))
#define ColorBlend_Phoenix(T,A,B) (ColorBlend_Buffer(T,A,B,Phoenix))

An example would be:

ColorBlend_Glow(TargetPtr, ImageAPtr, ImageBPtr);
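
To blend an entire image rather than a single pixel, one option is to walk the two source buffers and the target buffer three bytes at a time. This is a minimal sketch assuming tightly packed 24-bit RGB data and the uint8 typedef and macros above; the function name and signature are illustrative:

void ImageBlend_Glow(uint8 *Target, const uint8 *ImageA, const uint8 *ImageB, int PixelCount)
{
    for (int i = 0; i < PixelCount; i++)
    {
        // Each pixel occupies three consecutive bytes; the macro writes all three channels.
        ColorBlend_Glow(Target + i * 3, ImageA + i * 3, ImageB + i * 3);
    }
}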

The remaining Photoshop blend modes involve converting RGB to HLS and back again.

#define ColorBlend_Hue(T,A,B)            ColorBlend_Hls(T,A,B,HueB,LuminationA,SaturationA)
#define ColorBlend_Saturation(T,A,B) ColorBlend_Hls(T,A,B,HueA,LuminationA,SaturationB)
#define ColorBlend_Color(T,A,B) ColorBlend_Hls(T,A,B,HueB,LuminationA,SaturationB)
#define ColorBlend_Luminosity(T,A,B) ColorBlend_Hls(T,A,B,HueA,LuminationB,SaturationA)

#define ColorBlend_Hls(T,A,B,O1,O2,O3) {                                            \
    float64 HueA, LuminationA, SaturationA;                                         \
    float64 HueB, LuminationB, SaturationB;                                         \
    Color_RgbToHls((A)[2],(A)[1],(A)[0], &HueA, &LuminationA, &SaturationA);        \
    Color_RgbToHls((B)[2],(B)[1],(B)[0], &HueB, &LuminationB, &SaturationB);        \
    Color_HlsToRgb(O1,O2,O3,&(T)[2],&(T)[1],&(T)[0]);                               \
}
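
Usage mirrors the earlier colour macros, assuming the RGB/HLS helpers below are in scope:

ColorBlend_Luminosity(TargetPtr, ImageAPtr, ImageBPtr);

Note that ColorBlend_Hls reads (A)[2] as red, (A)[1] as green and (A)[0] as blue, so it assumes the pixel buffers are stored in BGR byte order.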

These helper functions convert between RGB and HLS.

int32 Color_HueToRgb(float64 M1, float64 M2, float64 Hue, float64 *Channel)
{
    if (Hue < 0.0)
        Hue += 1.0;
    else if (Hue > 1.0)
        Hue -= 1.0;

    if ((6.0 * Hue) < 1.0)
        *Channel = (M1 + (M2 - M1) * Hue * 6.0);
    else if ((2.0 * Hue) < 1.0)
        *Channel = (M2);
    else if ((3.0 * Hue) < 2.0)
        *Channel = (M1 + (M2 - M1) * ((2.0F / 3.0F) - Hue) * 6.0);
    else
        *Channel = (M1);

    return TRUE;
}

int32 Color_RgbToHls(uint8 Red, uint8 Green, uint8 Blue, float64 *Hue, float64 *Lumination, float64 *Saturation)
{
    float64 Delta;
    float64 Max, Min;
    float64 Redf, Greenf, Bluef;

    Redf = ((float64)Red / 255.0F);
    Greenf = ((float64)Green / 255.0F);
    Bluef = ((float64)Blue / 255.0F);

    Max = max(max(Redf, Greenf), Bluef);
    Min = min(min(Redf, Greenf), Bluef);

    *Hue = 0;
    *Lumination = (Max + Min) / 2.0F;
    *Saturation = 0;

    if (Max == Min)
        return TRUE;

    Delta = (Max - Min);

    if (*Lumination < 0.5)
        *Saturation = Delta / (Max + Min);
    else
        *Saturation = Delta / (2.0 - Max - Min);

    if (Redf == Max)
        *Hue = (Greenf - Bluef) / Delta;
    else if (Greenf == Max)
        *Hue = 2.0 + (Bluef - Redf) / Delta;
    else
        *Hue = 4.0 + (Redf - Greenf) / Delta;

    *Hue /= 6.0;

    if (*Hue < 0.0)
        *Hue += 1.0;

    return TRUE;
}

int32 Color_HlsToRgb(float64 Hue, float64 Lumination, float64 Saturation, uint8 *Red, uint8 *Green, uint8 *Blue)
{
    float64 M1, M2;
    float64 Redf, Greenf, Bluef;

    if (Saturation == 0)
    {
        Redf = Lumination;
        Greenf = Lumination;
        Bluef = Lumination;
    }
    else
    {
        if (Lumination <= 0.5)
            M2 = Lumination * (1.0 + Saturation);
        else
            M2 = Lumination + Saturation - Lumination * Saturation;

        M1 = (2.0 * Lumination - M2);

        Color_HueToRgb(M1, M2, Hue + (1.0F / 3.0F), &Redf);
        Color_HueToRgb(M1, M2, Hue, &Greenf);
        Color_HueToRgb(M1, M2, Hue - (1.0F / 3.0F), &Bluef);
    }

    *Red = (uint8)(Redf * 255);
    *Blue = (uint8)(Bluef * 255);
    *Green = (uint8)(Greenf * 255);

    return TRUE;
}
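
As a quick sanity check of the conversion pair, here is a small sketch (values chosen for illustration) that converts pure red to HLS and back, using the float64 and uint8 typedefs the functions above assume:

float64 Hue, Lumination, Saturation;
uint8 Red, Green, Blue;

// Pure red: expect Hue = 0.0, Lumination = 0.5, Saturation = 1.0.
Color_RgbToHls(255, 0, 0, &Hue, &Lumination, &Saturation);

// Converting back should reproduce (255, 0, 0), give or take rounding.
Color_HlsToRgb(Hue, Lumination, Saturation, &Red, &Green, &Blue);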

There are more resources on this topic, notably:

  1. PegTop blend modes
  2. Forensic Photoshop
  3. Insight into Photoshop 7.0 Blend Modes
  4. SF - Basics - Blending Modes
  5. finish the blend modes
  6. Romz blog
  7. ReactOS RGB-HLS conversion functions

How to reproduce Photoshop's multiply blending in OpenCV?

I managed to sort this out. Feel free to comment with any suggested improvements.

First, I found a clue as to how to implement the multiply function in this post:

multiply blending

And here's a quick OpenCV implementation in C++.

Mat MultiplyBlend(const Mat& cvSource, const Mat& cvBackground) {

    // assumption: cvSource and cvBackground are of type CV_8UC4 and the same size

    // formula: (cvSource.rgb * cvBackground.rgb * cvSource.a) + (cvBackground.rgb * (1-cvSource.a))

    // replicate the source alpha channel into a 3-channel Mat
    Mat cvAlpha(cvSource.size(), CV_8UC3, Scalar::all(0));
    Mat input[] = { cvSource };
    int from_to[] = { 3,0, 3,1, 3,2 };
    mixChannels(input, 1, &cvAlpha, 1, from_to, 3);

    Mat cvBackgroundCopy;
    Mat cvSourceCopy;
    cvtColor(cvSource, cvSourceCopy, CV_RGBA2RGB);
    cvtColor(cvBackground, cvBackgroundCopy, CV_RGBA2RGB);

    // A = cvSource.rgb * cvBackground.rgb * cvSource.a
    Mat cvBlendResultLeft;
    multiply(cvSourceCopy, cvBackgroundCopy, cvBlendResultLeft, 1.0 / 255.0);
    multiply(cvBlendResultLeft, cvAlpha, cvBlendResultLeft, 1.0 / 255.0);
    cvSourceCopy.release();

    // invert alpha
    bitwise_not(cvAlpha, cvAlpha);

    // B = cvBackground.rgb * (1-cvSource.a)
    Mat cvBlendResultRight;
    multiply(cvBackgroundCopy, cvAlpha, cvBlendResultRight, 1.0 / 255.0);
    cvBackgroundCopy.release();
    cvAlpha.release();

    // A + B
    Mat cvBlendResult;
    add(cvBlendResultLeft, cvBlendResultRight, cvBlendResult);
    cvBlendResultLeft.release();
    cvBlendResultRight.release();

    cvtColor(cvBlendResult, cvBlendResult, CV_RGB2RGBA);

    return cvBlendResult;
}
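
A usage sketch might look like this; the file names are placeholders, and IMREAD_UNCHANGED is requested so the inputs really are 4-channel (older OpenCV versions spell it CV_LOAD_IMAGE_UNCHANGED). Both images are assumed to be the same size.

Mat source = imread("layer.png", IMREAD_UNCHANGED);           // 4-channel image with alpha
Mat background = imread("background.png", IMREAD_UNCHANGED);  // 4-channel image with alpha

Mat blended = MultiplyBlend(source, background);
imwrite("blended.png", blended);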

Duplicate Photoshop's Color blend mode in ImageMagick

Based on Castles' valuable answer, I tried to find the best way of doing this in PHP. The implementation he cited has two major flaws: it doesn't take opacity (if any) into account, and it is very slow and resource-hungry. Processing a 500x500 pixel image in PHP took about 15 seconds, during which Apache held the processor at up to 95%.

The fastest and least resource-consuming approach I found was actually doing it in HTML5, using a canvas to process the image. The results are excellent and the image is processed on the spot.

I will post the final chunks of code below, one for PHP and one for HTML. If you need to use this server-side, you can copy-paste the HTML code into Node.js with node-canvas: https://github.com/LearnBoost/node-canvas

PHP (with opacity):

<?php

function Lum($colour) {
    return ($colour['r'] * 0.3) + ($colour['g'] * 0.59) + ($colour['b'] * 0.11);
}

function ClipColour($colour) {
    $result = $colour;
    $luminance = Lum($colour);

    $cMin = min($colour['r'], $colour['g'], $colour['b']);
    $cMax = max($colour['r'], $colour['g'], $colour['b']);

    if ($cMin < 0.0) {
        $result['r'] = $luminance + ((($colour['r'] - $luminance) * $luminance) / ($luminance - $cMin));
        $result['g'] = $luminance + ((($colour['g'] - $luminance) * $luminance) / ($luminance - $cMin));
        $result['b'] = $luminance + ((($colour['b'] - $luminance) * $luminance) / ($luminance - $cMin));
    }

    if ($cMax > 255) {
        $result['r'] = $luminance + ((($colour['r'] - $luminance) * (255 - $luminance)) / ($cMax - $luminance));
        $result['g'] = $luminance + ((($colour['g'] - $luminance) * (255 - $luminance)) / ($cMax - $luminance));
        $result['b'] = $luminance + ((($colour['b'] - $luminance) * (255 - $luminance)) / ($cMax - $luminance));
    }

    return $result;
}

function SetLum($colour, $luminance) {

    $result = array();

    $diff = $luminance - Lum($colour);

    $result['r'] = $colour['r'] + $diff;
    $result['g'] = $colour['g'] + $diff;
    $result['b'] = $colour['b'] + $diff;

    return ClipColour($result);

}

function normalizeColor( $color ) {
    $color['r'] = $color['r'] / 255;
    $color['g'] = $color['g'] / 255;
    $color['b'] = $color['b'] / 255;

    return $color;
}

function denormalizeColor( $color ) {
    $color['r'] = round($color['r'] * 255);
    $color['g'] = round($color['g'] * 255);
    $color['b'] = round($color['b'] * 255);

    return $color;
}

$overlay_color = array('r'=>180,'g'=>22,'b'=>1, 'a' => 0.35);

$img = new Imagick();

if( !isset($_GET['case']) ) {
    $_GET['case'] = '';
}

//unmodified version
$original = new Imagick('girl.jpg');

//photoshop image to compare
$ps = new Imagick('original.jpg');

$img->addImage($original);
$it = $original->getPixelIterator();

foreach( $it as $row => $pixels ) {
    foreach ( $pixels as $column => $pixel ) {
        $rgbIni = $pixel->getColor();

        $rgb = SetLum($overlay_color, Lum($rgbIni));
        $overlay_color = normalizeColor($overlay_color);
        $rgb = normalizeColor($rgb);

        $rgbIni = normalizeColor($rgbIni);

        $rgb['r'] = ((1 - $overlay_color['a']) * $rgbIni['r']) + ($overlay_color['a'] * $rgb['r']);
        $rgb['g'] = ((1 - $overlay_color['a']) * $rgbIni['g']) + ($overlay_color['a'] * $rgb['g']);
        $rgb['b'] = ((1 - $overlay_color['a']) * $rgbIni['b']) + ($overlay_color['a'] * $rgb['b']);

        $rgb = denormalizeColor($rgb);
        $overlay_color = denormalizeColor($overlay_color);

        $pixel->setColor('rgb('.round($rgb['r']).','. round($rgb['g']).','.round($rgb['b']).')');

    }

    $it->syncIterator();
}

//add modified version
$img->addImage($original);
$img->addImage($ps);

$img->resetIterator();
$combined = $img->appendImages(true); //stack images

header('content-type: image/jpeg');

$combined->setImageFormat("jpeg");

echo $combined;

?>

HTML:

<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<script>
var RGBA = function(r, g, b, a) {
    this.R = r || 0;
    this.G = g || 0;
    this.B = b || 0;
    this.A = a || 0.5;
}

function SetLum(initialColor, pixelColor) {

    var initialColorLuminance = initialColor.R * 0.3 + initialColor.G * 0.59 + initialColor.B * 0.11;
    var pixelColorLuminance = pixelColor.R * 0.3 + pixelColor.G * 0.59 + pixelColor.B * 0.11;

    var diff = pixelColorLuminance - initialColorLuminance;

    var response = new Array;
    response[0] = initialColor.R + diff;
    response[1] = initialColor.G + diff;
    response[2] = initialColor.B + diff;

    //console.log(response[0]);

    return ClipColour(response);

}

function alphaComposite(mv, ov, a) {
    return (mv * a) + (ov * (1 - a));
}

function ClipColour(color) { //function to prevent underexposure or overexposure on some pixels

    var result = color;
    var luminance = color[0] * 0.3 + color[1] * 0.59 + color[2] * 0.11;

    var cMin = Math.min(color[0], color[1], color[2]);
    var cMax = Math.max(color[0], color[1], color[2]);

    if (cMin < 0.0) {
        color[0] = luminance + (((color[0] - luminance) * luminance) / (luminance - cMin));
        color[1] = luminance + (((color[1] - luminance) * luminance) / (luminance - cMin));
        color[2] = luminance + (((color[2] - luminance) * luminance) / (luminance - cMin));
    }

    if (cMax > 255) {
        color[0] = luminance + (((color[0] - luminance) * (255 - luminance)) / (cMax - luminance));
        color[1] = luminance + (((color[1] - luminance) * (255 - luminance)) / (cMax - luminance));
        color[2] = luminance + (((color[2] - luminance) * (255 - luminance)) / (cMax - luminance));
    }

    return color;
}

function processImage(image, targetColour) {
    var canvas = document.createElement('canvas');
    var c = canvas.getContext('2d');

    canvas.width = image.width;
    canvas.height = image.height;

    // Draw the image on the original canvas
    c.drawImage(image, 0, 0, canvas.width, canvas.height);

    // There's a (much) faster way to cycle through all the pixels using typed arrays,
    // but I'm playing it safe so that the example works in all browsers.
    var imageData = c.getImageData(0, 0, canvas.width, canvas.height),
        imageDataPixels = imageData.data;

    for (var i = 0, len = imageDataPixels.length; i < len; i += 4) {
        var pixelColor = new RGBA(imageDataPixels[i], imageDataPixels[i+1], imageDataPixels[i+2], 1);
        var test = SetLum(targetColour, pixelColor);

        var r = Math.round(test[0]);
        var g = Math.round(test[1]);
        var b = Math.round(test[2]);

        imageDataPixels[i] = alphaComposite(r, imageDataPixels[i], targetColour.A);
        imageDataPixels[i + 1] = alphaComposite(g, imageDataPixels[i + 1], targetColour.A);
        imageDataPixels[i + 2] = alphaComposite(b, imageDataPixels[i + 2], targetColour.A);
    }

    c.putImageData(imageData, 0, 0);

    return canvas;
}

document.addEventListener('DOMContentLoaded', function() {
    var image = new Image(),
        processImageFile = null;

    image.src = "girl.jpg";

    image.addEventListener('load', function() {
        var canvas = document.getElementById('canvas'),
            c = canvas.getContext('2d'),
            imageRGBA = new RGBA(180, 22, 1, 0.35);

        canvas.width = image.width;
        canvas.height = image.height;

        c.drawImage(image, 0, 0);

        processImageFile = processImage(image, imageRGBA);
        c.drawImage(processImageFile, 0, 0);
    });
});
</script>
</head>
<body>

<img src="girl.jpg" />
<br />

<canvas id="canvas"></canvas>

<br />
<img src="original.jpg" />
</body>
</html>
Efficiently composing/rendering multiple layers for Photoshop-like image editor

Without knowing your target platform, some sort of hardware acceleration is highly recommended. OpenGL 2.0+ (or ES 2.0+) is the most likely candidate: with GLSL you get a C-style language in which the interesting part for you is the fragment program, i.e. what the GPU should do per pixel, based on the input textures, to produce an output colour. Where you're outputting to is implicit, but you can render to an image and then use that image as the input to a later pass, which hooks right into your idea (2). Depending on the exact hardware you're targeting, it may be relevant that Direct3D has a very similar construct in HLSL, and NVIDIA supplies a more proprietary equivalent called Cg that, I believe, can nowadays compile to GLSL or HLSL.

Otherwise: idea (1) is a smart move, especially if the user is allowed to open images of arbitrary size. It causes the time spent to be a function of the size of the brush, not the image. You need to be reasonably precise in your thinking if you're doing things at subpixel accuracy.

Idea (2) potentially has precision ramifications (especially if coupled to hardware). To maintain the exact same results, obviously your intermediate buffers need to be of the same precision as your intermediate variables, which in a typical consumer-oriented drawing application often means that file inputs are 8bpp/channel but intermediate storage needs to be at least 16bpp/channel if you don't want errors to accumulate. This is likely to be the biggest potential barrier to hardware acceleration, since older hardware tends to limit you to 8bpp/channel intermediate buffers. Modern hardware can do floating point buffers of decent precision.

It is possible to gain an advantage from precomputation on layers that will be applied later, but the information needed per pixel is generally more complicated than a single colour, and may end up no simpler than just storing the original buffers. Probably the smart thing to do is peephole optimisation. If you have two additive layers on top of each other, you can easily replace them with a single additive layer; ditto for two multiplicative or two XOR layers. So you'd implement a loop that looks at the effect queue, finds any patterns it knows how to turn into a simpler form, makes those substitutions, and repeats until no more substitutions can be found. For optimisation purposes you may even want to implement some compound operations that aren't directly offered to the user. Though, again, you need to consider precision.
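
As a rough illustration of the peephole idea, the sketch below collapses adjacent layers that share an associative operation into a single precombined layer. The types and merge rules are invented for this example and ignore opacity:

#include <algorithm>
#include <cstdint>
#include <vector>

enum class BlendOp { Normal, Add, Multiply, Xor };

struct LayerEffect {
    BlendOp op;
    std::vector<uint8_t> pixels;   // one byte per channel, same length for every layer
};

// Pre-combine two adjacent layers that use the same associative operation, so the
// compositor only has to apply one layer later. Opacity is ignored to keep the
// sketch short; a real implementation would have to fold it in (and mind precision).
static LayerEffect MergeLayers(const LayerEffect& a, const LayerEffect& b) {
    LayerEffect merged{a.op, std::vector<uint8_t>(a.pixels.size())};
    for (size_t i = 0; i < a.pixels.size(); ++i) {
        int va = a.pixels[i], vb = b.pixels[i];
        switch (a.op) {
            case BlendOp::Add:      merged.pixels[i] = (uint8_t)std::min(255, va + vb); break;
            case BlendOp::Multiply: merged.pixels[i] = (uint8_t)(va * vb / 255);        break;
            case BlendOp::Xor:      merged.pixels[i] = (uint8_t)(va ^ vb);              break;
            default:                merged.pixels[i] = (uint8_t)va;                     break;
        }
    }
    return merged;
}

// Scan the effect queue, substitute simpler equivalents, and repeat until stable.
void PeepholeOptimise(std::vector<LayerEffect>& queue) {
    bool changed = true;
    while (changed) {
        changed = false;
        for (size_t i = 0; i + 1 < queue.size(); ++i) {
            BlendOp op = queue[i].op;
            bool mergeable = (op == BlendOp::Add || op == BlendOp::Multiply || op == BlendOp::Xor);
            if (mergeable && queue[i + 1].op == op) {
                queue[i] = MergeLayers(queue[i], queue[i + 1]);
                queue.erase(queue.begin() + i + 1);
                changed = true;
                break;
            }
        }
    }
}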

In the common case, with normal blending applied to all layers, you'd end up with a single operation for arbitrarily many layers above.

Blend two images on a JavaScript canvas

Pixastic is a specialized framework for advanced canvas work; here are its blending examples: http://www.pixastic.com/lib/docs/actions/blend/

If you would like to do this yourself, you can extract the pixel data from the two images, blend it with a mathematical formula, and put the result onto a canvas. Here is information on how to get and put pixel data from/to a canvas:
http://ajaxian.com/archives/canvas-image-data-optimization-tip


Update: a simple example with alpha blending of two images in a 50/50 proportion.
(Images borrowed from http://www.pixastic.com/sample/Butterfly.jpg and http://www.pixastic.com/sample/Flower.jpg)

<img src="Butterfly.jpg" id="img1">
<img src="Flower.jpg" id="img2">
<p>Blended image<br><canvas id="canvas"></canvas></p>
<script>
window.onload = function () {
var img1 = document.getElementById('img1');
var img2 = document.getElementById('img2');
var canvas = document.getElementById("canvas");
var context = canvas.getContext("2d");
var width = img1.width;
var height = img1.height;
canvas.width = width;
canvas.height = height;

var pixels = 4 * width * height;
context.drawImage(img1, 0, 0);
var image1 = context.getImageData(0, 0, width, height);
var imageData1 = image1.data;
context.drawImage(img2, 0, 0);
var image2 = context.getImageData(0, 0, width, height);
var imageData2 = image2.data;
while (pixels--) {
imageData1[pixels] = imageData1[pixels] * 0.5 + imageData2[pixels] * 0.5;
}
image1.data = imageData1;
context.putImageData(image1, 0, 0);
};
</script>

