Image Edge Smoothing with OpenCV

Image edge smoothing with OpenCV

Do you want to get something like this?

Sample Image

If yes, then here is the code:

#include <iostream>
#include <vector>
#include <string>
#include <fstream>
#include <opencv2/opencv.hpp>

using namespace cv;
using namespace std;

int main(int argc, char **argv)
{
    cv::namedWindow("result");
    Mat img = imread("TestImg.png");
    Mat whole_image = imread("D:\\ImagesForTest\\lena.jpg");
    whole_image.convertTo(whole_image, CV_32FC3, 1.0/255.0);
    cv::resize(whole_image, whole_image, img.size());
    img.convertTo(img, CV_32FC3, 1.0/255.0);

    // White background the fragment will be blended onto
    Mat bg = Mat(img.size(), CV_32FC3);
    bg = Scalar(1.0, 1.0, 1.0);

    // Prepare mask
    Mat mask;
    Mat img_gray;
    cv::cvtColor(img, img_gray, cv::COLOR_BGR2GRAY);
    img_gray.convertTo(mask, CV_32FC1);
    threshold(1.0 - mask, mask, 0.9, 1.0, cv::THRESH_BINARY_INV);

    // Feather the mask so the edges blend smoothly
    cv::GaussianBlur(mask, mask, Size(21, 21), 11.0);
    imshow("result", mask);
    cv::waitKey(0);

    // Re-get the image fragment with the smoothed mask
    Mat res;

    vector<Mat> ch_img(3);
    vector<Mat> ch_bg(3);
    cv::split(whole_image, ch_img);
    cv::split(bg, ch_bg);
    // Per-channel alpha blend: mask selects the image, (1 - mask) selects the background
    ch_img[0] = ch_img[0].mul(mask) + ch_bg[0].mul(1.0 - mask);
    ch_img[1] = ch_img[1].mul(mask) + ch_bg[1].mul(1.0 - mask);
    ch_img[2] = ch_img[2].mul(mask) + ch_bg[2].mul(1.0 - mask);
    cv::merge(ch_img, res);
    cv::merge(ch_bg, bg);

    imshow("result", res);
    cv::waitKey(0);
    cv::destroyAllWindows();
    return 0;
}

And I think this link will be interesting for you too: Poisson Blending
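
For reference, OpenCV ships Poisson blending as cv2.seamlessClone. A minimal Python sketch (the file names and paste position are assumptions, not part of the original answer):

import cv2

# hypothetical file names - substitute your own images
src  = cv2.imread('fragment.png')                             # object to paste
dst  = cv2.imread('background.png')                           # image to paste it into
mask = cv2.imread('fragment_mask.png', cv2.IMREAD_GRAYSCALE)  # white where the object is

# paste the fragment at the center of the destination with Poisson blending
center = (dst.shape[1] // 2, dst.shape[0] // 2)
blended = cv2.seamlessClone(src, dst, mask, center, cv2.NORMAL_CLONE)
cv2.imwrite('blended.png', blended)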

Smooth the edges of binary images (Face) using Python and OpenCV

You can do that in Python/OpenCV with the help of Skimage: blur the binary image, then stretch the intensities so that mid-gray maps to black and white stays white, leaving antialiased edges.

Input:

Sample Image

import cv2
import numpy as np
import skimage.exposure

# load image
img = cv2.imread('bw_image.png')

# blur threshold image
blur = cv2.GaussianBlur(img, (0,0), sigmaX=3, sigmaY=3, borderType = cv2.BORDER_DEFAULT)

# stretch so that 255 -> 255 and 127.5 -> 0
# C = A*X+B
# 255 = A*255+B
# 0 = A*127.5+B
# Thus A=2 and B=-255
# a plain blur*2.0-255.0 stretch does not work correctly, so use skimage
result = skimage.exposure.rescale_intensity(blur, in_range=(127.5,255), out_range=(0,255)).astype(np.uint8)

# save output
cv2.imwrite('bw_image_antialiased.png', result)

# Display various images to see the steps
cv2.imshow('result', result)

cv2.waitKey(0)
cv2.destroyAllWindows()


Sample Image

You will have to adjust the amount of blur for the degree of aliasing in the image.
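
If you want to compare settings, a small sweep over sigma values (a sketch, not part of the original answer) makes the trade-off easy to see:

import cv2
import numpy as np
import skimage.exposure

img = cv2.imread('bw_image.png')

# larger sigma smooths heavier aliasing but rounds corners more
for sigma in (1, 2, 3, 5):
    blur = cv2.GaussianBlur(img, (0,0), sigmaX=sigma, sigmaY=sigma)
    result = skimage.exposure.rescale_intensity(blur, in_range=(127.5,255), out_range=(0,255)).astype(np.uint8)
    cv2.imwrite(f'bw_image_antialiased_sigma{sigma}.png', result)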

Smoothing Edges of a Binary Image

Here is the result I obtained with your image: Sample Image

My method is mostly based on several cv::medianBlur passes applied to a scaled-up image.

Here is the code:

cv::Mat vesselImage = cv::imread(filename); // the original image
cv::threshold(vesselImage, vesselImage, 125, 255, cv::THRESH_BINARY);

cv::Mat blurredImage; // output of the algorithm
cv::pyrUp(vesselImage, blurredImage); // scale up 2x so the median filter works at sub-pixel precision

// several median passes smooth the jagged contour while keeping it sharp
for (int i = 0; i < 15; i++)
    cv::medianBlur(blurredImage, blurredImage, 7);

cv::pyrDown(blurredImage, blurredImage); // back to the original size
cv::threshold(blurredImage, blurredImage, 200, 255, cv::THRESH_BINARY);

The jagged edges are due to the final thresholding. If you are comfortable with an output image that is non-binary (i.e. with 256 shades of gray), you can just remove the last threshold call and you get this image: Sample Image
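
For anyone working in Python, a rough equivalent of that pipeline without the final threshold might look like this (the file name is an assumption):

import cv2

vessel = cv2.imread('vessels.png', cv2.IMREAD_GRAYSCALE)
_, vessel = cv2.threshold(vessel, 125, 255, cv2.THRESH_BINARY)

blurred = cv2.pyrUp(vessel)              # scale up 2x
for _ in range(15):                      # repeated median filtering smooths the contour
    blurred = cv2.medianBlur(blurred, 7)
blurred = cv2.pyrDown(blurred)           # back to the original size

# no final threshold: the edges keep 256 shades of gray
cv2.imwrite('vessels_smooth_gray.png', blurred)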

How can I soften just the edges of this image

Here is one way using OpenCV, Numpy and Skimage. I assume you actually have an image with a transparent background and not just a checkerboard pattern.

Input:

Sample Image

import cv2
import numpy as np
import skimage.exposure

# load image with alpha channel
img = cv2.imread('lena_circle.png', cv2.IMREAD_UNCHANGED)

# extract only bgr channels
bgr = img[:, :, 0:3]

# extract alpha channel
a = img[:, :, 3]

# blur alpha channel
ab = cv2.GaussianBlur(a, (0,0), sigmaX=2, sigmaY=2, borderType = cv2.BORDER_DEFAULT)

# stretch so that 255 -> 255 and 127.5 -> 0
aa = skimage.exposure.rescale_intensity(ab, in_range=(127.5,255), out_range=(0,255)).astype(np.uint8)

# replace alpha channel in input with new alpha channel
out = img.copy()
out[:, :, 3] = aa

# save output
cv2.imwrite('lena_circle_antialias.png', out)

# Display various images to see the steps
# NOTE: In and Out show heavy aliasing. This seems to be an artifact of imshow(), which did not display transparency for me. However, the saved image looks fine

cv2.imshow('In',img)
cv2.imshow('BGR', bgr)
cv2.imshow('A', a)
cv2.imshow('AB', ab)
cv2.imshow('AA', aa)
cv2.imshow('Out', out)

cv2.waitKey(0)
cv2.destroyAllWindows()


Sample Image

I am by no means an expert with OpenCV. I looked at cv2.normalize(), but it did not look like I could provide my own input and output ranges. So I also tried the following, adding clipping to be sure there were no overflows or underflows:

aa = a*2.0 - 255.0
aa[aa<0] = 0
aa[aa>0] = 255


where I computed that by solving the simultaneous equations so that in=255 becomes out=255, in=127.5 becomes out=0, and the values in between are stretched linearly:

C = A*X+B
255 = A*255+B
0 = A*127.5+B
Thus A=2 and B=-255


But that does not work nearly as well as Skimage's rescale_intensity (note that the one-sided clip above sets every positive value to 255, so the edges end up hard rather than linearly stretched).
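
For completeness, the stretch from those equations can also be written with NumPy clipping to the full [0, 255] range (a sketch applied to the blurred alpha channel from the code above; this is not the approach the answer settled on):

import cv2
import numpy as np

img = cv2.imread('lena_circle.png', cv2.IMREAD_UNCHANGED)
ab = cv2.GaussianBlur(img[:, :, 3], (0, 0), sigmaX=2, sigmaY=2)  # blurred alpha, as above

# A=2, B=-255: maps 127.5 -> 0 and 255 -> 255, clipped to [0, 255] instead of forced to 0/255
aa = np.clip(ab.astype(np.float32) * 2.0 - 255.0, 0, 255).astype(np.uint8)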

Edge Smoothing and filling inner contours in OpenCV with iOS

How about:

morphologyEx(grey, grey, MORPH_CLOSE, getStructuringElement(MORPH_ELLIPSE, Size(7,7)));

although the silhouette gets merged for the left hand

Result of the simple approach: Sample Image

Edit: a slightly more involved version:

Mat tmp = grey.clone();
// morphological gradient marks the contour pixels
morphologyEx(tmp, tmp, MORPH_GRADIENT, getStructuringElement(MORPH_ELLIPSE, Size(3,3)));
bitwise_not(tmp, tmp);

// collect the small inner contours (holes) so they can be filled
Mat smallholes = Mat::zeros(tmp.size(), CV_8UC1);
vector<vector<Point>> contours;
findContours(tmp, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
for (int i = 0; i < contours.size(); i++)
{
    double area = contourArea(Mat(contours[i]));
    if (area < 100)
        drawContours(smallholes, contours, i, 255, -1);
}

// fill the holes and smooth the edges with a closing
Mat done;
bitwise_or(grey, smallholes, done);
morphologyEx(done, done, MORPH_CLOSE, getStructuringElement(MORPH_ELLIPSE, Size(3,3)));

Result of the more involved approach: Sample Image

how to refine or blur or smooth just the edges?

If you have an edge mask of the image, do that:

  1. Dilate(edgemask)
  2. Copy(source: original, destination: blurred)
  3. Smooth(blurred)
  4. Copy(source: blurred, destination: original, mask: edgemask)

And your "original" image will be blurred only along the edges, as in the sketch below.
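
A minimal Python sketch of those four steps (the file name, edge detector, and kernel sizes are assumptions):

import cv2
import numpy as np

original = cv2.imread('photo.jpg')
gray = cv2.cvtColor(original, cv2.COLOR_BGR2GRAY)
edgemask = cv2.Canny(gray, 100, 200)                        # any edge mask will do

# 1. dilate the edge mask so it covers a band around each edge
edgemask = cv2.dilate(edgemask, np.ones((5, 5), np.uint8))

# 2-3. copy the original and smooth the copy
blurred = cv2.GaussianBlur(original, (9, 9), 0)

# 4. copy the blurred pixels back into the original, only where the mask is set
np.copyto(original, blurred, where=(edgemask[..., None] > 0))

cv2.imwrite('photo_edges_blurred.png', original)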

How to blur/feather the edges of an object in an image using OpenCV?

Similar to what I have mentioned here, you can do it in the following steps:

  • Load original image and find contours.
  • Blur the original image and save it in a different variable.
  • Create an empty mask and draw the detected contours on it.
  • Use np.where() to select the pixels where the mask (the drawn contours) is white and replace them with the blurred values.

import cv2
import numpy as np

image = cv2.imread('./asdf.jpg')
blurred_img = cv2.GaussianBlur(image, (21, 21), 0)
mask = np.zeros(image.shape, np.uint8)

gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 60, 255, cv2.THRESH_BINARY)[1]
contours, hierarchy = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

cv2.drawContours(mask, contours, -1, (255,255,255),5)
output = np.where(mask==np.array([255, 255, 255]), blurred_img, image)

Original image

Detected contours

Blurred edges

Make Edges of the Image smooth

Here is how to replace the background with some colored image rather than transparency in Python/OpenCV.

  • Read the input
  • Convert to gray
  • Threshold
  • Blur then stretch gray to black to antialias
  • Get the external contours and the largest contour
  • Draw the largest contour as white on black background
  • Dilate to add your black border (if desired)
  • Create a colored (red) background image
  • Apply the mask to the input
  • Apply the inverted mask to the background
  • Add the two results together
  • Save the result

Input:

Sample Image

import cv2
import numpy as np
import skimage.exposure

# load image
img = cv2.imread('bunny.jpg')

# convert to gray
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# threshold
thresh = cv2.threshold(gray, 32, 255, cv2.THRESH_BINARY)[1]

# blur threshold image
blur = cv2.GaussianBlur(thresh, (0,0), sigmaX=3, sigmaY=3, borderType = cv2.BORDER_DEFAULT)

# stretch so that 255 -> 255 and 127.5 -> 0
stretch = skimage.exposure.rescale_intensity(blur, in_range=(127.5,255), out_range=(0,255)).astype(np.uint8)

# threshold again
thresh2 = cv2.threshold(stretch, 0, 255, cv2.THRESH_BINARY)[1]

# get external contour
contours = cv2.findContours(thresh2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
big_contour = max(contours, key=cv2.contourArea)

# draw white filled contour on black background
contour = np.zeros_like(thresh, dtype=np.uint8)
cv2.drawContours(contour, [big_contour], 0, 255, -1)

# dilate mask for dark border
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (20,20))
mask = cv2.morphologyEx(contour, cv2.MORPH_DILATE, kernel)

# create red colored background image
bckgrnd = np.full_like(img, (0,0,255), dtype=np.uint8)

# apply mask to img
img_masked = cv2.bitwise_and(img, img, mask=mask)

# apply inverse mask to colored background image
bckgrnd_masked = cv2.bitwise_and(bckgrnd, bckgrnd, mask=255-mask)

# combine the two
result = cv2.add(img_masked, bckgrnd_masked)

# save output
cv2.imwrite('bunny_thresh2.png', thresh)
cv2.imwrite('bunny_mask2.png', mask)
cv2.imwrite('bunny_masked2.png', img_masked)
cv2.imwrite('bunny_background_masked2.png', bckgrnd_masked)
cv2.imwrite('bunny_result2.png', result)

# Display various images to see the steps
cv2.imshow('gray',gray)
cv2.imshow('thresh', thresh)
cv2.imshow('blur', blur)
cv2.imshow('stretch', stretch)
cv2.imshow('thresh2', thresh2)
cv2.imshow('contour', contour)
cv2.imshow('mask', mask)
cv2.imshow('img_masked', img_masked)
cv2.imshow('bckgrnd_masked', bckgrnd_masked)
cv2.imshow('result', result)

cv2.waitKey(0)
cv2.destroyAllWindows()

Threshold image:

Sample Image

Mask image:

Sample Image

Mask applied to image:

Sample Image

Inverted mask applied to background:

Sample Image

Result:

Sample Image


