系列文章目录
前言
一、处理前后图片预览
二、源码展示
1.源码1
代码如下(本文使用源码1):
# 绘制素描图 sketch:草图,素描,梗概
'''
首先呢,将彩色图转换成灰度图;
然后对灰度图进行求其反色的操作;
对得到的反色的图片结果采用一个高斯模糊的操作;
然后采用颜色亮化(color dodge)的技术再将第一步的灰度图和第三步操作后的图片进行混合,这样就成功地把图片搞成素描了。
'''
from PIL import Image, ImageFilter, ImageOps
# 将像素点颜色亮化
def dodge(a, b, alpha):
return min(int(a*255/(256-b*alpha)), 255)
# 绘制素描
def drawSketch(img, blur=25, alpha=1.0):
img1 = img.convert('L') # 图片转换成灰色
img2 = img1.copy() # 复制灰度图片
img2 = ImageOps.invert(img2) # 将图灰度片反色,invert:使前后倒置或反转
for i in range(blur): # 模糊度
img2 = img2.filter(ImageFilter.BLUR)
width, height = img1.size # 获取图片的大小
# 遍历两张图片的像素矩阵
for x in range(width):
for y in range(height):
a = img1.getpixel((x, y)) # Returns the pixel value at a given position
b = img2.getpixel((x, y)) # 返回指定位置的像素点值
img1.putpixel((x, y), dodge(a, b, alpha)) # Modifies the pixel at the given position.修改指定位置的像素点值
# 调用系统接口显示图片
img1.show()
# 保存图片到指定路径
img1.save(saveUrl)
print('绘制素描成功')
if __name__ == '__main__':
    # Script entry point: load the source photo and render its sketch.
    source_path = './images/6.jpg'
    saveUrl = './images/fff.jpg'  # read as a module global by drawSketch()
    photo = Image.open(source_path)
    drawSketch(photo)
2.源码2(批量转换)
效果展示:
代码如下:
from PIL import Image, ImageFilter, ImageOps
import numpy as np
import os
def plot_sketch(origin_picture, out_picture, depth=10.0):
    """Render a relief-style sketch of an image via gray-level gradients.

    origin_picture: path of the source image.
    out_picture: path the sketch is written to.
    depth: gradient weighting in [0, 100]; larger values deepen the
        relief (previously hard-coded to 10).
    """
    a = np.asarray(Image.open(origin_picture).convert('L')).astype('float')
    grad_x, grad_y = np.gradient(a)  # gray-level gradient along each axis
    grad_x = grad_x * depth / 100.
    grad_y = grad_y * depth / 100.
    # Unit surface normal (uni_x, uni_y, uni_z) at each pixel.
    A = np.sqrt(grad_x ** 2 + grad_y ** 2 + 1.0)
    uni_x = grad_x / A
    uni_y = grad_y / A
    uni_z = 1. / A
    vec_el = np.pi / 2.2  # light elevation angle, radians
    vec_az = np.pi / 4.   # light azimuth angle, radians
    dx = np.cos(vec_el) * np.cos(vec_az)  # light component along x
    dy = np.cos(vec_el) * np.sin(vec_az)  # light component along y
    dz = np.sin(vec_el)                   # light component along z
    b = 255 * (dx * uni_x + dy * uni_y + dz * uni_z)  # shade by normal . light
    b = b.clip(0, 255)
    im = Image.fromarray(b.astype('uint8'))  # rebuild the image
    im.save(out_picture)
    print("转换成功,请查看 : ", out_picture)
def plot_sketch2(origin_picture, out_picture, alpha=1.0, blur=20):
    """Render a pencil sketch of an image via the color-dodge technique.

    origin_picture: path of the source image.
    out_picture: path the sketch is written to.
    alpha: dodge strength; 1.0 is the standard blend.
    blur: number of ImageFilter.BLUR passes (previously hard-coded to 20).
    """
    img = Image.open(origin_picture)
    img1 = img.convert('L')                  # grayscale base
    img2 = ImageOps.invert(img1.copy())      # inverted copy for dodging
    for _ in range(blur):                    # stacked blurs approximate a Gaussian
        img2 = img2.filter(ImageFilter.BLUR)
    width, height = img1.size
    for x in range(width):
        for y in range(height):
            a = img1.getpixel((x, y))
            b = img2.getpixel((x, y))
            # Color dodge: brighten the base by the inverted blur, clamp at 255.
            img1.putpixel((x, y), min(int(a * 255 / (256 - b * alpha)), 255))
    img1.save(out_picture)
if __name__ == '__main__':
    # Ensure the output directory exists up front: Image.save() does not
    # create missing directories and would raise otherwise.
    out_path = "./sketchs"
    os.makedirs(out_path, exist_ok=True)
    # Single-file demo conversion.
    origin_picture = "pictures/1.jpg"
    out_picture = "sketchs/sketch.jpg"
    plot_sketch(origin_picture, out_picture)
    # Batch conversion: sketch every regular file found in ./pictures.
    origin_path = "./pictures"
    for file in os.listdir(origin_path):
        origin_picture = origin_path + "/" + file
        if not os.path.isfile(origin_picture):
            continue  # skip sub-directories and other non-files
        out_picture = out_path + "/" + "sketch_of_" + file
        plot_sketch2(origin_picture, out_picture)
3.源码3
代码如下:
import cv2
import numpy as np
def dodgeNaive(image, mask):
    """Color-dodge blend *image* by *mask*, pixel by pixel (reference impl).

    image: 2-D uint8 grayscale array.
    mask: 2-D uint8 array of the same shape (typically the blurred,
        inverted grayscale image).
    Returns a new uint8 array of the same shape.

    Fixes over the original:
    - the divisor used the whole ``mask`` array instead of the single
      pixel ``mask[col, row]``, so ``tmp`` was an array, not a scalar;
    - ``tmp.any() > 255`` compared a bool against 255 (always False),
      so the intended clamp never applied;
    - the uint8 pixel is widened to int before ``<< 8`` to avoid the
      shift overflowing the 8-bit dtype.
    """
    # Note: shape[:2] is (rows, cols); the names just mirror the original.
    width, height = image.shape[:2]
    # Prepare the output with the same size as the input image.
    blend = np.zeros((width, height), np.uint8)
    for col in range(width):
        for row in range(height):
            m = int(mask[col, row])
            if m == 255:
                # Avoid division by zero: a fully dodged pixel is white.
                blend[col, row] = 255
            else:
                # Shift the pixel value by 8 bits, then divide by the
                # inverse of the mask; clamp the result into uint8 range.
                tmp = (int(image[col, row]) << 8) // (255 - m)
                blend[col, row] = min(tmp, 255)
    return blend
def dodgeV2(image, mask):
    """Vectorized color dodge: image * 256 / (255 - mask), saturated by OpenCV."""
    inverted_mask = 255 - mask
    return cv2.divide(image, inverted_mask, scale=256)
def burnV2(image, mask):
    """Vectorized color burn: the complement of dodging the inverted inputs."""
    dodged = cv2.divide(255 - image, 255 - mask, scale=256)
    return 255 - dodged
def rgb_to_sketch(src_image_name, dst_image_name):
    """Convert the image at *src_image_name* into a pencil sketch.

    Pipeline: grayscale -> invert -> Gaussian blur -> color-dodge blend.
    The result is written to *dst_image_name*, and every intermediate
    stage is shown in an OpenCV window until a key is pressed.

    Raises FileNotFoundError when the source cannot be read:
    cv2.imread returns None instead of raising, which would otherwise
    surface later as a cryptic cvtColor error.
    """
    img_rgb = cv2.imread(src_image_name)
    if img_rgb is None:
        raise FileNotFoundError('cannot read image: ' + src_image_name)
    img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
    # (Reading in grayscale directly would also work:
    #  cv2.imread(src_image_name, cv2.IMREAD_GRAYSCALE))
    img_gray_inv = 255 - img_gray
    img_blur = cv2.GaussianBlur(img_gray_inv, ksize=(21, 21),
                                sigmaX=0, sigmaY=0)
    img_blend = dodgeV2(img_gray, img_blur)
    # Save first so the result survives even if the preview is interrupted.
    cv2.imwrite(dst_image_name, img_blend)
    cv2.imshow('original', img_rgb)
    cv2.imshow('gray', img_gray)
    cv2.imshow('gray_inv', img_gray_inv)
    cv2.imshow('gray_blur', img_blur)
    cv2.imshow("pencil sketch", img_blend)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
if __name__ == '__main__':
    # Demo: sketch the sample photo 2.jpg into sketch2.jpg.
    rgb_to_sketch('2.jpg', 'sketch2.jpg')
4.源码4
代码如下:
引用地址:https://blog.csdn.net/qq_46092061/article/details/117657754
# 绘制素描图 sketch:草图,素描,梗概
from PIL import Image
import numpy as np
# 绘制素描图
def makeSketch(getUrl, saveUrl):
# 获取图片,将图片转化为,mode=L:灰度图,每个像素用 8 位二进制代码表示,astype:并将像素点转化为浮点类型的二维矩阵
a = np.array(Image.open(getUrl).convert('L')).astype('float')
depth = 10. # (0-100)
'''
梯度计算采用二阶精确中心差分
在内点和一阶或二阶精确的一面
(向前或向后)边界上的差异。
因此,返回的梯度具有与输入数组相同的形状。
'''
grad = np.gradient(a) # 获取图像灰度的梯度值 返回元组 gradient:梯度
grad_x, grad_y = grad # 分别取横纵图像梯度值
grad_x = grad_x * depth / 100.
grad_y = grad_y * depth / 100.
A = np.sqrt(grad_x ** 2 + grad_y ** 2 + 1.)
uni_x = grad_x / A
uni_y = grad_y / A
uni_z = 1. / A
vec_el = np.pi / 2.2 # 光源的俯视角度,弧度值
vec_az = np.pi / 4. # 光源的方位角度,弧度值
dx = np.cos(vec_el) * np.cos(vec_az) # 光源对 x轴的影响
dy = np.cos(vec_el) * np.sin(vec_az) # 光源对 y轴的影响
dz = np.sin(vec_el) # 光源对z 轴的影响
b = 255 * (dx * uni_x + dy * uni_y + dz * uni_z) # 光源归一化
b = b.clip(0, 255)
im = Image.fromarray(b.astype('uint8')) # 重构图像 a = np.asarray(im)
im.save(saveUrl)
print('绘制素描画成功')
if __name__ == '__main__':
    # Demo: sketch ./images/2.jpg into ./images/hh.jpg.
    source = r'./images/2.jpg'
    target = r'./images/hh.jpg'
    makeSketch(source, target)
总结
分享:
成熟是一种明亮而不刺眼的光辉,一种圆润而不腻耳的声响,一种不再需要对别人察言观色的从容,一种终于停止向周围申诉求告的大气,一种不理会喧闹的微笑,一种洗刷了偏激的淡漠,一种无需声张的厚实,一种能够看的很远却并不陡峭的高度。——余秋雨《山居笔记》