
python-3.x - White balancing a photo from known points


White balancing is a fairly broad topic, but most of the answers I have seen cover automatic white-balance techniques for an entire image with no known white, gray, or black point. I can't seem to find much that covers white balancing from known points. I have a script (below) that takes an image of a color card (Spyder Checkr 48) and returns the white, 20% gray, and black color card patches:

Color       L      A     B      sR   sG   sB   aR   aG   aB
Card White  96.04  2.16   2.6   249  242  238  247  242  237
20% Gray    80.44  1.17   2.05  202  198  195  199  196  193
Card Black  16.91  1.43  -0.81   43   41   43   46   46   47

Question: Since I know the true LAB, sRGB, and AdobeRGB values for specific portions of the image, what is the best way to white balance the image?

Here is a link to the images I am working with. Here is the code that extracts the color card patches (I am currently running it on Windows with Python 3.7):
from __future__ import print_function
import cv2
import imutils
import numpy as np
from matplotlib import pyplot as plt
import os
import sys

image = cv2.imread("PATH_TO_IMAGE")
template = cv2.imread("PATH_TO_TEMPLATE")
rtemplate = cv2.imread("PATH_TO_RIGHT_TEMPLATE")


def sift(image):
    sift = cv2.xfeatures2d.SIFT_create()
    kp, des = sift.detectAndCompute(image, None)
    return kp, des

def sift_match(im1, im2, vis=False, save=False):
    MIN_MATCH_COUNT = 10
    FLANN_INDEX_KDTREE = 0
    kp1, des1 = sift(im1)
    kp2, des2 = sift(im2)

    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=7)
    search_params = dict(checks=100)

    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)

    # Need to draw only good matches, so create a mask
    matchesMask = [[0, 0] for i in range(len(matches))]

    if vis is True:
        draw_params = dict(matchColor=(0, 255, 0),
                           singlePointColor=(255, 0, 0),
                           matchesMask=matchesMask,
                           flags=0)

        im3 = cv2.drawMatchesKnn(im1, kp1, im2, kp2, matches, None, **draw_params)

        if save:
            cv2.imwrite("tempSIFT_Match.png", im3)

        plt.imshow(im3), plt.show()

    # Keep only matches that pass Lowe's ratio test
    good = []
    for m, n in matches:
        if m.distance < 0.75 * n.distance:
            good.append(m)
    return kp1, des1, kp2, des2, good



def smartextractor(im1, im2, vis=False):
    # Detect features and compute descriptors.
    kp1, d1, kp2, d2, matches = sift_match(im1, im2, vis)
    kp1 = np.asarray(kp1)
    kp2 = np.asarray(kp2)

    # Extract location of good matches
    points1 = np.zeros((len(matches), 2), dtype=np.float32)
    points2 = np.zeros((len(matches), 2), dtype=np.float32)

    for i, match in enumerate(matches):
        points1[i, :] = kp1[match.queryIdx].pt
        points2[i, :] = kp2[match.trainIdx].pt

    # Find homography
    h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)

    if h is None:
        print("could not find homography")
        return None, None

    # Use homography
    height, width, channels = im2.shape
    im1Reg = cv2.warpPerspective(im1, h, (width, height))

    return im1Reg, h


def show_images(images, cols=1, titles=None):
    """
    Display a list of images in a single figure with matplotlib.
    """
    assert (titles is None) or (len(images) == len(titles))
    n_images = len(images)
    if titles is None:
        titles = ['Image (%d)' % i for i in range(1, n_images + 1)]
    fig = plt.figure()
    for n, (image, title) in enumerate(zip(images, titles)):
        a = fig.add_subplot(cols, int(np.ceil(n_images / float(cols))), n + 1)
        if image.ndim == 2:
            plt.gray()
        plt.imshow(image)
        a.set_title(title)
    fig.set_size_inches(np.array(fig.get_size_inches()) * n_images)
    plt.show()


def Sobel(img, bilateralFilter=True):
    # timestart = time.clock()
    # Accept either a file path or an already-loaded image array
    try:
        img = cv2.imread(img, 0)
    except TypeError:
        pass
    try:
        rheight, rwidth, rdepth = img.shape
        img1 = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    except ValueError:
        raise TypeError
    # cv2.imwrite('temp.png',img)
    _, s, v = cv2.split(img1)
    b, g, r = cv2.split(img)
    if bilateralFilter is True:
        # edge-preserving smoothing before computing gradients
        s = cv2.bilateralFilter(s, 11, 17, 17)
        v = cv2.bilateralFilter(v, 11, 17, 17)
        b = cv2.bilateralFilter(b, 11, 17, 17)
        g = cv2.bilateralFilter(g, 11, 17, 17)
        r = cv2.bilateralFilter(r, 11, 17, 17)
    # calculate sobel in x, y, diagonal directions with the following kernels
    sobelx = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]], dtype=np.float32)
    sobely = np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]], dtype=np.float32)
    sobeldl = np.array([[0, 1, 2], [-1, 0, 1], [-2, -1, 0]], dtype=np.float32)
    sobeldr = np.array([[2, 1, 0], [1, 0, -1], [0, -1, -2]], dtype=np.float32)
    # calculate the sobel on value of hsv
    gx = cv2.filter2D(v, -1, sobelx)
    gy = cv2.filter2D(v, -1, sobely)
    gdl = cv2.filter2D(v, -1, sobeldl)
    gdr = cv2.filter2D(v, -1, sobeldr)
    # combine sobel on value of hsv
    xylrv = 0.25 * gx + 0.25 * gy + 0.25 * gdl + 0.25 * gdr

    # calculate the sobel on saturation of hsv
    sx = cv2.filter2D(s, -1, sobelx)
    sy = cv2.filter2D(s, -1, sobely)
    sdl = cv2.filter2D(s, -1, sobeldl)
    sdr = cv2.filter2D(s, -1, sobeldr)
    # combine sobel on saturation of hsv
    xylrs = 0.25 * sx + 0.25 * sy + 0.25 * sdl + 0.25 * sdr

    # combine value sobel and saturation sobel
    xylrc = 0.5 * xylrv + 0.5 * xylrs
    xylrc[xylrc < 6] = 0

    # calculate the sobel on green
    grx = cv2.filter2D(g, -1, sobelx)
    gry = cv2.filter2D(g, -1, sobely)
    grdl = cv2.filter2D(g, -1, sobeldl)
    grdr = cv2.filter2D(g, -1, sobeldr)
    # combine sobel on green
    xylrgr = 0.25 * grx + 0.25 * gry + 0.25 * grdl + 0.25 * grdr

    # calculate the sobel on blue
    bx = cv2.filter2D(b, -1, sobelx)
    by = cv2.filter2D(b, -1, sobely)
    bdl = cv2.filter2D(b, -1, sobeldl)
    bdr = cv2.filter2D(b, -1, sobeldr)
    # combine sobel on blue
    xylrb = 0.25 * bx + 0.25 * by + 0.25 * bdl + 0.25 * bdr

    # calculate the sobel on red
    rx = cv2.filter2D(r, -1, sobelx)
    ry = cv2.filter2D(r, -1, sobely)
    rdl = cv2.filter2D(r, -1, sobeldl)
    rdr = cv2.filter2D(r, -1, sobeldr)
    # combine sobel on red
    xylrr = 0.25 * rx + 0.25 * ry + 0.25 * rdl + 0.25 * rdr

    # combine the three RGB sobel outputs
    xylrrgb = 0.33 * xylrgr + 0.33 * xylrb + 0.33 * xylrr
    xylrrgb[xylrrgb < 6] = 0

    # combine HSV and RGB sobel outputs, then threshold
    xylrc = 0.5 * xylrc + 0.5 * xylrrgb
    xylrc[xylrc < 6] = 0
    xylrc[xylrc > 25] = 255

    return xylrc

print("extracting image")
extractedImage, _ = smartextractor(image, template)

print("extracting right image")
rextractedImage, _ = smartextractor(extractedImage, rtemplate, vis=False)
grextractedImage = cv2.cvtColor(rextractedImage, cv2.COLOR_BGR2GRAY)
bfsobelImg = Sobel(rextractedImage)
sobelImg = Sobel(rextractedImage, bilateralFilter=False)
csobelImg = cv2.add(bfsobelImg, sobelImg)
csobelImg[csobelImg < 6] = 0
csobelImg[csobelImg > 18] = 255

csobelImg = csobelImg.astype(np.uint8)
img2 = csobelImg.copy()
ret, thresh = cv2.threshold(img2, 18, 255, 0)
contours = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

contours = imutils.grab_contours(contours)
contours = sorted(contours, key=cv2.contourArea, reverse=True)



count = 0
trigger = False
for c in contours:
    # approximate the contour
    peri = cv2.arcLength(c, True)
    contours[count] = cv2.approxPolyDP(c, 0.05 * peri, True)

    if len(contours[count]) == 4:
        if trigger is False:
            screenCnt = contours[count]
            trigger = True

    count += 1

tl = screenCnt[0]
tr = screenCnt[1]
bl = screenCnt[3]
br = screenCnt[2]

tLy, tLx = tl[0]
tRy, tRx = tr[0]
bLy, bLx = bl[0]
bRy, bRx = br[0]

ratio = .15
realSpace = (3/16)
boxwidth = int(((tRx - tLx) + (bRx - bLx))*.5 - (tLx + bLx)*.5)
boxheight = int(((bRy - tRy) + (bLy - tLy))*.5 - (tRy + tLy)*.5)
spaceWidth = int((boxwidth + boxheight)*.5*realSpace)
boxcenter = [int(((bRy - tRy)*.5 + (bLy - tLy)*.5)*.5), int(((tRx - tLx)*.5 + (bRx - bLx)*.5)*.5)]
roitl = [boxcenter[0] - int(ratio*boxheight), boxcenter[1] - int(ratio*boxwidth)]
roitr = [boxcenter[0] - int(ratio*boxheight), boxcenter[1] + int(ratio*boxwidth)]
roibl = [boxcenter[0] + int(ratio*boxheight), boxcenter[1] - int(ratio*boxwidth)]
roibr = [boxcenter[0] + int(ratio*boxheight), boxcenter[1] + int(ratio*boxwidth)]

spacing = int((boxwidth + boxheight)*.5)+spaceWidth
roiWhite = np.array((roitl, roitr, roibr, roibl))


roiGray = np.array(([roitl[1], roitl[0] + spacing * 1], [roitr[1], roitr[0] + spacing * 1],
                    [roibr[1], roibr[0] + spacing * 1], [roibl[1], roibl[0] + spacing * 1]))

roiBlack = np.array(([roitl[1], roitl[0] + spacing * 6], [roitr[1], roitr[0] + spacing * 6],
                     [roibr[1], roibr[0] + spacing * 6], [roibl[1], roibl[0] + spacing * 6]))

whiteAvgb, whiteAvgg, whiteAvgr, _ = cv2.mean(rextractedImage[(roitl[0] + spacing * 0):(roibr[0] + spacing * 0),
                                                              roitl[1]:roibr[1]])
grayAvgb, grayAvgg, grayAvgr, _ = cv2.mean(rextractedImage[(roitl[0] + spacing * 1):(roibr[0] + spacing * 1),
                                                           roitl[1]:roibr[1]])
blackAvgb, blackAvgg, blackAvgr, _ = cv2.mean(rextractedImage[(roitl[0] + spacing * 6):(roibr[0] + spacing * 6),
                                                              roitl[1]:roibr[1]])

whiteROI = rextractedImage[(roitl[0]+spacing*0):(roibr[0]+spacing*0), roitl[1]:roibr[1]]
grayROI = rextractedImage[(roitl[0]+spacing*1):(roibr[0]+spacing*1), roitl[1]:roibr[1]]
blackROI = rextractedImage[(roitl[0]+spacing*6):(roibr[0]+spacing*6), roitl[1]:roibr[1]]
imageList = [whiteROI, grayROI, blackROI]
show_images(imageList, cols=1)

correctedImage = rextractedImage.copy()

whiteROI[:, :, 0] = whiteAvgb
whiteROI[:, :, 1] = whiteAvgg
whiteROI[:, :, 2] = whiteAvgr

grayROI[:, :, 0] = grayAvgb
grayROI[:, :, 1] = grayAvgg
grayROI[:, :, 2] = grayAvgr

blackROI[:, :, 0] = blackAvgb
blackROI[:, :, 1] = blackAvgg
blackROI[:, :, 2] = blackAvgr

imageList = [whiteROI, grayROI, blackROI]
show_images(imageList, cols=1)

# SPYDER COLOR CHECKR Values: http://www.bartneck.de/2017/10/24/patch-color-definitions-for-datacolor-spydercheckr-48/

blank = np.zeros_like(csobelImg)
maskedImg = blank.copy()
maskedImg = cv2.fillConvexPoly(maskedImg, roiWhite, 255)
maskedImg = cv2.fillConvexPoly(maskedImg, roiGray, 255)
maskedImg = cv2.fillConvexPoly(maskedImg, roiBlack, 255)

res = cv2.bitwise_and(rextractedImage, rextractedImage, mask=maskedImg)
# maskedImg = cv2.fillConvexPoly(maskedImg, roi2Black, 255)

cv2.drawContours(blank, contours, -1, 255, 3)

outputSquare = np.zeros_like(csobelImg)
cv2.drawContours(outputSquare, [screenCnt], -1, 255, 3)

imageList = [rextractedImage, grextractedImage, bfsobelImg, sobelImg, csobelImg, blank, outputSquare, maskedImg, res]
show_images(imageList, cols=3)

sys.exit()

Best Answer

Given the RGB values of the white patch, you can correct the image's white balance by dividing by those values. That is, apply a linear transformation that gives the white patch the same level in all three channels:

lum = (whiteR + whiteG + whiteB) / 3
imgR = imgR * lum / whiteR
imgG = imgG * lum / whiteG
imgB = imgB * lum / whiteB

Multiplying by lum ensures that the average intensity does not change.

(The computation of lum is better done with proper weights: 0.2126, 0.7152, 0.0722, but I want to keep things simple. It only makes a significant difference if the input white is way off the mark, in which case you have other problems too.)
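A minimal sketch of this scaling with NumPy, assuming a BGR uint8 image as loaded by OpenCV and the white-patch channel means (whiteAvgb, whiteAvgg, whiteAvgr) computed by the script in the question; the function name white_balance is mine:

import numpy as np

def white_balance(img, white_bgr):
    # Scale each channel by lum / white_c so the white patch becomes
    # neutral while the average intensity stays roughly constant.
    out = img.astype(np.float32)
    lum = sum(white_bgr) / 3.0
    for c in range(3):
        out[:, :, c] *= lum / white_bgr[c]
    return np.clip(out, 0, 255).astype(np.uint8)

# e.g.: balanced = white_balance(rextractedImage, (whiteAvgb, whiteAvgg, whiteAvgr))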

Note: this transformation works best in a linear RGB space. If the image is stored as sRGB or similar (raw images from a camera will be linear RGB, a JPEG will be sRGB), then both the image and the white RGB values should first be converted to linear RGB. See here for the relevant equations.
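The sRGB transfer function is standard; a sketch of both directions, with values scaled to [0, 1] (helper names are mine):

import numpy as np

def srgb_to_linear(c):
    # decode sRGB-encoded values in [0, 1] to linear RGB
    c = np.asarray(c, dtype=np.float32)
    return np.where(c <= 0.04045, c / 12.92, ((c + 0.055) / 1.055) ** 2.4)

def linear_to_srgb(c):
    # encode linear RGB in [0, 1] back to sRGB
    c = np.asarray(c, dtype=np.float32)
    return np.where(c <= 0.0031308, c * 12.92, 1.055 * c ** (1.0 / 2.4) - 0.055)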

For better precision, you can also apply the above using the RGB values of the gray patch. For each channel, take the average multiplication factor (lum/whiteR) derived from the white and gray patches, and apply it to the image.
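One way to write that averaging, continuing the sketch above (gray_bgr would be the script's 20% gray patch means):

def white_balance_avg(img, white_bgr, gray_bgr):
    # Average the per-channel factor derived from the white and gray patches.
    out = img.astype(np.float32)
    lum_w = sum(white_bgr) / 3.0
    lum_g = sum(gray_bgr) / 3.0
    for c in range(3):
        out[:, :, c] *= 0.5 * (lum_w / white_bgr[c] + lum_g / gray_bgr[c])
    return np.clip(out, 0, 255).astype(np.uint8)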

The black level can be subtracted from the image before determining the white RGB values and before correcting the white balance. This improves contrast and color perception, but it is not part of white balancing.
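Black-level subtraction, applied before computing the white factors, could look like this (again a sketch; black_bgr would be the script's black patch means):

def subtract_black(img, black_bgr):
    # Shift each channel down by the measured black level; keep float
    # so the subsequent white-balance scaling operates on the result.
    out = img.astype(np.float32)
    for c in range(3):
        out[:, :, c] -= black_bgr[c]
    return np.clip(out, 0, None)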

Full color correction is a lot more complex, and I won't go into it here.

Regarding python-3.x - White balancing a photo from known points, we found a similar question on Stack Overflow: https://stackoverflow.com/questions/54470148/
