I am trying to implement a basic face recognition system using uniform circular LBP (8 points in a neighbourhood of radius 1). I take an image, resize it to 200 x 200 pixels, and split it into an 8x8 grid of sub-images. I then compute the histogram of each sub-image, which gives me a list of histograms. To compare two images, I compute the chi-square distance between corresponding histograms and produce a score.
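The comparison step works roughly like this (a minimal sketch, not my exact code; the helper names are just for illustration and assume each image is represented as a list of NumPy histogram arrays):

import numpy as np

def chi_square_distance(h1, h2, eps=1e-10):
    # chi-square distance between two histograms
    h1 = np.asarray(h1, dtype=np.float64)
    h2 = np.asarray(h2, dtype=np.float64)
    return np.sum((h1 - h2) ** 2 / (h1 + h2 + eps))

def compare_images(histograms_a, histograms_b):
    # sum of chi-square distances over corresponding block histograms
    return sum(chi_square_distance(a, b) for a, b in zip(histograms_a, histograms_b))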
Here is my uniform LBP implementation:
import numpy as np
import math
uniform = {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 58, 6: 5, 7: 6, 8: 7, 9: 58, 10: 58, 11: 58, 12: 8, 13: 58, 14: 9, 15: 10, 16: 11, 17: 58, 18: 58, 19: 58, 20: 58, 21: 58, 22: 58, 23: 58, 24: 12, 25: 58, 26: 58, 27: 58, 28: 13, 29: 58, 30: 14, 31: 15, 32: 16, 33: 58, 34: 58, 35: 58, 36: 58, 37: 58, 38: 58, 39: 58, 40: 58, 41: 58, 42: 58, 43: 58, 44: 58, 45: 58, 46: 58, 47: 58, 48: 17, 49: 58, 50: 58, 51: 58, 52: 58, 53: 58, 54: 58, 55: 58, 56: 18, 57: 58, 58: 58, 59: 58, 60: 19, 61: 58, 62: 20, 63: 21, 64: 22, 65: 58, 66: 58, 67: 58, 68: 58, 69: 58, 70: 58, 71: 58, 72: 58, 73: 58, 74: 58, 75: 58, 76: 58, 77: 58, 78: 58, 79: 58, 80: 58, 81: 58, 82: 58, 83: 58, 84: 58, 85: 58, 86: 58, 87: 58, 88: 58, 89: 58, 90: 58, 91: 58, 92: 58, 93: 58, 94: 58, 95: 58, 96: 23, 97: 58, 98: 58, 99: 58, 100: 58, 101: 58, 102: 58, 103: 58, 104: 58, 105: 58, 106: 58, 107: 58, 108: 58, 109: 58, 110: 58, 111: 58, 112: 24, 113: 58, 114: 58, 115: 58, 116: 58, 117: 58, 118: 58, 119: 58, 120: 25, 121: 58, 122: 58, 123: 58, 124: 26, 125: 58, 126: 27, 127: 28, 128: 29, 129: 30, 130: 58, 131: 31, 132: 58, 133: 58, 134: 58, 135: 32, 136: 58, 137: 58, 138: 58, 139: 58, 140: 58, 141: 58, 142: 58, 143: 33, 144: 58, 145: 58, 146: 58, 147: 58, 148: 58, 149: 58, 150: 58, 151: 58, 152: 58, 153: 58, 154: 58, 155: 58, 156: 58, 157: 58, 158: 58, 159: 34, 160: 58, 161: 58, 162: 58, 163: 58, 164: 58, 165: 58, 166: 58, 167: 58, 168: 58, 169: 58, 170: 58, 171: 58, 172: 58, 173: 58, 174: 58, 175: 58, 176: 58, 177: 58, 178: 58, 179: 58, 180: 58, 181: 58, 182: 58, 183: 58, 184: 58, 185: 58, 186: 58, 187: 58, 188: 58, 189: 58, 190: 58, 191: 35, 192: 36, 193: 37, 194: 58, 195: 38, 196: 58, 197: 58, 198: 58, 199: 39, 200: 58, 201: 58, 202: 58, 203: 58, 204: 58, 205: 58, 206: 58, 207: 40, 208: 58, 209: 58, 210: 58, 211: 58, 212: 58, 213: 58, 214: 58, 215: 58, 216: 58, 217: 58, 218: 58, 219: 58, 220: 58, 221: 58, 222: 58, 223: 41, 224: 42, 225: 43, 226: 58, 227: 44, 228: 58, 229: 58, 230: 58, 231: 45, 232: 58, 233: 58, 234: 58, 235: 58, 236: 58, 237: 58, 238: 58, 239: 46, 240: 47, 241: 48, 242: 58, 243: 49, 244: 58, 245: 58, 246: 58, 247: 50, 248: 51, 249: 52, 250: 58, 251: 53, 252: 54, 253: 55, 254: 56, 255: 57}
def bilinear_interpolation(i, j, y, x, img):
    fy, fx = int(y), int(x)
    cy, cx = math.ceil(y), math.ceil(x)
    # calculate the fractional parts
    ty = y - fy
    tx = x - fx
    w1 = (1 - tx) * (1 - ty)
    w2 = tx * (1 - ty)
    w3 = (1 - tx) * ty
    w4 = tx * ty
    return w1 * img[i + fy, j + fx] + w2 * img[i + fy, j + cx] + \
        w3 * img[i + cy, j + fx] + w4 * img[i + cy, j + cx]

def thresholded(center, pixels):
    out = []
    for a in pixels:
        if a > center:
            out.append(1)
        else:
            out.append(0)
    return out

def uniform_circular(img, P, R):
    ysize, xsize = img.shape
    transformed_img = np.zeros((ysize - 2 * R, xsize - 2 * R), dtype=np.uint8)
    for y in range(R, len(img) - R):
        for x in range(R, len(img[0]) - R):
            center = img[y, x]
            pixels = []
            for point in range(0, P):
                r = R * math.cos(2 * math.pi * point / P)
                c = R * math.sin(2 * math.pi * point / P)
                pixels.append(bilinear_interpolation(y, x, r, c, img))
            values = thresholded(center, pixels)
            res = 0
            for a in range(0, P):
                res += values[a] << a
            transformed_img.itemset((y - R, x - R), uniform[res])
    transformed_img = transformed_img[R:-R, R:-R]
    return transformed_img
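As a side note on the uniform lookup table above: a pattern counts as uniform when its circular binary representation has at most two 0/1 transitions; for P = 8 the 58 uniform patterns get their own bins and all non-uniform patterns share the last bin (58). A minimal sketch of how such a table can be generated (the function name is mine, for illustration only):

def build_uniform_table(P=8):
    # number of 0/1 transitions in the circular binary representation
    def transitions(pattern):
        return sum(((pattern >> i) & 1) != ((pattern >> ((i + 1) % P)) & 1)
                   for i in range(P))

    table = {}
    next_label = 0
    for pattern in range(2 ** P):
        if transitions(pattern) <= 2:          # uniform pattern: gets its own bin
            table[pattern] = next_label
            next_label += 1
        else:                                  # non-uniform patterns share one bin
            table[pattern] = P * (P - 1) + 2   # = 58 for P = 8
    return table

For P = 8 this should reproduce the hard-coded uniform dictionary above.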
I ran an experiment on the AT&T database, taking 2 gallery images and 8 probe images per subject. The ROC result of the experiment is:
In the above ROC, the x axis represents the false acceptance rate and the y axis the true acceptance rate. The accuracy seems poor by uniform LBP standards. I am sure there is something wrong with my implementation. It would be great if someone could help me. Thanks for reading.
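For reference, FAR/TAR points of this kind can be computed by sweeping a threshold over the genuine (same-subject) and impostor (different-subject) scores. A minimal sketch, assuming lower chi-square scores mean a better match (the names are just for illustration):

import numpy as np

def roc_points(genuine_scores, impostor_scores):
    # sweep thresholds over all observed scores; lower score = better match
    genuine = np.asarray(genuine_scores, dtype=np.float64)
    impostor = np.asarray(impostor_scores, dtype=np.float64)
    thresholds = np.sort(np.concatenate([genuine, impostor]))
    far = [(impostor <= t).mean() for t in thresholds]  # false acceptance rate
    tar = [(genuine <= t).mean() for t in thresholds]   # true acceptance rate
    return np.array(far), np.array(tar)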
Edit:
I think I made a mistake in the code above. I go clockwise, while the papers on LBP suggest I should go counterclockwise when assigning the weights. The line c = R * math.sin(2 * math.pi * point / P)
should be c = -R * math.sin(2 * math.pi * point / P).
The results after this edit were worse, which suggests something else is wrong with my code. I think the way I pick the coordinates for interpolation is messed up.
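Just to spell out the convention I am referring to: the references take the column offset from the cosine term and the row offset from the negated sine term, and then take floor/ceil of those offsets for the bilinear interpolation. An illustrative sketch of only the offset computation (not the code above; the function name is mine):

import math

def neighbour_offsets(P=8, R=1.0):
    # (row_offset, col_offset) for the P sampling points on a circle of radius R,
    # following the x = R*cos(...), y = -R*sin(...) convention
    offsets = []
    for p in range(P):
        col = R * math.cos(2.0 * math.pi * p / P)    # x offset
        row = -R * math.sin(2.0 * math.pi * p / P)   # y offset (image rows grow downwards)
        offsets.append((row, col))
    return offsets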
Edit: Next I tried to port @bytefish's code here and implemented uniform circular LBP using the uniform hashmap.
def uniform_circular(img, P, R):
    ysize, xsize = img.shape
    transformed_img = np.zeros((ysize - 2 * R, xsize - 2 * R), dtype=np.uint8)
    for point in range(0, P):
        x = R * math.cos(2 * math.pi * point / P)
        y = -R * math.sin(2 * math.pi * point / P)
        fy, fx = int(y), int(x)
        cy, cx = math.ceil(y), math.ceil(x)
        # calculate the fractional parts
        ty = y - fy
        tx = x - fx
        w1 = (1 - tx) * (1 - ty)
        w2 = tx * (1 - ty)
        w3 = (1 - tx) * ty
        w4 = tx * ty
        for i in range(R, ysize - R):
            for j in range(R, xsize - R):
                t = w1 * img[i + fy, j + fx] + w2 * img[i + fy, j + cx] + \
                    w3 * img[i + cy, j + fx] + w4 * img[i + cy, j + cx]
                center = img[i, j]
                pixels = []
                res = 0
                transformed_img[i - R, j - R] += (t > center) << point
    for i in range(R, ysize - R):
        for j in range(R, xsize - R):
            transformed_img[i - R, j - R] = uniform[transformed_img[i - R, j - R]]
Here is the corresponding ROC:
I tried implementing the same code in C++. Here is the code:
#include <stdio.h>
#include <stdlib.h>
#include <opencv2/opencv.hpp>
using namespace cv;
int* uniform_circular_LBP_histogram(Mat& src) {
    int i, j;
    int radius = 1;
    int neighbours = 8;
    Size size = src.size();
    int *hist_array = (int *)calloc(59,sizeof(int));
int uniform[] = {0,1,2,3,4,58,5,6,7,58,58,58,8,58,9,10,11,58,58,58,58,58,58,58,12,58,58,58,13,58,14,15,16,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,17,58,58,58,58,58,58,58,18,58,58,58,19,58,20,21,22,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,23,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,24,58,58,58,58,58,58,58,25,58,58,58,26,58,27,28,29,30,58,31,58,58,58,32,58,58,58,58,58,58,58,33,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,34,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,35,36,37,58,38,58,58,58,39,58,58,58,58,58,58,58,40,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,41,42,43,58,44,58,58,58,45,58,58,58,58,58,58,58,46,47,48,58,49,58,58,58,50,51,52,58,53,54,55,56,57};
    Mat dst = Mat::zeros(size.height - 2 * radius, size.width - 2 * radius, CV_8UC1);
    for (int n = 0; n < neighbours; n++) {
        float x = static_cast<float>(radius) * cos(2.0 * M_PI * n / static_cast<float>(neighbours));
        float y = static_cast<float>(radius) * -sin(2.0 * M_PI * n / static_cast<float>(neighbours));
        int fx = static_cast<int>(floor(x));
        int fy = static_cast<int>(floor(y));
        int cx = static_cast<int>(ceil(x));
        int cy = static_cast<int>(ceil(x));
        float ty = y - fy;
        float tx = y - fx;
        float w1 = (1 - tx) * (1 - ty);
        float w2 = tx * (1 - ty);
        float w3 = (1 - tx) * ty;
        float w4 = 1 - w1 - w2 - w3;
        for (i = 0; i < 59; i++) {
            hist_array[i] = 0;
        }
        for (i = radius; i < size.height - radius; i++) {
            for (j = radius; j < size.width - radius; j++) {
                float t = w1 * src.at<uchar>(i + fy, j + fx) +
                          w2 * src.at<uchar>(i + fy, j + cx) +
                          w3 * src.at<uchar>(i + cy, j + fx) +
                          w4 * src.at<uchar>(i + cy, j + cx);
                dst.at<uchar>(i - radius, j - radius) += ((t > src.at<uchar>(i,j)) &&
                    (abs(t - src.at<uchar>(i,j)) > std::numeric_limits<float>::epsilon())) << n;
            }
        }
    }
    for (i = radius; i < size.height - radius; i++) {
        for (j = radius; j < size.width - radius; j++) {
            int val = uniform[dst.at<uchar>(i - radius, j - radius)];
            dst.at<uchar>(i - radius, j - radius) = val;
            hist_array[val] += 1;
        }
    }
    return hist_array;
}
int main( int argc, char** argv )
{
    Mat src;
    int i,j;
    src = imread( argv[1], 0 );
    if( argc != 2 || !src.data )
    {
        printf( "No image data \n" );
        return -1;
    }
    const int width = 200;
    const int height = 200;
    Size size = src.size();
    Size new_size = Size();
    new_size.height = 200;
    new_size.width = 200;
    Mat resized_src;
    resize(src, resized_src, new_size, 0, 0, INTER_CUBIC);
    int count = 1;
    for (i = 0; i <= width - 8; i += 25) {
        for (j = 0; j <= height - 8; j += 25) {
            Mat new_mat = resized_src.rowRange(i, i + 25).colRange(j, j + 25);
            int *hist = uniform_circular_LBP_histogram(new_mat);
            int z;
            for (z = 0; z < 58; z++) {
                std::cout << hist[z] << ",";
            }
            std::cout << hist[z] << "\n";
            count += 1;
        }
    }
    return 0;
}
The corresponding ROC:
I also ran a rank-based experiment and obtained this CMC curve.
Some details about the CMC curve: the x axis represents the rank (1-10) and the y axis represents the accuracy (0-1). So I get a Rank-1 accuracy of over 80%.
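For reference, a CMC curve of this kind can be computed by sorting, for each probe, the gallery scores and noting the rank at which the correct identity first appears. A minimal sketch, assuming a probe-by-gallery score matrix where lower scores mean a better match and every probe identity is present in the gallery (all names are mine, for illustration only):

import numpy as np

def cmc_curve(score_matrix, probe_labels, gallery_labels, max_rank=10):
    # score_matrix[i, j]: distance between probe i and gallery item j (lower = better)
    scores = np.asarray(score_matrix, dtype=np.float64)
    hits = np.zeros(max_rank)
    for i, probe_label in enumerate(probe_labels):
        order = np.argsort(scores[i])                   # best matches first
        ranked_labels = [gallery_labels[j] for j in order]
        rank = ranked_labels.index(probe_label) + 1     # rank of first correct match
        if rank <= max_rank:
            hits[rank - 1:] += 1
    return hits / len(probe_labels)                     # identification rate at rank 1..max_rank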
Best answer
I don't know Python, but most likely your code is broken.
My suggestion is to follow these two links and try to port one of the C++ implementations to Python. The first link also contains some information about LBP.
http://www.bytefish.de/blog/local_binary_patterns/
https://github.com/berak/uniform-lbp
One more thing I can say: you mention that you resize the images to 200x200. Why do you do that? As far as I know, the AT&T images are smaller than that, so you are only making the images larger. I don't think that will help you, and it may have a negative effect on performance.
Regarding c++ - Uniform Circular LBP face recognition implementation, a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/20581267/