
python - How to label and measure the size of blobs?


I am learning image analysis with Python and I am only a beginner. I was able to write some code (shared below) to detect the blobs (nanoparticles) in this nanoparticle image:

Nanoparticle image

Using cv2.connectedComponents I can detect that there are 10 nanoparticles, but now I need to:

  • Label each nanoparticle with a number to produce a final image.
  • Count the number of pixels that make up each nanoparticle, so I can determine their size.

  • I have tried to do some research, but I could not find anything that works for me. Is anyone willing to help? It would be great if you could suggest some code, and even better if you could explain it as well!
    import numpy as np
    import cv2
    from matplotlib import pyplot as plt
    img = cv2.imread('Izzie - - 0002.tif')

    #show figure using matplotlib
    plt.figure(1)
    plt.subplot(2, 2, 1) # Figure 1 has subplots: 2 rows, 2 columns, and this is plot 1
    plt.gca().set_title('Original')
    plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)) # , cmap='gray'

    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    plt.figure(1)
    plt.subplot(2, 2, 2) # Figure 1 has subplots: 2 rows, 2 columns, and this is plot 2
    plt.gca().set_title('Gray')
    plt.imshow(gray, cmap='gray') # gray is single-channel, so display it with a grayscale colormap


    # In global thresholding (the usual approach) we use an arbitrarily chosen value as the threshold.
    # In contrast, Otsu's method avoids having to choose a value and determines it automatically.
    # The method returns two outputs: the first is the threshold that was used and the second
    # output is the thresholded image.

    ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)

    print('Ret = ', ret) # Threshold value determined automatically by Otsu's method

    plt.figure(1)
    plt.subplot(2, 2, 3)
    plt.gca().set_title('Threshold')
    plt.imshow(thresh, cmap='gray') # thresh is single-channel


    #-------------------------------------------------------------------------------------------
    # MORPHOLOGICAL TRANSFORMATION
    # noise removal using morphological transformations
    # For more info see: https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_morphological_ops/py_morphological_ops.html

    # Set up the kernel - structuring element
    kernel = np.ones((3,3), np.uint8) # 3x3 structuring element of 8-bit unsigned integers

    # Remove noise using Opening (erosion followed by dilation)
    opening = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel, iterations = 4)
    plt.figure(2)
    plt.subplot(2, 2, 1)
    plt.gca().set_title('Noise rem')
    plt.imshow(opening, cmap='gray')


    # sure background area
    # dilation operation
    sure_bg = cv2.dilate(opening,kernel,iterations=3)

    plt.figure(2)
    plt.subplot(2, 2, 2)
    plt.gca().set_title('Dilated img')
    plt.imshow(sure_bg, cmap='gray')



    # Apply a distance transform: each foreground pixel gets the distance to the nearest background pixel, which helps separate touching objects
    dist_transform = cv2.distanceTransform(opening,cv2.DIST_L2,5)

    plt.figure(2)
    plt.subplot(2, 2, 3)
    plt.gca().set_title('Dist_transform')
    plt.imshow(dist_transform, cmap='gray') # float image; matplotlib rescales it for display



    # Apply a threshold to go back to binary B&W image
    ret, sure_fg = cv2.threshold(dist_transform, 0.7 * dist_transform.max(),255,0)
    print('Ret threshold: ', ret)

    plt.figure(2)
    plt.subplot(2, 2, 4)
    plt.gca().set_title('Threshold')
    plt.imshow(sure_fg, cmap='gray')


    # Finding unknown region
    sure_fg = np.uint8(sure_fg) # creates an 8-bit unsigned matrix

    plt.figure(3)
    plt.subplot(1, 2, 1)
    plt.gca().set_title('Sure_fg')
    plt.imshow(sure_fg, cmap='gray')


    unknown = cv2.subtract(sure_bg,sure_fg)

    plt.figure(3)
    plt.subplot(1, 2, 2)
    plt.gca().set_title('Unknown')
    plt.imshow(unknown, cmap='gray')


    #----------------------------------------------------------------------------------------------------------------------#

    # Marker labelling
    # connectedComponents labels every foreground object in the binary image. For an explanation see: https://www.youtube.com/watch?v=hMIrQdX4BkE
    # It returns two outputs: the number of labels and an image with the labelled objects (label 0 is the background).

    n_objects, markers = cv2.connectedComponents(sure_fg)

    plt.figure(4)
    plt.subplot(2, 1, 1)
    plt.gca().set_title('markers')
    plt.imshow(markers)


    # Add one to all labels so that sure background is not 0, but 1
    markers = markers+1

    # Now, mark the region of unknown with zero
    markers[unknown==255] = 0


    markers = cv2.watershed(img, markers)
    img[markers == -1] = [255, 0, 0] # overlay the watershed boundaries (label -1) in red; labels 2, 3, 4, ... are the different objects detected in the image

    plt.figure(4)
    plt.subplot(2, 1, 2)
    plt.gca().set_title('markers')
    plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))



    print('Number of particles detected: ', n_objects-2)


    plt.show()
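
For reference, the numbering and per-particle pixel counts asked for above can also be read directly from cv2.connectedComponentsWithStats. This is only a minimal sketch, assuming the sure_fg mask and the img array produced by the script above are available; the font and colour choices are illustrative:

    import cv2

    # Minimal sketch: assumes sure_fg (8-bit binary mask) and img (BGR image)
    # from the script above are already available.
    n_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(sure_fg)

    # Label 0 is the background; every other label corresponds to one blob.
    for label in range(1, n_labels):
        area_px = stats[label, cv2.CC_STAT_AREA]   # number of pixels in this blob
        cx, cy = centroids[label]                  # centroid, used to place the number
        cv2.putText(img, str(label), (int(cx), int(cy)),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
        print('Particle', label, '|', area_px, 'pixels')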

Best Answer

If your particles are (nearly) black, do not use Otsu thresholding; use a fixed value to mask the (nearly) black pixels instead. On the inverse binarized image you can then apply morphological closing (to get the whole particles) and opening (to get rid of the background noise), see cv2.morphologyEx. Afterwards, you find all contours to obtain the particles as well as the scale bar, see cv2.findContours. We determine the bounding rectangles of all contours, both to place some labels on the particles in the input image and to calculate the horizontal and vertical diameters of the particles by dividing the width/height of each particle's bounding box by the width of the scale bar's bounding box.
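
As a quick illustration of that last step (the pixel values here are made up for the example): a particle whose bounding box is 19 px wide, measured against a 456 px wide bounding box for a 500 nm scale bar, would get a horizontal diameter of 19 / 456 * 500 ≈ 20.8 nm.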

In my code I have omitted a few things, including the Matplotlib output. (While writing this, I only just noticed that you have a lot more code; I did not see the scroll bar... I did not look at that code and did not incorporate it.)

    import cv2
    from matplotlib import pyplot as plt
    from skimage import io # Only needed for web grabbing images, use cv2.imread for local images

    # Read image from web; Attention: it's already RGB
    img = io.imread('/image/J46nA.jpg')

    # Convert to grayscale; Attention: Source is RGB from web grabbing
    gray = cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)

    # Use fixed threshold to mask black areas
    _, thresh = cv2.threshold(gray, 30, 255, cv2.THRESH_BINARY_INV)

    # Morphological closing to get whole particles; opening to get rid of noise
    img_mop = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7)))
    img_mop = cv2.morphologyEx(img_mop, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (15, 15)))

    # Find contours
    cnts, _ = cv2.findContours(img_mop, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

    # Get bounding rectangles for the scale and the particles
    thr_size = 2000
    scale = [cv2.boundingRect(cnt) for cnt in cnts if cv2.contourArea(cnt) > thr_size]
    particles = [cv2.boundingRect(cnt) for cnt in cnts if cv2.contourArea(cnt) < thr_size]

    # Iterate all particles, add label and diameters to input image
    for i, p in enumerate(particles):
        x = p[0]
        y = max(0, p[1] - 10)              # place the label slightly above the bounding box
        d_h = p[2] / scale[0][2] * 500     # horizontal diameter in nm (scale bar represents 500 nm)
        d_v = p[3] / scale[0][2] * 500     # vertical diameter in nm
        cv2.putText(img, str(i), (x, y), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
        print('Particle ' + str(i) + ' | Horizontal diameter: ' + '{:.2f}'.format(d_h) +
              ' nm, vertical diameter: ' + '{:.2f}'.format(d_v) + ' nm')

    cv2.imshow('img', cv2.resize(img, dsize=(0, 0), fx=0.5, fy=0.5))
    cv2.imshow('thresh', cv2.resize(thresh, dsize=(0, 0), fx=0.5, fy=0.5))
    cv2.imshow('img_mop', cv2.resize(img_mop, dsize=(0, 0), fx=0.5, fy=0.5))
    cv2.waitKey(0)
    cv2.destroyAllWindows()
The thresh image with the fixed threshold:

    Thresholded image
The img_mop image after applying the morphological operations (note: the scale bar is still there, so we can use it to approximate sizes):

    Morph image

Finally, the input/output image img with the corresponding labels (JPG had to be used here because of image size restrictions):

    Final image

Last but not least, the print output:

    Particle 0 | Horizontal diameter: 20.83 nm, vertical diameter: 23.03 nm
    Particle 1 | Horizontal diameter: 20.83 nm, vertical diameter: 20.83 nm
    Particle 2 | Horizontal diameter: 19.74 nm, vertical diameter: 17.54 nm
    Particle 3 | Horizontal diameter: 23.03 nm, vertical diameter: 23.03 nm
    Particle 4 | Horizontal diameter: 24.12 nm, vertical diameter: 24.12 nm
    Particle 5 | Horizontal diameter: 21.93 nm, vertical diameter: 20.83 nm
    Particle 6 | Horizontal diameter: 24.12 nm, vertical diameter: 23.03 nm
    Particle 7 | Horizontal diameter: 21.93 nm, vertical diameter: 23.03 nm
    Particle 8 | Horizontal diameter: 19.74 nm, vertical diameter: 21.93 nm
    Particle 9 | Horizontal diameter: 19.74 nm, vertical diameter: 19.74 nm
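
Since the question also asks for the number of pixels making up each particle, one possible extension is to report the contour area as well. A short sketch, reusing cnts and thr_size from the code above (cv2.contourArea gives an approximation of the filled pixel count; for an exact count you could draw each contour into a blank mask and count the non-zero pixels):

    # Sketch: per-particle pixel areas from the same contours.
    # Assumes cnts and thr_size from the code above are available.
    particle_cnts = [cnt for cnt in cnts if cv2.contourArea(cnt) < thr_size]
    for i, cnt in enumerate(particle_cnts):
        print('Particle ' + str(i) + ' | Area: ' + '{:.0f}'.format(cv2.contourArea(cnt)) + ' px')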

Hope that helps!

Regarding "python - How to label and measure the size of blobs?", a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/59129982/
