first commit

greglight, 3 years ago
commit 65349758d1
5 changed files with 1259 additions and 0 deletions

  1. Dockerfile_cpu (+29, -0)
  2. Dockerfile_cuda (+104, -0)
  3. lossy_image.py (+480, -0)
  4. lossy_image_cuda.py (+607, -0)
  5. vtk_test.py (+39, -0)

Dockerfile_cpu (+29, -0)

@@ -0,0 +1,29 @@
+FROM python:3.7.3
+
+ENV PATH /usr/local/bin:$PATH
+
+ENV PYTHON_VERSION 3.7.3
+
+COPY lossy_image.py / 
+COPY vtk_test.py / 
+
+RUN pip3 install --upgrade pip && \
+    pip3 install matplotlib && \
+    pip3 install Pillow && \
+    pip3 install opencv-python && \
+    pip3 install glob2 && \
+    pip3 install scikit-image && \
+    pip3 install numpy && \
+    pip3 install cmake --upgrade && \
+    pip3 install numba && \
+    python3 -m pip install --upgrade pip && \
+    python3 -m pip install vtk && \
+    apt update -y && \
+    apt install libgl1-mesa-glx -y
+
+CMD [ "python3", "./lossy_image.py" ]
+
+
+
+

Dockerfile_cuda (+104, -0)

@@ -0,0 +1,104 @@
+FROM nvidia/cuda:10.1-cudnn7-devel-ubuntu18.04
+
+
+RUN apt-get clean && apt-get update && apt-get install -y locales
+RUN locale-gen en_US.UTF-8
+ENV LANG en_US.UTF-8
+ENV LANGUAGE en_US:en
+ENV LC_ALL en_US.UTF-8
+
+
+# Install all dependencies for OpenCV
+RUN apt-get -y update && \
+    apt-get -y install \
+        python3 \
+        python3-dev \
+        git \
+        wget \
+        unzip \
+        cmake \
+        build-essential \
+        pkg-config \
+        libatlas-base-dev \
+        gfortran \
+        libgtk-3-dev \
+        libavcodec-dev \
+        libavformat-dev \
+        libswscale-dev \
+        libjpeg-dev \
+        libpng-dev \
+        libtiff-dev \
+        libv4l-dev \
+    && \
+
+# install python dependencies
+    wget https://bootstrap.pypa.io/get-pip.py && \
+    python3 get-pip.py && \
+    rm get-pip.py && \
+    pip3 install numpy && \
+    pip3 install matplotlib && \
+    pip3 install Pillow && \
+    pip3 install glob2 && \
+    pip3 install scikit-image && \
+    pip3 install numba && \
+    pip3 install vtk && \
+    pip3 install pyCUDA && \
+
+# Install OpenCV
+    git clone https://github.com/opencv/opencv.git && \
+    cd opencv  && \
+    git checkout 4.2.0 && \
+    cd .. && \
+    git clone https://github.com/opencv/opencv_contrib.git && \
+    cd opencv_contrib && \
+    git checkout 4.2.0 && \
+    cd .. && \
+    cd opencv  && \
+
+# Prepare build
+    mkdir build && cd build && \
+    cmake -D CMAKE_BUILD_TYPE=RELEASE \
+      -D BUILD_PYTHON_SUPPORT=ON \
+      -D CMAKE_INSTALL_PREFIX=/usr/local \
+      -D OPENCV_EXTRA_MODULES_PATH=/opencv_contrib/modules \
+      -D BUILD_EXAMPLES=OFF \
+      -D PYTHON_DEFAULT_EXECUTABLE=/usr/bin/python3 \
+      -D BUILD_opencv_python3=ON \
+      -D BUILD_opencv_python2=OFF \
+      -D WITH_IPP=OFF \
+      -D WITH_FFMPEG=ON \
+      -D WITH_CUDA=ON \
+      -DCUDA_ARCH=35 \
+      -DCUDA_ARCH_BIN=6.1,7.0,7.5 \
+      -D CUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda-10.1 \
+      -D WITH_CUBLAS=ON \
+      -D WITH_V4L=ON .. \
+    && \
+
+# Install
+    make -j$(nproc) && \
+    make install && \
+    ldconfig \
+    && \
+
+# Clean
+   apt-get -y remove \
+        python3-dev \
+        libatlas-base-dev \
+        gfortran \
+        libgtk-3-dev \
+        libavcodec-dev \
+        libavformat-dev \
+        libswscale-dev \
+        libjpeg-dev \
+        libpng-dev \
+        libtiff-dev \
+        libv4l-dev \
+    && \
+    apt-get clean && \
+    rm -rf /opencv /opencv_contrib /var/lib/apt/lists/*
+
+COPY lossy_image_cuda.py /
+COPY vtk_test.py /
+
+CMD [ "python3", "./lossy_image_cuda.py" ]

lossy_image.py (+480, -0)

@@ -0,0 +1,480 @@
+import os
+import cv2
+import matplotlib.pyplot as plt
+import numpy as np
+from PIL import Image
+import glob
+import sys
+from numba import jit, cuda
+from numba import njit, literal_unroll, prange
+from numba.typed import List
+import math
+from vtk_test import numpy_to_vti
+import shutil
+import time
+
+
+# This function is used to create two lists, one with all the background pixels in the studied area, the other for
+# the foreground pixels.
+
+@jit
+def fill(file, tophat, mask, h1, w1):
+    background = List()
+    foreground = List()
+    background.append(120)
+    foreground.append(120)
+
+    for i in range(h1):
+        for j in range(w1):
+            if tophat[i, j] == 0 and mask[i, j] == True:
+                background.append(file[i, j])
+            elif tophat[i, j] != 0 and mask[i, j] == True:
+                foreground.append(file[i, j])
+
+    return background[1:], foreground[1:]
+
+
+#  This function checks if there is a container in the data set
+
+def testing(img, w, h, test):
+    center = [int(w / 2), int(h / 2)]
+
+    if test == 0:
+        circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 1000,
+                                   param1=20, param2=30,
+                                   minRadius=int(0.95 * (min(center[0], center[1], w - center[0], h - center[1]))),
+                                   maxRadius=int((min(center[0], center[1], w - center[0], h - center[1]))))
+
+        if np.all(circles == None):
+            print('No circle detected')
+            return 0
+
+    print('circle detected')
+
+    return 1
+
+
+# This function is used to detect containers in the pictures
+
+def hough(img, w, h, flag, rayon, ref, ind, param2, param1, seuil):
+    center = [int(w / 2), int(h / 2)]
+
+    if flag == 0:
+
+        circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 1000,
+                                   param1=20, param2=39,
+                                   minRadius=int(0.75 * (min(center[0], center[1], w - center[0], h - center[1]))),
+                                   maxRadius=int((min(center[0], center[1], w - center[0], h - center[1]))))
+
+        if np.all(circles == None):
+
+            print('ok1')
+
+            circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 1000,
+                                       param1=20, param2=30,
+                                       minRadius=int(
+                                           0.6 * (min(center[0], center[1], w - center[0], h - center[1]))),
+                                       maxRadius=int((min(center[0], center[1], w - center[0], h - center[1]))))
+
+            if np.all(circles == None):
+                print('ok2')
+                circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 1000,
+                                           param1=20, param2=30,
+                                           minRadius=int(
+                                               0.6 * (min(center[0], center[1], w - center[0], h - center[1]))),
+                                           maxRadius=int((min(center[0], center[1], w - center[0], h - center[1]))))
+
+            mini = np.min(circles[:, :, 2])
+
+        else:
+            mini = np.min(circles[:, :, 2])
+            print('gg', circles[:, :, 2])
+
+            mask = create_circular_mask2(h, w, radius=mini - 0.015 * mini)
+            masked_img = img.copy()
+            masked_img[~mask] = 0
+
+            try:
+                circles = cv2.HoughCircles(masked_img, cv2.HOUGH_GRADIENT, 1, 1000,
+                                           param1=20, param2=30,
+                                           minRadius=int(
+                                               0.6 * (min(center[0], center[1], w - center[0], h - center[1]))),
+                                           maxRadius=int((min(center[0], center[1], w - center[0], h - center[1]))))
+
+                mini = np.min(circles[:, :, 2])
+
+                print('2 circles detected')
+
+            except:
+
+                print('1 circle detected')
+
+        print(mini)
+
+        ref = mini - 0.05 * mini
+
+        return mini - 0.05 * mini, ref, ind
+
+
+    else:
+        circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 1000,
+                                   param1=param1, param2=param2,
+                                   minRadius=int(rayon - 10),
+                                   maxRadius=int(rayon))
+
+        # 22
+
+        if np.all(circles == None):
+            print(ind)
+            if ind == 0:
+                return ref, ref, ind, param2, param1
+
+            else:
+                return rayon, rayon, ind, param2, param1
+
+        mini = np.min(circles[:, :, 2])
+
+        print('mini', mini)
+
+        if 0.98 * ref < mini < ref:
+
+            return ref, mini, ind, param2, param1
+
+        else:
+            ind = 1
+            if seuil == 1.8:
+                param2 = 9
+
+                param1 = 85
+
+            else:
+
+                param2 = 4
+                param1 = 130
+
+            return mini, mini, ind, param2, param1
+
+
+# This function crops the picture in a circular way, given the appropriate center and radius of the mask.
+
+def create_circular_mask(h, w, center=None, radius=None):
+    if center is None:  # use the middle of the image
+        center = [int(w / 2), int(h / 2)]
+
+    if radius is None:  # use the smallest distance between the center and image walls
+        radius = min(center[0], center[1], w - center[0], h - center[1])
+
+    else:
+        radius = radius * min(center[0], center[1], w - center[0], h - center[1])
+
+    Y, X = np.ogrid[:h, :w]
+    dist_from_center = np.sqrt((X - center[0]) ** 2 + (Y - center[1]) ** 2)
+
+    if radius == 0:
+        mask = np.ones((h, w))
+
+    else:
+        if center != [int(w / 2), int(h / 2)]:
+            radius = radius + (1 / 3) * radius
+
+        mask = dist_from_center <= radius
+
+    return mask
+
+
+def create_circular_mask2(h, w, center=None, radius=None):
+    if center is None:  # use the middle of the image
+        center = [int(w / 2), int(h / 2)]
+    if radius is None:  # use the smallest distance between the center and image walls
+        radius = min(center[0], center[1], w - center[0], h - center[1])
+
+    Y, X = np.ogrid[:h, :w]
+    dist_from_center = np.sqrt((X - center[0]) ** 2 + (Y - center[1]) ** 2)
+
+    mask = dist_from_center <= radius
+
+    return mask
+
+
+# This function keeps only the main structures in the picture (top-hat filtering).
+
+def cleaning4(image, seuil):
+    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2, 2))
+    tophat = cv2.morphologyEx(image, cv2.MORPH_TOPHAT, kernel)
+    im = tophat
+
+    return tophat
+
+
+# This function ensures that all objects touching the border of the mask are erased if they are small enough
+# (no loss of important data).
+
+@jit
+def cleaning5(edge, h1, w1, labels4, rayon_init):
+    center_x = h1 / 2
+    center_y = w1 / 2
+    del_list = List()
+    fin_list = List()
+
+    for i in prange(h1):
+        for j in prange(w1):
+            if edge[i, j] == 255:
+                if labels4[i, j] not in del_list:
+                    del_list.append(labels4[i, j])
+
+    print(del_list)
+
+    for k in del_list:
+
+        flag = 0
+
+        if k not in fin_list:
+            coords4 = np.argwhere(labels4 == k)
+
+            for j in prange(len(coords4[:, 0]) - 1):
+
+                if math.sqrt(
+                        (coords4[j, 0] - center_x) ** 2 + (coords4[j, 1] - center_y) ** 2) < 0.97 * rayon_init:
+                    flag = 1
+
+                    break
+
+            if flag == 0:
+                fin_list.append(k)
+
+    print(fin_list)
+
+    return fin_list
+
+
+@jit
+def cleaning6(h1, w1, labels4, file):
+    for i in prange(h1):
+        for j in prange(w1):
+            if labels4[i, j] == 1000:
+                file[i, j] = 0
+
+    return file
+
+
+# This function searches for the appropriate h value for the denoising step. It also groups data sets
+# whose standard deviation values are close to each other.
+
+def find_param(center, files):
+    index = int(len(files) / 3)
+    files = files[:]
+
+    cpt = 0
+
+    flag3 = 0
+
+    for myFile in files:
+        file = cv2.imread(myFile, 0)
+
+        h1, w1 = file.shape[0], file.shape[1]
+
+        mask = create_circular_mask(h1, w1, center=center, radius=1 / 3)
+        masked_img = file.copy()
+        masked_img[~mask] = 0
+
+        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2, 2))
+        tophat = cv2.morphologyEx(masked_img, cv2.MORPH_TOPHAT, kernel)
+        blackhat = cv2.morphologyEx(masked_img, cv2.MORPH_BLACKHAT, kernel)
+        im = blackhat + tophat
+
+        counts = np.bincount(file.ravel()).argmax()
+
+        val = np.count_nonzero(file == counts)
+
+        ratio = val / len(file.ravel())
+
+        if flag3 == 0:
+            print(np.std(file))
+
+            if np.std(file) > 5:
+                seuil = 3
+
+            elif np.std(file) > 3:
+                seuil = 5
+
+            elif np.std(file) > 1.7:
+                seuil = 1.8
+
+        flag3 = 1
+
+        ret, tophat = cv2.threshold(im, seuil, 255, cv2.THRESH_BINARY)
+
+        background, foreground = fill(file, tophat, mask, h1, w1)
+
+        print('% background', float(len(background) / (len(background) + len(foreground))))
+        print('% foreground', float(len(foreground) / (len(background) + len(foreground))))
+
+        if float(len(background) / (len(background) + len(foreground))) > 0.1 and float(
+                len(foreground) / (len(background) + len(foreground))) > 0.2:
+            sigma = np.std(background)
+
+            h = 1.2 * sigma
+
+            return h, seuil
+
+        cpt += 1
+
+    return 0, 0
+
+
+Datas = ['Eucrib_03', 'Archie_04', 'Pseudoskorpion', 'C7', 'Probe_7', 'Gama_01', 'Z275', 'Screwjoint']
+
+for data in Datas:
+
+    files = glob.glob('/data/' + str(data) + '/slices_8bit/*.tif')
+    files.sort(key=lambda x: int(''.join(filter(str.isdigit, x))))
+
+    X_data = []
+
+    image = Image.open(files[0]).convert('L')
+    image = np.array(image)
+    h1, w1 = image.shape[0], image.shape[1]
+
+    # Looking at different parts of the picture to find the best h value.
+
+    for n in [[int(w1 / 2), int(h1 / 2)], [int(w1 / 2), int(h1 / 3)], [int(w1 / 3), int(h1 / 2)],
+              [int(2 * w1 / 3), int(h1 / 2)],
+              [int(w1 / 2), int(2 * h1 / 3)]]:
+        h, seuil = find_param(n, files)
+
+        print(seuil)
+        if h != 0:
+            print('h', h)
+            break
+
+    cpt2 = 0
+    radius = 0
+    flag = 0
+    rayon = 0
+    test = 0
+    cpt8 = 1
+
+    ind = 0
+
+    # defining hough parameters.
+
+    if seuil == 1.8:
+
+        param2 = 11
+
+        param1 = 70
+
+    else:
+
+        param2 = 37
+        param1 = 270
+
+    fact = h1 / 256
+    value = round(len(files) / fact)
+    compteur = 1
+
+    for myFile in files:
+
+        file = cv2.imread(myFile, 0)
+
+        h1, w1 = file.shape[0], file.shape[1]
+        file = cv2.fastNlMeansDenoising(file, h=h)
+
+        cpt2 += 1
+
+        if test == 0:
+            rayon_init = testing(file, w1, h1, test)
+
+        test = 1
+
+        file = cleaning4(file, seuil)
+
+        kernel2 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
+
+        file2 = cv2.dilate(file, kernel2, iterations=4)
+        file2 = cv2.morphologyEx(file2, cv2.MORPH_CLOSE, kernel2, iterations=4)
+
+        if rayon_init != 0:
+
+            if flag == 0:
+                rayon_init, ref, ind = hough(file2, w1, h1, flag, 0, 0, ind, param2, param1, seuil)
+                last = rayon_init
+
+                flag = 1
+
+            rayon_init, last, ind, param2, param1 = hough(file2, w1, h1, flag, last, ref, ind, param2,
+                                                          param1, seuil)
+
+            print('ref', ref)
+
+            print('rad', rayon_init)
+
+            file3 = cv2.dilate(file, kernel2, iterations=2)
+            file3 = cv2.morphologyEx(file3, cv2.MORPH_CLOSE, kernel2, iterations=2)
+            ret, file3 = cv2.threshold(file3, seuil, 255, cv2.THRESH_BINARY)
+
+            ret, labels4 = cv2.connectedComponents(file3)
+
+            mask = create_circular_mask2(h1, w1, radius=rayon_init)
+
+            del_list = List()
+            edge = cv2.Canny(mask.astype(np.uint8), 0, 1)
+
+            start = time.time()
+
+            fin_list = cleaning5(edge, h1, w1, labels4, rayon_init)
+
+            end = time.time()
+            print((str(round(end - start, 4))))
+
+            print('fin', fin_list)
+
+            cpt8 += 1
+
+            for i in fin_list:
+                labels4[labels4 == i] = 1000
+
+            file = cleaning6(h1, w1, labels4, file)
+
+            masked_img = file.copy()
+            masked_img[~mask] = 0
+
+            full = file.copy()
+
+            kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
+
+            # Reconstruction of possibly important pixels erased due to their proximity to the container.
+
+            for i in range(100):
+                masked_img = cv2.dilate(masked_img, kernel, iterations=1)
+                masked_img = np.minimum(masked_img, full)
+
+            file = masked_img
+
+        file = Image.fromarray(file)
+
+        # Pictures are resized to avoid having a file too big to be displayed on the web page.
+
+        new_width = 256
+        new_height = 256
+        file = file.resize((new_width, new_height), Image.ANTIALIAS)
+
+        if compteur == value:
+
+            X_data.append(file)
+
+            compteur = 1
+
+        else:
+
+            compteur += 1
+
+    X_data = np.array(X_data)
+
+    # Creation of VTI file
+
+    numpy_to_vti(X_data, (0, 0, 0), (float(X_data.shape[1] / X_data.shape[0]), 1, 1),
+                 '/data/' + str(data) + '_cpu.vti')
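
For readers who want to exercise the parameter-selection logic above without the /data slice sets, here is a minimal sketch that reproduces the circular mask, the top-hat background/foreground split, and the h = 1.2 * sigma rule from find_param on a synthetic image. The synthetic slice, the fixed threshold value, and the printed output are illustrative assumptions, not part of this commit.

import cv2
import numpy as np

rng = np.random.default_rng(0)
slice_img = rng.normal(120, 4, size=(256, 256)).clip(0, 255).astype(np.uint8)  # stand-in for one 8-bit slice

h1, w1 = slice_img.shape
Y, X = np.ogrid[:h1, :w1]
mask = np.sqrt((X - w1 / 2) ** 2 + (Y - h1 / 2) ** 2) <= min(h1, w1) / 3  # circular ROI, as in create_circular_mask2

kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2, 2))
tophat = cv2.morphologyEx(slice_img, cv2.MORPH_TOPHAT, kernel)
_, binary = cv2.threshold(tophat, 3, 255, cv2.THRESH_BINARY)  # seuil = 3, one of the thresholds used in find_param

background = slice_img[(binary == 0) & mask]   # vectorised equivalent of fill()
h = 1.2 * np.std(background)                   # denoising strength, as in find_param
denoised = cv2.fastNlMeansDenoising(slice_img, h=h)
print('h =', h, 'denoised shape:', denoised.shape)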

lossy_image_cuda.py (+607, -0)

@@ -0,0 +1,607 @@
+# coding=utf-8
+
+import pycuda.driver as drv
+import pycuda.tools
+import pycuda.autoinit
+from pycuda.compiler import SourceModule
+import pycuda.gpuarray as gpuarray
+import pycuda.cumath
+from pycuda.elementwise import ElementwiseKernel
+
+import numpy as np
+import cv2
+import os
+import matplotlib.pyplot as plt
+from PIL import Image
+import glob
+import sys
+from numba import jit, cuda
+from numba import njit, literal_unroll, prange
+from numba.typed import List
+import math
+from vtk_test import numpy_to_vti
+import shutil
+import time
+from collections import Counter
+
+mod = SourceModule("""
+__global__ void KNN_Mono(unsigned char *dest_r, unsigned char *img_r, int imageW, int imageH, float Noise, float lerpC)
+{
+
+    #define KNN_WINDOW_RADIUS   3
+    #define NLM_BLOCK_RADIUS    3
+    #define KNN_WEIGHT_THRESHOLD    0.00078125f
+    #define KNN_LERP_THRESHOLD      0.79f
+    const float KNN_WINDOW_AREA = (2.0 * KNN_WINDOW_RADIUS + 1.0) * (2.0 * KNN_WINDOW_RADIUS + 1.0) ;
+    const float INV_KNN_WINDOW_AREA = (1.0 / KNN_WINDOW_AREA);
+
+    const long int   ix = blockDim.x * blockIdx.x + threadIdx.x;
+    const long int   iy = blockDim.y * blockIdx.y + threadIdx.y;
+    const float  x = (float)ix  + 1.0f;
+    const float  y = (float)iy  + 1.0f;
+    const float limxmin = NLM_BLOCK_RADIUS + 2;
+    const float limxmax = imageW - NLM_BLOCK_RADIUS - 2;
+    const float limymin = NLM_BLOCK_RADIUS + 2;
+    const float limymax = imageH - NLM_BLOCK_RADIUS - 2;
+
+
+    long int index4;
+    long int index5;
+    if(ix>limxmin && ix<limxmax && iy>limymin && iy<limymax){
+        //Normalized counter for the weight threshold
+        float fCount = 0;
+        //Total sum of pixel weights
+        float sumWeights = 0;
+        //Result accumulator
+        float clr = 0.0;
+        float clr00 = 0.0;
+        float clrIJ = 0.0;
+        //Center of the KNN window
+        index4 = x + (y * imageW);
+        index5 = imageW * (iy + 1) + ix + 1;
+
+        clr00 = img_r[index4];
+        for(float i = -NLM_BLOCK_RADIUS; i <= NLM_BLOCK_RADIUS; i++)
+            for(float j = -NLM_BLOCK_RADIUS; j <= NLM_BLOCK_RADIUS; j++) {
+                long int index2 = x + j + (y + i) * imageW;
+                clrIJ = img_r[index2];
+                float distanceIJ = ((clrIJ - clr00) * (clrIJ - clr00)) / 65536.0;
+                //Derive final weight from color and geometric distance
+                float   weightIJ = (__expf(- (distanceIJ * Noise + (i * i + j * j) * INV_KNN_WINDOW_AREA))) / 256.0;
+                clr += clrIJ * weightIJ;
+                //Sum of weights for color normalization to [0..1] range
+                sumWeights     += weightIJ;
+                //Update weight counter, if KNN weight for current window texel
+                //exceeds the weight threshold
+                fCount         += (weightIJ > KNN_WEIGHT_THRESHOLD) ? INV_KNN_WINDOW_AREA : 0;
+        }
+
+        //Normalize result color by sum of weights
+        sumWeights = 0.0039f / sumWeights;
+        clr *= sumWeights;
+        //Choose LERP quotient basing on how many texels
+        //within the KNN window exceeded the weight threshold
+        float lerpQ = (fCount > KNN_LERP_THRESHOLD) ? lerpC : 1.0f - lerpC;
+
+        clr = clr + (clr00 / 256.0 - clr) * lerpQ;
+
+        dest_r[index5] = (int)(clr * 256.0);
+    }
+}
+""")
+
+
+# This function is used to create two lists, one with all the background pixels in the studied area, the other for
+# the foreground pixels.
+
+@jit
+def fill(file, tophat, mask, h1, w1):
+    background = List()
+    foreground = List()
+    background.append(120)
+    foreground.append(120)
+
+    for i in range(h1):
+        for j in range(w1):
+            if tophat[i, j] == 0 and mask[i, j] == True:
+                background.append(file[i, j])
+            elif tophat[i, j] != 0 and mask[i, j] == True:
+                foreground.append(file[i, j])
+
+    return background[1:], foreground[1:]
+
+
+#  This function checks if there is a container in the data set
+
+def testing(img, w, h, test):
+    center = [int(w / 2), int(h / 2)]
+
+    print('param1', param1)
+
+    if test == 0:
+
+        detector = cv2.cuda.createHoughCirclesDetector(1, 1000000, param1, 15, minRadius=int(
+            param2 * (min(center[0], center[1], w - center[0], h - center[1]))), maxRadius=int(
+            (min(center[0], center[1], w - center[0], h - center[1]))))
+
+        circles = detector.detect(img).download()
+
+        if np.all(circles == None):
+            print('No circle detected')
+            return 0
+
+        cimg = cv2.cvtColor(img.download(), cv2.COLOR_GRAY2BGR)
+        circles2 = np.uint16(np.around(circles))
+        for i in circles2[0, :]:
+            # draw the outer circle
+            cv2.circle(cimg, (i[0], i[1]), i[2], (0, 255, 0), 2)
+            # draw the center of the circle
+            cv2.circle(cimg, (i[0], i[1]), 2, (0, 0, 255), 3)
+
+        cv2.imwrite('/data/test/ok_test.tif', cimg)
+
+    print('circle detected')
+
+    return 1
+
+
+# This function is used to detect containers in the pictures
+
+
+def hough(img, w, h, flag, rayon, ref, ind, param2, param1, seuil):
+    center = [int(w / 2), int(h / 2)]
+
+    if flag == 2:
+        return 1, 1, ind, param2, param1
+
+    if flag == 0:
+
+        detector = cv2.cuda.createHoughCirclesDetector(1, 1000000, param1, 15, minRadius=int(
+            param2 * (min(center[0], center[1], w - center[0], h - center[1]))), maxRadius=int(
+            (min(center[0], center[1], w - center[0], h - center[1]))))
+
+        circles = detector.detect(img).download()
+
+        mini = int((np.median(circles[:, :, 2]) + 1.5 * np.min(circles[:, :, 2])) / 2.5)
+
+        print(mini)
+
+        ref = mini
+
+        return mini, ref, ind
+
+
+    else:
+
+        if int(rayon - 10) <= 0:
+            return 1, 1, ind, param2, param1
+
+        detector = cv2.cuda.createHoughCirclesDetector(1, 100000, param1, 10, minRadius=int(rayon - 10),
+                                                       maxRadius=int(rayon + 10))
+
+        circles = detector.detect(img).download()
+
+        if np.all(circles == None):
+            print(ind)
+            if ind == 0:
+                return ref, ref, ind, param2, param1
+
+            else:
+                return rayon, rayon, ind, param2, param1
+
+        mini = np.mean(circles[:, :, 2])
+        print('mini', mini)
+
+        if 0.90 * ref < mini < 2 * ref:
+
+            return ref, mini, ind, param2, param1
+
+        else:
+            ind = 1
+            if seuil == 1.8:
+
+                param2 = 0.82
+
+                param1 = 20
+
+            else:
+
+                param2 = 0.95
+
+                param1 = 95
+
+            return mini, mini, ind, param2, param1
+
+
+# This function crops the picture in a circular way, given the appropriate center and radius of the mask.
+
+
+def create_circular_mask(h, w, center=None, radius=None):
+    if center is None:  # use the middle of the image
+        center = [int(w / 2), int(h / 2)]
+
+    if radius is None:  # use the smallest distance between the center and image walls
+        radius = min(center[0], center[1], w - center[0], h - center[1])
+
+    else:
+        radius = radius * min(center[0], center[1], w - center[0], h - center[1])
+
+    Y, X = np.ogrid[:h, :w]
+    dist_from_center = np.sqrt((X - center[0]) ** 2 + (Y - center[1]) ** 2)
+
+    if radius == 0:
+        mask = np.ones((h, w))
+
+    else:
+        if center != [int(w / 2), int(h / 2)]:
+            radius = radius + (1 / 3) * radius
+
+        mask = dist_from_center <= radius
+
+    return mask
+
+
+def create_circular_mask2(h, w, center=None, radius=None):
+    if center is None:  # use the middle of the image
+        center = [int(w / 2), int(h / 2)]
+    if radius is None:  # use the smallest distance between the center and image walls
+        radius = min(center[0], center[1], w - center[0], h - center[1])
+
+    Y, X = np.ogrid[:h, :w]
+    dist_from_center = np.sqrt((X - center[0]) ** 2 + (Y - center[1]) ** 2)
+
+    mask = dist_from_center <= radius
+
+    return mask
+
+
+# This function keeps only the main structures in the picture (top-hat filtering).
+
+
+def cleaning4(image, seuil):
+    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2, 2))
+
+    filter = cv2.cuda.createMorphologyFilter(cv2.MORPH_TOPHAT, cv2.CV_8UC1, kernel)
+    tophat = filter.apply(image)
+
+    im = tophat
+
+    return tophat
+
+
+# This function ensures that all objects touching the border of the mask are erased if they are small enough
+# (no loss of important data).
+
+
+@njit
+def cleaning5(edge, h1, w1, labels4, rayon_init, rate):
+    center_x = h1 / 2
+    center_y = w1 / 2
+    del_list = List()
+    fin_list = List()
+
+    for i in prange(h1):
+        for j in prange(w1):
+            if edge[i, j] == 255:
+                if labels4[i, j] not in del_list:
+                    del_list.append(labels4[i, j])
+
+    for k in del_list:
+
+        flag = 0
+
+        if k not in fin_list:
+
+            for i in prange(h1):
+                for j in prange(w1):
+                    if labels4[i, j] == k:
+                        # 0.9
+                        if math.sqrt(
+                                (i - center_x) ** 2 + (j - center_y) ** 2) < rate * rayon_init:
+                            flag = 1
+
+                            break
+
+            if flag == 0:
+                fin_list.append(k)
+
+    print(del_list)
+    print(fin_list)
+
+    return fin_list
+
+
+@jit
+def cleaning6(h1, w1, labels4, file):
+    for i in prange(h1):
+        for j in prange(w1):
+            if labels4[i, j] == 1000:
+                file[i, j] = 0
+
+    return file
+
+
+# This function searches for the appropriate h value for the denoising step. It also groups data sets
+# whose standard deviation values are close to each other.
+
+def find_param(center, files):
+    index = int(len(files) / 3)
+    files = files[:]
+
+    cpt = 0
+
+    flag3 = 0
+
+    for myFile in files:
+        file = cv2.imread(myFile, 0)
+
+        h1, w1 = file.shape[0], file.shape[1]
+
+        mask = create_circular_mask(h1, w1, center=center, radius=1 / 3)
+        masked_img = file.copy()
+        masked_img[~mask] = 0
+
+        np1 = np.array(masked_img).astype(np.uint8)
+
+        cu1 = cv2.cuda_GpuMat()
+        cu1.upload(np1)
+
+        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2, 2))
+
+        filter2 = cv2.cuda.createMorphologyFilter(cv2.MORPH_TOPHAT, cv2.CV_8UC1, kernel)
+        tophat = filter2.apply(cu1).download()
+
+        filter3 = cv2.cuda.createMorphologyFilter(cv2.MORPH_BLACKHAT, cv2.CV_8UC1, kernel)
+        blackhat = filter3.apply(cu1).download()
+
+        im = blackhat + tophat
+
+        if flag3 == 0:
+            print(np.std(file))
+
+            if np.std(file) > 5:
+                seuil = 3
+
+
+            elif np.std(file) > 3:
+                seuil = 5
+
+
+            elif np.std(file) > 1.7:
+                seuil = 1.8
+
+        flag3 = 1
+
+        ret, tophat = cv2.threshold(im, seuil, 255, cv2.THRESH_BINARY)
+
+        background, foreground = fill(file, tophat, mask, h1, w1)
+
+        print('% background', float(len(background) / (len(background) + len(foreground))))
+        print('% foreground', float(len(foreground) / (len(background) + len(foreground))))
+
+        if float(len(background) / (len(background) + len(foreground))) > 0.1 and float(
+                len(foreground) / (len(background) + len(foreground))) > 0.2:
+            sigma = np.std(background)
+
+            h = 1.2 * sigma
+
+            return h, seuil
+
+        cpt += 1
+
+    return 0, 0
+
+
+Datas = ['Probe_7', 'Z275', 'C7', 'Pseudoskorpion', 'Eucrib_03', 'Archie_04', 'Gama_01', 'Screwjoint']
+
+for data in Datas:
+
+    start = time.time()
+
+    print(data)
+
+    files = glob.glob('/data/' + str(data) + '/slices_8bit/*.tif')
+    files.sort(key=lambda x: int(''.join(filter(str.isdigit, x))))
+
+    X_data = []
+
+    image = Image.open(files[0]).convert('L')
+    image = np.array(image)
+
+    h1, w1 = image.shape[0], image.shape[1]
+
+    fact = h1 / 256
+    value = round(fact)
+
+    # Looking at different parts of the picture to find the best h value.
+
+    for n in [[int(w1 / 2), int(h1 / 2)], [int(w1 / 2), int(h1 / 3)], [int(w1 / 3), int(h1 / 2)],
+              [int(2 * w1 / 3), int(h1 / 2)],
+              [int(w1 / 2), int(2 * h1 / 3)]]:
+        h, seuil = find_param(n, files)
+
+        print(seuil)
+        if h != 0:
+            print('h', h)
+            break
+
+    cpt2 = 0
+    radius = 0
+    flag = 0
+    rayon = 0
+    test = 0
+    cpt8 = 1
+
+    ind = 0
+
+    print('seuil', seuil)
+
+    # defining hough parameters.
+
+    if seuil == 1.8:
+
+        param2 = 0.8
+
+        param1 = 80
+
+        rate = 0.99
+
+    else:
+
+        param2 = 0.95
+
+        param1 = 70
+
+        rate = 0.98
+
+    compteur = 1
+
+    for myFile in files:
+
+        KNN_Mono_GPU = mod.get_function("KNN_Mono")
+
+        image_brut_CV = cv2.imread(myFile, 0)
+        h1, w1 = image_brut_CV.shape
+        nb_pixels = h1 * w1
+
+        # Set block and grid sizes
+        nb_ThreadsX = 8
+        nb_ThreadsY = 8
+        nb_blocksX = (w1 // nb_ThreadsX) + 1
+        nb_blocksY = (h1 // nb_ThreadsY) + 1
+
+        # Set KNN parameters
+
+        KNN_Noise = h
+        Noise = 1.0 / (KNN_Noise * KNN_Noise)
+        lerpC = 0.2
+
+        # Algorithm GPU using PyCuda
+
+        r_gpu = drv.mem_alloc(image_brut_CV.size * image_brut_CV.dtype.itemsize)
+        drv.memcpy_htod(r_gpu, image_brut_CV)
+        img_r_gpu = drv.mem_alloc(image_brut_CV.size * image_brut_CV.dtype.itemsize)
+        drv.memcpy_htod(img_r_gpu, image_brut_CV)
+        res_r = np.empty_like(image_brut_CV)
+
+        KNN_Mono_GPU(r_gpu, img_r_gpu, np.intc(w1), np.intc(h1), np.float32(Noise), np.float32(lerpC), block=(nb_ThreadsX, nb_ThreadsY, 1), grid=(nb_blocksX, nb_blocksY))
+        drv.memcpy_dtoh(res_r, r_gpu)
+
+        r_gpu.free()
+        img_r_gpu.free()
+
+        file = res_r
+
+        np1 = np.array(file).astype(np.uint8)
+
+        cu1 = cv2.cuda_GpuMat()
+        cu1.upload(np1)
+
+        cpt2 += 1
+
+        if test == 0:
+            rayon_init = testing(cu1, w1, h1, test)
+
+        test = 1
+
+        kernel2 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
+
+        if rayon_init != 0:
+
+            if flag == 0:
+                rayon_init, ref, ind = hough(cu1, w1, h1, flag, 0, 0, ind, param2, param1, seuil)
+                last = rayon_init
+
+                flag = 1
+
+                file = cleaning4(cu1, seuil)
+
+                file2 = file
+
+            if rayon_init != 1:
+                rayon_init, last, ind, param2, param1 = hough(cu1, w1, h1, flag, last, ref, ind, param2,
+                                                              param1, seuil)
+
+                print('ref', ref)
+
+                print('param1', param1)
+
+                file = cleaning4(cu1, seuil)
+
+                file2 = file
+
+                print('rad', rayon_init)
+
+                filter6 = cv2.cuda.createMorphologyFilter(cv2.MORPH_DILATE, cv2.CV_8UC1, kernel2, iterations=2)
+                file3 = filter6.apply(file)
+
+                filter7 = cv2.cuda.createMorphologyFilter(cv2.MORPH_CLOSE, cv2.CV_8UC1, kernel2, iterations=2)
+                file3 = filter7.apply(file3)
+
+                file3 = file3.download()
+
+                file3 = cv2.threshold(file3, 5, 255, cv2.THRESH_BINARY)[1]
+
+                ret, labels4 = cv2.connectedComponents(file3)
+
+                mask = create_circular_mask2(h1, w1, radius=rayon_init)
+
+                edge = cv2.Canny(mask.astype(np.uint8), 0, 1)
+
+                fin_list = cleaning5(edge, h1, w1, labels4, rayon_init, rate)
+
+                cpt8 += 1
+
+                for i in fin_list:
+                    labels4[labels4 == i] = 1000
+
+                file = cleaning6(h1, w1, labels4, file.download())
+
+                masked_img = file.copy()
+                masked_img[~mask] = 0
+
+                file = masked_img
+
+                file = Image.fromarray(file)
+
+                # Pictures are resized to avoid having a file too big to be displayed on the web page.
+
+                file = file.resize((256, 256), Image.ANTIALIAS)
+
+                file = np.array(file)
+
+            else:
+                file = np.zeros((256, 256))
+                flag = 2
+
+
+        else:
+            file = Image.fromarray(file.download())
+            file = file.resize((256, 256), Image.ANTIALIAS)
+
+            file = np.array(file)
+
+        if compteur == value:
+
+            X_data.append(file)
+
+            compteur = 1
+
+        else:
+
+            compteur += 1
+
+        print(cpt2)
+
+    X_data = np.array(X_data)
+
+    print(X_data.shape)
+
+    # Creation of VTI file
+
+    numpy_to_vti(X_data, (0, 0, 0), (1, 1, 1),
+                 '/data/' + str(data) + '_gpu.vti')
+
+    end = time.time()
+    print((str(round(end - start, 4))))
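
As a quick sanity check of the PyCUDA setup this script depends on, the sketch below compiles a trivial copy kernel with SourceModule and launches it with the same 8x8 block size and (w // 8 + 1, h // 8 + 1) grid sizing used for KNN_Mono above. The copy kernel and the synthetic image are illustrative assumptions and are not part of this commit.

import numpy as np
import pycuda.autoinit
import pycuda.driver as drv
from pycuda.compiler import SourceModule

mod_check = SourceModule("""
__global__ void copy_u8(unsigned char *dst, unsigned char *src, int imageW, int imageH)
{
    int ix = blockDim.x * blockIdx.x + threadIdx.x;
    int iy = blockDim.y * blockIdx.y + threadIdx.y;
    if (ix < imageW && iy < imageH)
        dst[iy * imageW + ix] = src[iy * imageW + ix];
}
""")
copy_u8 = mod_check.get_function("copy_u8")

img = (np.random.rand(512, 512) * 255).astype(np.uint8)  # stand-in for one slice
out = np.empty_like(img)
h1, w1 = img.shape

nb_ThreadsX, nb_ThreadsY = 8, 8                           # same block size as the KNN_Mono launch
grid = ((w1 // nb_ThreadsX) + 1, (h1 // nb_ThreadsY) + 1)

src_gpu = drv.mem_alloc(img.nbytes)
dst_gpu = drv.mem_alloc(img.nbytes)
drv.memcpy_htod(src_gpu, img)

copy_u8(dst_gpu, src_gpu, np.intc(w1), np.intc(h1),
        block=(nb_ThreadsX, nb_ThreadsY, 1), grid=grid)
drv.memcpy_dtoh(out, dst_gpu)

src_gpu.free()
dst_gpu.free()

assert np.array_equal(img, out)  # transfers and kernel launch round-trip correctly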

vtk_test.py (+39, -0)

@@ -0,0 +1,39 @@
+import os
+import vtk
+from vtk.util import numpy_support
+from vtk.util.numpy_support import numpy_to_vtk, get_vtk_array_type
+import shutil
+
+
+def numpy_to_vti(array, origin, spacing, filename):
+    """This function write a VtkImageData vti file from a numpy array.
+
+    :param array: input array
+    :type array: :class:`numpy.ndarray`
+    :param origin: the origin of the array
+    :type origin: array like object of values
+    :param spacing: the step in each dimension
+    :type spacing: array like object of values
+    :param filename: output filename (.vti)
+    :type filename: str
+    """
+
+    if array.ndim != 3:
+        raise ValueError("Only works with 3 dimensional arrays")
+
+    vtkArray = numpy_to_vtk(num_array=array.flatten('F'), deep=True,
+                            array_type=get_vtk_array_type(array.dtype))
+
+    imageData = vtk.vtkImageData()
+    imageData.SetOrigin(origin)
+    imageData.SetSpacing(spacing)
+    imageData.SetDimensions(array.shape)
+    imageData.GetPointData().SetScalars(vtkArray)
+
+    writer = vtk.vtkXMLImageDataWriter()
+    writer.SetFileName(filename)
+    writer.SetInputData(imageData)
+    writer.Write()
+
+    return ()
+
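
A minimal usage sketch for numpy_to_vti, assuming a small synthetic 8-bit volume and an illustrative output filename (the scripts above pass the resized slice stack and a per-dataset path under /data):

import numpy as np
from vtk_test import numpy_to_vti

# (slices, rows, cols) stack of 8-bit values, like the X_data array built in lossy_image.py
volume = (np.random.rand(32, 64, 64) * 255).astype(np.uint8)

# origin at (0, 0, 0), unit spacing in every direction, written as an XML .vti file readable by ParaView
numpy_to_vti(volume, (0, 0, 0), (1, 1, 1), 'example_volume.vti')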