Paddy-field and grass segmentation with dense_unet under the Keras framework
As shown in the figure, this is the dataset after image augmentation; the main task is segmenting the path through the paddy field.
The usual drill, so let's get to it!
Dataset augmentation script:
import cv2
import os
import numpy as np
from PIL import Image

# Brightness/contrast augmentation via image blending
# file1 = r"./3dcm_png"
# file2 = r"./2gu_label"
file1 = r"./image"
file2 = r"./mask"
files = os.listdir(file1)
files2 = os.listdir(file2)
path3 = r"./extendimg"
path4 = r"./extendmask"
if not os.path.exists(path3):
    os.mkdir(path3)
if not os.path.exists(path4):
    os.mkdir(path4)

def contrast_img(img1, c, b):  # brightness: add b to every channel of every pixel
    rows, cols, channels = img1.shape
    print(img1.shape)
    print(img1.dtype)
    # Create an all-zero (black) image array: np.zeros(img1.shape, dtype=np.uint8)
    blank = np.zeros(img1.shape, img1.dtype)
    # dst = c*img1 + (1-c)*blank + b = c*img1 + b, since blank is all zeros
    dst = cv2.addWeighted(img1, c, blank, 1 - c, b)
    return dst
# cv2.imshow('original_img', img)
# cv2.imshow("contrast_img", dst)

for i in range(0, len(files)):
    path = os.path.join(file1, files[i])
    print(path)
    img1 = cv2.imread(path, cv2.IMREAD_COLOR)  # e.g. (380, 308, 3); read image 1
    img1 = cv2.resize(img1, (320, 320))
    res = contrast_img(img1, 0.90, 10)
    # img1 = img1.astype(np.uint8)
    # With addWeighted, the first image gets weight c and the second 1-c,
    # so the (black) second image is blended onto the first.
    # g(x) = alpha*f(x) + beta, where alpha (>0) is the gain, controlling contrast,
    # and beta is the bias, controlling brightness.
    # res = np.uint8(np.clip((0.92 * img1 + 30), 0, 255))     # contrast 0.92, brightness 30
    # res = np.uint16(np.clip((1.15 * img1 + 80), 0, 65535))  # contrast 1.15, brightness 80
    # res = np.uint16(np.clip((1.0 * img1 + 0.), 0, 65535))   # contrast 1.0, brightness 0
    prefix = str(i + 160).zfill(5) + ".png"  # index offset so augmented files get new names
    savepath = os.path.join(path3, prefix)
    cv2.imwrite(savepath, res)  # save the augmented image
    # cv2.imwrite("res.jpg", res)
    # cv2.imshow('dst', res)
    # cv2.waitKey(0)
    path2m = os.path.join(file2, files2[i])
    mask = cv2.imread(path2m, 0)
    print(path2m)
    mask = cv2.resize(mask, (320, 320))
    mask = mask.astype(np.uint8)
    # mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
    mask = np.where(mask > 1, 255, 0).astype(np.uint8)  # binarize; cast back to uint8 for imwrite
    savepath = os.path.join(path4, prefix)
    cv2.imwrite(savepath, mask)  # save the mask
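A quick sanity check (my own, not from the original post) that blending with a black image really is the linear transform g(x) = α·f(x) + β described in the comments above; the toy image and tolerance are assumptions:

import cv2
import numpy as np

img = np.full((4, 4, 3), 100, dtype=np.uint8)            # toy 4x4 gray image
blank = np.zeros(img.shape, img.dtype)
via_blend = cv2.addWeighted(img, 0.90, blank, 0.10, 10)  # c*img + (1-c)*0 + b
direct = np.clip(0.90 * img + 10, 0, 255).astype(np.uint8)
assert np.allclose(via_blend, direct, atol=1)            # allow rounding differences
print(via_blend[0, 0])                                   # -> [100 100 100] (0.9*100+10)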
Script to convert labelme annotations into actual images:
#!/usr/bin/env python
# coding=utf-8
import base64
import json
import os
import os.path as osp

import numpy as np
import PIL.Image
# import yaml  # only needed for the commented-out info.yaml block below
from labelme.logger import logger
from labelme import utils

NAME_LABEL_MAP = {
    '_background_': 0,
    "road": 1,
}

def translate(in_dir, out_dir, start_index):
    json_files = os.listdir(in_dir)
    for jf in json_files:
        id_name = str(start_index).zfill(4)
        start_index = start_index + 1
        # Only process JSON files
        if jf.endswith('json'):
            json_file = os.path.join(in_dir, jf)
            with open(osp.join(out_dir, 'data.txt'), 'a') as f:
                f.write(json_file + '\n')
            # Build the output file name
            img_name, _ = os.path.splitext(jf)
            img_name = id_name + '_' + img_name
            # Load the image data
            data = json.load(open(json_file))
            imageData = data.get('imageData')
            if not imageData:
                imagePath = os.path.join(os.path.dirname(json_file), data['imagePath'])
                with open(imagePath, 'rb') as f:
                    imageData = f.read()
                imageData = base64.b64encode(imageData).decode('utf-8')
            img = utils.img_b64_to_arr(imageData)
            # Read the shapes (the annotations) from the JSON file
            label_name_to_value = {'_background_': 0,
                                   'road': 1,
                                   }
            for shape in sorted(data['shapes'], key=lambda x: x['label']):
                label_name = shape['label']
                if label_name in label_name_to_value:
                    label_value = label_name_to_value[label_name]
                else:
                    label_value = len(label_name_to_value)
                    label_name_to_value[label_name] = label_value
                # Save one mask per annotated region
                points = shape['points']
                shape_type = shape.get('shape_type', None)
                mask_temp = utils.shape_to_mask(img.shape, points, shape_type)
                # Save the per-shape mask (cast bool -> uint8 so PIL accepts it)
                mask_temp_name = img_name + '_' + label_name + '_mask.png'
                PIL.Image.fromarray((mask_temp * 255).astype(np.uint8)).save(osp.join(out_dir, mask_temp_name))
            lbl = utils.shapes_to_label(img.shape, data['shapes'], label_name_to_value)
            label_names = [None] * (max(label_name_to_value.values()) + 1)
            for name, value in label_name_to_value.items():
                label_names[value] = name
            lbl_viz = utils.draw_label(lbl, img, label_names)
            # Save the original image
            PIL.Image.fromarray(img).save(osp.join(out_dir, img_name + '.png'))
            # Save the combined label mask
            mask_name = img_name + '_mask.png'
            utils.lblsave(osp.join(out_dir, mask_name), lbl)
            # Save the original image with the mask overlaid
            viz_name = img_name + '_labelviz.png'
            PIL.Image.fromarray(lbl_viz).save(osp.join(out_dir, viz_name))
            with open(osp.join(out_dir, 'label_names.txt'), 'w') as f:
                for lbl_name in label_names:
                    f.write(lbl_name + '\n')
            # logger.warning('info.yaml is being replaced by label_names.txt')
            # info = dict(label_names=label_names)
            # with open(osp.join(out_dir, 'info.yaml'), 'w') as f:
            #     yaml.safe_dump(info, f, default_flow_style=False)
            logger.info('{} Saved to: {}'.format(img_name, out_dir))

if __name__ == '__main__':
    in_dir = r'./pic'
    out_dir = r'./OUT'
    # 0 is the starting index
    translate(in_dir, out_dir, 0)
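Note that utils.lblsave writes a paletted PNG whose pixel values are label indices, while the augmentation script above expects a 0/255 grayscale mask. A small helper bridging the two (my own sketch, not part of the original scripts; the paths in the usage comment are hypothetical):

import numpy as np
from PIL import Image

def labelme_mask_to_binary(mask_path, out_path):
    lbl = np.array(Image.open(mask_path))                 # P-mode PNG: values are label ids
    binary = np.where(lbl > 0, 255, 0).astype(np.uint8)   # road (id 1) -> 255, background -> 0
    Image.fromarray(binary).save(out_path)

# labelme_mask_to_binary('./OUT/0000_frame1_mask.png', './mask/0000.png')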
Below is the denseunet segmentation model used in this post:
# -*- coding: utf-8 -*-
from keras.models import *
from keras.layers import Input, Concatenate, Conv2D, MaxPooling2D, UpSampling2D, Dropout, Cropping2D, BatchNormalization, Activation, Dense
from keras.optimizers import *
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras import backend as K
# from keras.regularizers import l2
from keras import regularizers
# from custom_layers import Scale
from loss import *

def DenseUnet():
    inputs = Input(shape=(320, 320, 3))
    # Each block concatenates its first conv with the (dropout of the) second
    # conv -- the dense skip connection inside the block.
    conv1_1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(inputs)
    BatchNorm1_1 = BatchNormalization(axis=3, gamma_regularizer=regularizers.l2(1e-4), beta_regularizer=regularizers.l2(1e-4))(conv1_1)
    ReLU1_1 = Activation('relu')(BatchNorm1_1)
    conv1_2 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(ReLU1_1)
    drop1_2 = Dropout(0)(conv1_2)
    # Merge1 = merge([conv1_1, drop1_2], mode='concat', concat_axis=3)  # Keras 1.x API
    Merge1 = Concatenate(axis=3)([conv1_1, drop1_2])
    pool1 = MaxPooling2D(pool_size=(2, 2))(Merge1)
    conv2_1 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool1)
    BatchNorm2_1 = BatchNormalization(axis=3, gamma_regularizer=regularizers.l2(1e-4), beta_regularizer=regularizers.l2(1e-4))(conv2_1)
    ReLU2_1 = Activation('relu')(BatchNorm2_1)
    conv2_2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(ReLU2_1)
    drop2_2 = Dropout(0)(conv2_2)
    # Merge2 = merge([conv2_1, drop2_2], mode='concat', concat_axis=3)
    Merge2 = Concatenate(axis=3)([conv2_1, drop2_2])
    pool2 = MaxPooling2D(pool_size=(2, 2))(Merge2)
    conv3_1 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool2)
    BatchNorm3_1 = BatchNormalization(axis=3, gamma_regularizer=regularizers.l2(1e-4), beta_regularizer=regularizers.l2(1e-4))(conv3_1)
    ReLU3_1 = Activation('relu')(BatchNorm3_1)
    conv3_2 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(ReLU3_1)
    drop3_2 = Dropout(0)(conv3_2)
    # Merge3 = merge([conv3_1, drop3_2], mode='concat', concat_axis=3)
    Merge3 = Concatenate(axis=3)([conv3_1, drop3_2])
    pool3 = MaxPooling2D(pool_size=(2, 2))(Merge3)
    conv4_1 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool3)
    BatchNorm4_1 = BatchNormalization(axis=3, gamma_regularizer=regularizers.l2(1e-4), beta_regularizer=regularizers.l2(1e-4))(conv4_1)
    ReLU4_1 = Activation('relu')(BatchNorm4_1)
    conv4_2 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(ReLU4_1)
    drop4_2 = Dropout(0)(conv4_2)
    # Merge4 = merge([conv4_1, drop4_2], mode='concat', concat_axis=3)
    Merge4 = Concatenate(axis=3)([conv4_1, drop4_2])
    drop4 = Dropout(0.5)(Merge4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
    conv5_1 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool4)
    BatchNorm5_1 = BatchNormalization(axis=3, gamma_regularizer=regularizers.l2(1e-4), beta_regularizer=regularizers.l2(1e-4))(conv5_1)
    ReLU5_1 = Activation('relu')(BatchNorm5_1)
    conv5_2 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(ReLU5_1)
    drop5_2 = Dropout(0)(conv5_2)
    # Merge5 = merge([conv5_1, drop5_2], mode='concat', concat_axis=3)
    Merge5 = Concatenate(axis=3)([conv5_1, drop5_2])  # conv5_1, matching the other blocks
    drop5 = Dropout(0.5)(Merge5)
    up6 = Conv2D(512, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(drop5))
    # merge6 = merge([drop4, up6], mode='concat', concat_axis=3)
    merge6 = Concatenate(axis=3)([drop4, up6])
    conv6_1 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge6)
    BatchNorm6_1 = BatchNormalization(axis=3, gamma_regularizer=regularizers.l2(1e-4), beta_regularizer=regularizers.l2(1e-4))(conv6_1)
    ReLU6_1 = Activation('relu')(BatchNorm6_1)
    conv6_2 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(ReLU6_1)
    drop6_2 = Dropout(0)(conv6_2)
    # Merge6 = merge([conv6_1, drop6_2], mode='concat', concat_axis=3)
    Merge6 = Concatenate(axis=3)([conv6_1, drop6_2])
    up7 = Conv2D(256, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(Merge6))
    # merge7 = merge([Merge3, up7], mode='concat', concat_axis=3)
    merge7 = Concatenate(axis=3)([Merge3, up7])
    conv7_1 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge7)
    BatchNorm7_1 = BatchNormalization(axis=3, gamma_regularizer=regularizers.l2(1e-4), beta_regularizer=regularizers.l2(1e-4))(conv7_1)
    ReLU7_1 = Activation('relu')(BatchNorm7_1)
    conv7_2 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(ReLU7_1)
    drop7_2 = Dropout(0)(conv7_2)
    # Merge7 = merge([conv7_1, drop7_2], mode='concat', concat_axis=3)
    Merge7 = Concatenate(axis=3)([conv7_1, drop7_2])
    up8 = Conv2D(128, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(Merge7))
    # merge8 = merge([Merge2, up8], mode='concat', concat_axis=3)
    merge8 = Concatenate(axis=3)([Merge2, up8])
    conv8_1 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge8)
    BatchNorm8_1 = BatchNormalization(axis=3, gamma_regularizer=regularizers.l2(1e-4), beta_regularizer=regularizers.l2(1e-4))(conv8_1)
    ReLU8_1 = Activation('relu')(BatchNorm8_1)
    conv8_2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(ReLU8_1)
    drop8_2 = Dropout(0)(conv8_2)
    # Merge8 = merge([conv8_1, drop8_2], mode='concat', concat_axis=3)
    Merge8 = Concatenate(axis=3)([conv8_1, drop8_2])
    up9 = Conv2D(64, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(Merge8))
    # merge9 = merge([Merge1, up9], mode='concat', concat_axis=3)
    merge9 = Concatenate(axis=3)([Merge1, up9])
    conv9_1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge9)
    BatchNorm9_1 = BatchNormalization(axis=3, gamma_regularizer=regularizers.l2(1e-4), beta_regularizer=regularizers.l2(1e-4))(conv9_1)
    ReLU9_1 = Activation('relu')(BatchNorm9_1)
    conv9_2 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(ReLU9_1)
    drop9_2 = Dropout(0)(conv9_2)
    # Merge9 = merge([conv9_1, drop9_2], mode='concat', concat_axis=3)
    Merge9 = Concatenate(axis=3)([conv9_1, drop9_2])
    conv9 = Conv2D(2, 3, activation='relu', padding='same', kernel_initializer='he_normal')(Merge9)
    conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)  # sigmoid output for binary segmentation
    # conv10 = Conv2D(1, 1, activation='softmax')(conv9)  # softmax alternative
    model = Model(inputs=inputs, outputs=conv10)
    model_dice = dice_loss(smooth=1e-5)
    model.compile(optimizer=Adam(lr=1e-4), loss=model_dice, metrics=['accuracy'])
    # model.compile(optimizer=Adam(lr=1e-4), loss=[focal_loss(alpha=.25, gamma=2)], metrics=['accuracy'])
    # model.compile(optimizer=Adam(lr=1e-4), loss='binary_crossentropy', metrics=['accuracy'])
    return model
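The loss module imported at the top (from loss import *) is not shown in the post. A minimal sketch of a factory-style Dice loss that matches the call dice_loss(smooth=1e-5) above; this is an assumption about the author's implementation, not a copy of it:

# loss.py (sketch)
from keras import backend as K

def dice_loss(smooth=1e-5):
    def loss(y_true, y_pred):
        # Dice coefficient over the flattened masks; smooth avoids division by zero
        y_true_f = K.flatten(y_true)
        y_pred_f = K.flatten(y_pred)
        intersection = K.sum(y_true_f * y_pred_f)
        dice = (2.0 * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
        return 1.0 - dice  # minimize 1 - Dice to maximize overlap
    return loss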
I forgot to take screenshots of the final segmentation results, but they were quite good. The key point is the network model!
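The post also omits the training driver. For completeness, a minimal sketch (my own, not from the original post) of how the model might be fit on the extendimg/extendmask folders produced by the augmentation script; batch size, epoch count, and the checkpoint file name are assumptions:

import os
import cv2
import numpy as np
from keras.callbacks import ModelCheckpoint

# Assumes DenseUnet() from the model block above is in scope.
def load_folder(img_dir, mask_dir):
    # Load the 320x320 images and 0/255 masks, scaling both to [0, 1]
    names = sorted(os.listdir(img_dir))
    imgs = np.array([cv2.imread(os.path.join(img_dir, n)) for n in names], dtype=np.float32) / 255.0
    masks = np.array([cv2.imread(os.path.join(mask_dir, n), 0) for n in names], dtype=np.float32) / 255.0
    return imgs, masks[..., np.newaxis]  # sigmoid output expects a channel axis

x, y = load_folder('./extendimg', './extendmask')
model = DenseUnet()
model.fit(x, y, batch_size=2, epochs=50, validation_split=0.1,
          callbacks=[ModelCheckpoint('denseunet.h5', save_best_only=True)])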