# coding=utf-8
"""Keras implementation of FCN-8s (Long et al., "Fully Convolutional Networks
for Semantic Segmentation", CVPR 2015).

Article summary (translated from the original Chinese post):
The paper proposes an end-to-end approach to semantic segmentation: the
segmentation ground truth is used directly as supervision to train a network
that makes per-pixel predictions.  The first fully connected layer of VGG16
(25088 x 4096) is reinterpreted as a 512x7x7x4096 convolution so the network
stays fully convolutional, which also enables fine-tuning from pretrained
classification weights.  A single 32-stride bilinear upsampling
(deconvolution) back to input resolution gives poor boundaries, so FCN adds
"skip" connections: fine feature maps from shallower layers are fused with
the coarse, upsampled deep features before further upsampling — combining
"what" (semantics) and "where" (localization).  Reported mean IoU:
FCN-32s 59.4, FCN-16s 62.4, FCN-8s 62.7.
"""
from keras.models import *
from keras.layers import *
import os


def crop(o1, o2, i):
    """Crop the larger of two feature maps so both have identical H and W.

    Parameters
    ----------
    o1, o2 : tensors
        Two feature maps whose spatial sizes may differ by a few pixels
        (a side effect of Conv2DTranspose upsampling).
    i : tensor
        The model input; used to build throwaway Models whose
        ``output_shape`` reveals each feature map's static (H, W).

    Returns
    -------
    (o1, o2) with equal spatial dimensions (channels-last layout assumed:
    shape index 1 is height, index 2 is width).
    """
    o_shape2 = Model(i, o2).output_shape
    outputHeight2 = o_shape2[1]
    outputWidth2 = o_shape2[2]

    o_shape1 = Model(i, o1).output_shape
    outputHeight1 = o_shape1[1]
    outputWidth1 = o_shape1[2]

    cx = abs(outputWidth1 - outputWidth2)
    cy = abs(outputHeight2 - outputHeight1)

    # Trim the wider map on the right by the width difference.
    if outputWidth1 > outputWidth2:
        o1 = Cropping2D(cropping=((0, 0), (0, cx)))(o1)
    else:
        o2 = Cropping2D(cropping=((0, 0), (0, cx)))(o2)

    # Trim the taller map at the top by the height difference.
    if outputHeight1 > outputHeight2:
        o1 = Cropping2D(cropping=((0, cy), (0, 0)))(o1)
    else:
        o2 = Cropping2D(cropping=((0, cy), (0, 0)))(o2)

    return o1, o2


def FCN8(nClasses, input_height=416, input_width=608, vgg_level=3):
    """Build an FCN-8s segmentation model on a VGG16 backbone.

    The VGG16 convolutional blocks produce pool3/pool4/pool5 features; the
    decoder upsamples pool5 by 2x, fuses with pool4, upsamples by 2x again,
    fuses with pool3, then upsamples by 8x back to (roughly) input size.

    Parameters
    ----------
    nClasses : int
        Number of segmentation classes.
    input_height, input_width : int
        Spatial size of the RGB input (channels-last).
    vgg_level : int
        Kept for interface compatibility; unused in this implementation.

    Returns
    -------
    keras Model mapping (H, W, 3) images to (H'*W', nClasses) per-pixel
    softmax scores; ``model.outputHeight``/``model.outputWidth`` record the
    actual output resolution.
    """
    img_input = Input(shape=(input_height, input_width, 3))

    # Block 1
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(img_input)
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
    f1 = x

    # Block 2
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
    f2 = x

    # Block 3
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
    f3 = x  # 1/8 resolution skip

    # Block 4
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
    f4 = x  # 1/16 resolution skip

    # Block 5
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)
    f5 = x  # 1/32 resolution

    # NOTE(review): the original also built a detached VGG classifier head
    # (Flatten + Dense 4096/4096) here.  It is not on the img_input -> o path,
    # so it never appears in the returned Model; removed as dead code to keep
    # the network truly fully convolutional.
    # x = Flatten(name='flatten')(x)
    # x = Dense(4096, activation='relu', name='fc1')(x)
    # x = Dense(4096, activation='relu', name='fc2')(x)
    # x = Dense(1000, activation='softmax', name='predictions')(x)
    # vgg = Model(img_input, x)
    # vgg.load_weights(VGG_Weights_path)

    # fc6/fc7 reinterpreted as convolutions (the "convolutionalized" FC layers).
    o = f5
    o = Conv2D(4096, (7, 7), activation='relu', padding='same')(o)
    o = Dropout(0.5)(o)
    o = Conv2D(4096, (1, 1), activation='relu', padding='same')(o)
    o = Dropout(0.5)(o)

    # Per-class score map at 1/32, upsampled 2x to 1/16.
    o = Conv2D(nClasses, (1, 1), kernel_initializer='he_normal')(o)
    o = Conv2DTranspose(nClasses, kernel_size=(4, 4), strides=(2, 2), use_bias=False)(o)

    # Fuse with the pool4 (1/16) skip.
    o2 = f4
    o2 = Conv2D(nClasses, (1, 1), kernel_initializer='he_normal')(o2)
    o, o2 = crop(o, o2, img_input)
    o = Add()([o, o2])

    # Upsample 2x to 1/8 and fuse with the pool3 skip.
    o = Conv2DTranspose(nClasses, kernel_size=(4, 4), strides=(2, 2), use_bias=False)(o)
    o2 = f3
    o2 = Conv2D(nClasses, (1, 1), kernel_initializer='he_normal')(o2)
    o2, o = crop(o2, o, img_input)
    o = Add()([o2, o])

    # Final 8x upsampling back to (approximately) the input resolution.
    o = Conv2DTranspose(nClasses, kernel_size=(16, 16), strides=(8, 8), use_bias=False)(o)

    o_shape = Model(img_input, o).output_shape
    outputHeight = o_shape[1]
    outputWidth = o_shape[2]

    # Flatten spatial dims and move them to axis 1 so softmax runs per pixel
    # over the class axis: (H, W, C) -> (C, H*W) -> (H*W, C).
    o = Reshape((-1, outputHeight * outputWidth))(o)
    o = Permute((2, 1))(o)
    o = Activation('softmax')(o)

    model = Model(img_input, o)
    model.outputWidth = outputWidth
    model.outputHeight = outputHeight
    return model
def FCN32(n_classes, input_height=416, input_width=608, vgg_level=3):
    """Build an FCN-32s segmentation model on a VGG16 backbone.

    The coarsest FCN variant: per-class scores are predicted at 1/32
    resolution and upsampled back in a single 32-stride transposed
    convolution, with no skip connections.

    Parameters
    ----------
    n_classes : int
        Number of segmentation classes.
    input_height, input_width : int
        Spatial size of the RGB input (channels-last).
    vgg_level : int
        Kept for interface compatibility; unused in this implementation.

    Returns
    -------
    keras Model mapping (H, W, 3) images to (H'*W', n_classes) per-pixel
    softmax scores; ``model.outputHeight``/``model.outputWidth`` record the
    actual output resolution.
    """
    # BUGFIX: the original used Input(shape=(3, input_height, input_width))
    # (channels-first), inconsistent with FCN8's channels-last input and with
    # the output_shape[1]/[2] height/width indexing below.  Fixed to
    # channels-last so both builders agree.
    img_input = Input(shape=(input_height, input_width, 3))

    # Block 1
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(img_input)
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
    f1 = x

    # Block 2
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
    f2 = x

    # Block 3
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
    f3 = x

    # Block 4
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
    f4 = x

    # Block 5
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)
    f5 = x  # 1/32 resolution

    # NOTE(review): the original built a detached VGG classifier head here
    # (Flatten + Dense 4096/4096/1000-softmax).  It is not on the
    # img_input -> o path, so it never appears in the returned Model;
    # removed as dead code.
    # x = Flatten(name='flatten')(x)
    # x = Dense(4096, activation='relu', name='fc1')(x)
    # x = Dense(4096, activation='relu', name='fc2')(x)
    # x = Dense(1000, activation='softmax', name='predictions')(x)
    # vgg = Model(img_input, x)
    # vgg.load_weights(VGG_Weights_path)

    # fc6/fc7 reinterpreted as convolutions.
    o = f5
    o = Conv2D(4096, (7, 7), activation='relu', padding='same')(o)
    o = Dropout(0.5)(o)
    o = Conv2D(4096, (1, 1), activation='relu', padding='same')(o)
    o = Dropout(0.5)(o)

    # Per-class scores at 1/32, then one big 32-stride upsampling.
    o = Conv2D(n_classes, (1, 1), kernel_initializer='he_normal')(o)
    o = Conv2DTranspose(n_classes, kernel_size=(64, 64), strides=(32, 32), use_bias=False)(o)

    o_shape = Model(img_input, o).output_shape
    outputHeight = o_shape[1]
    outputWidth = o_shape[2]

    # Flatten spatial dims and move them to axis 1 so softmax runs per pixel
    # over the class axis: (H, W, C) -> (C, H*W) -> (H*W, C).
    o = Reshape((-1, outputHeight * outputWidth))(o)
    o = Permute((2, 1))(o)
    o = Activation('softmax')(o)

    model = Model(img_input, o)
    model.outputWidth = outputWidth
    model.outputHeight = outputHeight
    return model