
Integrating U-Net v2 (UNetV2) into YOLOv8

1. Introduction

This article introduces U-Net v2, which improves medical image segmentation accuracy through a redesigned skip connection. The new design focuses on fusing features across levels more effectively: the semantic information carried by high-level features and the fine detail carried by low-level ones. By injecting rich semantics into the low-level features while refining the high-level ones, U-Net v2 delineates object boundaries more precisely and extracts small structures more reliably.

Its main technical innovations:

- Multi-level feature extraction: a deep encoder extracts features at several scales from the input image.
- Semantics and Detail Infusion (SDI) module: fuses the semantics of high-level features and the detail of low-level features into every level's feature map via a Hadamard (elementwise) product, as the short sketch below illustrates.
- Improved skip connections: the new connections enrich both the semantics and the detail of each level's features, which yields more accurate segmentation in the decoder stage.

For a detailed description of UNetV2, see the paper: https://arxiv.org/abs/2311.17791

This article explains how to integrate UNetV2 into YOLOv8.
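To make the SDI idea concrete before the full listing: every stage's feature map is resized to the target level's resolution, passed through a per-stage convolution, and the results are multiplied elementwise. This is a stripped-down sketch only; the real SDI module in the code below additionally downsamples with adaptive average pooling when a source level is larger than the target, and uses learned 3x3 convolutions per stage.

import torch
import torch.nn.functional as F

def sdi_fusion(feats, anchor, convs):
    # Hadamard-product fusion: resize every level to the anchor's spatial
    # size, transform each with its own conv, then multiply elementwise.
    out = torch.ones_like(anchor)
    for f, conv in zip(feats, convs):
        f = F.interpolate(f, size=anchor.shape[-2:], mode='bilinear', align_corners=True)
        out = out * conv(f)
    return out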
2. Integrating UNetV2 into YOLOv8

Without further ado, on to the code.

2.1 Step 1

Locate the directory ultralytics/nn/modules and create a file in it named UNetV2.py (name it however you like), then paste in the UNetV2 core code:

import os.path
import warnings
import math
from functools import partial

import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.layers import DropPath, to_2tuple, trunc_normal_

__all__ = ['pvt_v2_b0', 'pvt_v2_b1', 'pvt_v2_b2', 'pvt_v2_b3', 'pvt_v2_b4', 'pvt_v2_b5']


class ChannelAttention(nn.Module):
    def __init__(self, in_planes, ratio=16):
        super(ChannelAttention, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.max_pool = nn.AdaptiveMaxPool2d(1)
        self.fc1 = nn.Conv2d(in_planes, in_planes // 16, 1, bias=False)
        self.relu1 = nn.ReLU()
        self.fc2 = nn.Conv2d(in_planes // 16, in_planes, 1, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        avg_out = self.fc2(self.relu1(self.fc1(self.avg_pool(x))))
        max_out = self.fc2(self.relu1(self.fc1(self.max_pool(x))))
        out = avg_out + max_out
        return self.sigmoid(out)


class SpatialAttention(nn.Module):
    def __init__(self, kernel_size=7):
        super(SpatialAttention, self).__init__()
        assert kernel_size in (3, 7), 'kernel size must be 3 or 7'
        padding = 3 if kernel_size == 7 else 1
        self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=padding, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        avg_out = torch.mean(x, dim=1, keepdim=True)
        max_out, _ = torch.max(x, dim=1, keepdim=True)
        x = torch.cat([avg_out, max_out], dim=1)
        x = self.conv1(x)
        return self.sigmoid(x)


class BasicConv2d(nn.Module):
    def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1):
        super(BasicConv2d, self).__init__()
        self.conv = nn.Conv2d(in_planes, out_planes,
                              kernel_size=kernel_size, stride=stride,
                              padding=padding, dilation=dilation, bias=False)
        self.bn = nn.BatchNorm2d(out_planes)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        return x


class Encoder(nn.Module):
    def __init__(self, pretrain_path):
        super().__init__()
        self.backbone = pvt_v2_b2()
        if pretrain_path is None:
            warnings.warn('please provide the pretrained pvt model. Not using pretrained model.')
        elif not os.path.isfile(pretrain_path):
            warnings.warn(f'path: {pretrain_path} does not exist. Not using pretrained model.')
        else:
            print(f'using pretrained file: {pretrain_path}')
            save_model = torch.load(pretrain_path)
            model_dict = self.backbone.state_dict()
            state_dict = {k: v for k, v in save_model.items() if k in model_dict.keys()}
            model_dict.update(state_dict)
            self.backbone.load_state_dict(model_dict)

    def forward(self, x):
        f1, f2, f3, f4 = self.backbone(x)  # (x: 3, 352, 352)
        return f1, f2, f3, f4


class SDI(nn.Module):
    def __init__(self, channel):
        super().__init__()
        self.convs = nn.ModuleList(
            [nn.Conv2d(channel, channel, kernel_size=3, stride=1, padding=1) for _ in range(4)])

    def forward(self, xs, anchor):
        ans = torch.ones_like(anchor)
        target_size = anchor.shape[-1]
        for i, x in enumerate(xs):
            if x.shape[-1] > target_size:
                x = F.adaptive_avg_pool2d(x, (target_size, target_size))
            elif x.shape[-1] < target_size:
                x = F.interpolate(x, size=(target_size, target_size),
                                  mode='bilinear', align_corners=True)
            ans = ans * self.convs[i](x)  # Hadamard product across levels
        return ans


class UNetV2(nn.Module):
    """use SpatialAtt + ChannelAtt"""

    def __init__(self, channel=3, n_classes=1, deep_supervision=True, pretrained_path=None):
        super().__init__()
        self.deep_supervision = deep_supervision
        self.encoder = Encoder(pretrained_path)

        self.ca_1 = ChannelAttention(64)
        self.sa_1 = SpatialAttention()
        self.ca_2 = ChannelAttention(128)
        self.sa_2 = SpatialAttention()
        self.ca_3 = ChannelAttention(320)
        self.sa_3 = SpatialAttention()
        self.ca_4 = ChannelAttention(512)
        self.sa_4 = SpatialAttention()

        self.Translayer_1 = BasicConv2d(64, channel, 1)
        self.Translayer_2 = BasicConv2d(128, channel, 1)
        self.Translayer_3 = BasicConv2d(320, channel, 1)
        self.Translayer_4 = BasicConv2d(512, channel, 1)

        self.sdi_1 = SDI(channel)
        self.sdi_2 = SDI(channel)
        self.sdi_3 = SDI(channel)
        self.sdi_4 = SDI(channel)

        self.seg_outs = nn.ModuleList(
            [nn.Conv2d(channel, n_classes, 1, 1) for _ in range(4)])

        self.deconv2 = nn.ConvTranspose2d(channel, channel, kernel_size=4, stride=2, padding=1, bias=False)
        self.deconv3 = nn.ConvTranspose2d(channel, channel, kernel_size=4, stride=2, padding=1, bias=False)
        self.deconv4 = nn.ConvTranspose2d(channel, channel, kernel_size=4, stride=2, padding=1, bias=False)
        self.deconv5 = nn.ConvTranspose2d(channel, channel, kernel_size=4, stride=2, padding=1, bias=False)

        self.width_list = [i.size(1) for i in self.forward(torch.randn(1, 3, 640, 640))]

    def forward(self, x):
        seg_outs = []
        f1, f2, f3, f4 = self.encoder(x)

        # channel attention, spatial attention, then a 1x1 conv to a common width
        f1 = self.ca_1(f1) * f1
        f1 = self.sa_1(f1) * f1
        f1 = self.Translayer_1(f1)

        f2 = self.ca_2(f2) * f2
        f2 = self.sa_2(f2) * f2
        f2 = self.Translayer_2(f2)

        f3 = self.ca_3(f3) * f3
        f3 = self.sa_3(f3) * f3
        f3 = self.Translayer_3(f3)

        f4 = self.ca_4(f4) * f4
        f4 = self.sa_4(f4) * f4
        f4 = self.Translayer_4(f4)

        # SDI: infuse all four levels into each level's feature map
        f41 = self.sdi_4([f1, f2, f3, f4], f4)
        f31 = self.sdi_3([f1, f2, f3, f4], f3)
        f21 = self.sdi_2([f1, f2, f3, f4], f2)
        f11 = self.sdi_1([f1, f2, f3, f4], f1)

        seg_outs.append(self.seg_outs[0](f41))

        y = self.deconv2(f41) + f31
        seg_outs.append(self.seg_outs[1](y))

        y = self.deconv3(y) + f21
        seg_outs.append(self.seg_outs[2](y))

        y = self.deconv4(y) + f11
        seg_outs.append(self.seg_outs[3](y))

        for i, o in enumerate(seg_outs):
            seg_outs[i] = F.interpolate(o, scale_factor=4, mode='bilinear')

        if self.deep_supervision:
            return seg_outs[::-1]
        else:
            return seg_outs[-1]


class Mlp(nn.Module):
    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.dwconv = DWConv(hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

        self.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
        elif isinstance(m, nn.Conv2d):
            fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            fan_out //= m.groups
            m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            if m.bias is not None:
                m.bias.data.zero_()

    def forward(self, x, H, W):
        x = self.fc1(x)
        x = self.dwconv(x, H, W)
        x = self.act(x)
        x = self.drop(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x
class Attention(nn.Module):
    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., sr_ratio=1):
        super().__init__()
        assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."

        self.dim = dim
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5

        self.q = nn.Linear(dim, dim, bias=qkv_bias)
        self.kv = nn.Linear(dim, dim * 2, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

        self.sr_ratio = sr_ratio
        if sr_ratio > 1:
            self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio)
            self.norm = nn.LayerNorm(dim)

        self.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
        elif isinstance(m, nn.Conv2d):
            fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            fan_out //= m.groups
            m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            if m.bias is not None:
                m.bias.data.zero_()

    def forward(self, x, H, W):
        B, N, C = x.shape
        q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)

        if self.sr_ratio > 1:
            x_ = x.permute(0, 2, 1).reshape(B, C, H, W)
            x_ = self.sr(x_).reshape(B, C, -1).permute(0, 2, 1)
            x_ = self.norm(x_)
            kv = self.kv(x_).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        else:
            kv = self.kv(x).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        k, v = kv[0], kv[1]

        attn = (q @ k.transpose(-2, -1)) * self.scale
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)

        return x


class Block(nn.Module):
    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, sr_ratio=1):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim,
            num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
            attn_drop=attn_drop, proj_drop=drop, sr_ratio=sr_ratio)
        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)

        self.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
        elif isinstance(m, nn.Conv2d):
            fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            fan_out //= m.groups
            m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            if m.bias is not None:
                m.bias.data.zero_()

    def forward(self, x, H, W):
        x = x + self.drop_path(self.attn(self.norm1(x), H, W))
        x = x + self.drop_path(self.mlp(self.norm2(x), H, W))
        return x


class OverlapPatchEmbed(nn.Module):
    """Image to Patch Embedding"""

    def __init__(self, img_size=224, patch_size=7, stride=4, in_chans=3, embed_dim=768):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)

        self.img_size = img_size
        self.patch_size = patch_size
        self.H, self.W = img_size[0] // patch_size[0], img_size[1] // patch_size[1]
        self.num_patches = self.H * self.W
        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=stride,
                              padding=(patch_size[0] // 2, patch_size[1] // 2))
        self.norm = nn.LayerNorm(embed_dim)

        self.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
        elif isinstance(m, nn.Conv2d):
            fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            fan_out //= m.groups
            m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            if m.bias is not None:
                m.bias.data.zero_()

    def forward(self, x):
        x = self.proj(x)
        _, _, H, W = x.shape
        x = x.flatten(2).transpose(1, 2)
        x = self.norm(x)
        return x, H, W


class PyramidVisionTransformerImpr(nn.Module):
    def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dims=[64, 128, 256, 512],
                 num_heads=[1, 2, 4, 8], mlp_ratios=[4, 4, 4, 4], qkv_bias=False, qk_scale=None, drop_rate=0.,
                 attn_drop_rate=0., drop_path_rate=0., norm_layer=nn.LayerNorm,
                 depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1]):
        super().__init__()
        self.num_classes = num_classes
        self.depths = depths

        # patch_embed
        self.patch_embed1 = OverlapPatchEmbed(img_size=img_size, patch_size=7, stride=4, in_chans=in_chans,
                                              embed_dim=embed_dims[0])
        self.patch_embed2 = OverlapPatchEmbed(img_size=img_size // 4, patch_size=3, stride=2, in_chans=embed_dims[0],
                                              embed_dim=embed_dims[1])
        self.patch_embed3 = OverlapPatchEmbed(img_size=img_size // 8, patch_size=3, stride=2, in_chans=embed_dims[1],
                                              embed_dim=embed_dims[2])
        self.patch_embed4 = OverlapPatchEmbed(img_size=img_size // 16, patch_size=3, stride=2, in_chans=embed_dims[2],
                                              embed_dim=embed_dims[3])

        # transformer encoder
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]  # stochastic depth decay rule
        cur = 0
        self.block1 = nn.ModuleList([Block(
            dim=embed_dims[0], num_heads=num_heads[0], mlp_ratio=mlp_ratios[0], qkv_bias=qkv_bias, qk_scale=qk_scale,
            drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
            sr_ratio=sr_ratios[0])
            for i in range(depths[0])])
        self.norm1 = norm_layer(embed_dims[0])

        cur += depths[0]
        self.block2 = nn.ModuleList([Block(
            dim=embed_dims[1], num_heads=num_heads[1], mlp_ratio=mlp_ratios[1], qkv_bias=qkv_bias, qk_scale=qk_scale,
            drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
            sr_ratio=sr_ratios[1])
            for i in range(depths[1])])
        self.norm2 = norm_layer(embed_dims[1])

        cur += depths[1]
        self.block3 = nn.ModuleList([Block(
            dim=embed_dims[2], num_heads=num_heads[2], mlp_ratio=mlp_ratios[2], qkv_bias=qkv_bias, qk_scale=qk_scale,
            drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
            sr_ratio=sr_ratios[2])
            for i in range(depths[2])])
        self.norm3 = norm_layer(embed_dims[2])

        cur += depths[2]
        self.block4 = nn.ModuleList([Block(
            dim=embed_dims[3], num_heads=num_heads[3], mlp_ratio=mlp_ratios[3], qkv_bias=qkv_bias, qk_scale=qk_scale,
            drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
            sr_ratio=sr_ratios[3])
            for i in range(depths[3])])
        self.norm4 = norm_layer(embed_dims[3])

        # classification head
        # self.head = nn.Linear(embed_dims[3], num_classes) if num_classes > 0 else nn.Identity()

        self.apply(self._init_weights)
        self.width_list = [i.size(1) for i in self.forward(torch.randn(1, 3, 640, 640))]

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
        elif isinstance(m, nn.Conv2d):
            fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            fan_out //= m.groups
            m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            if m.bias is not None:
                m.bias.data.zero_()

    def init_weights(self, pretrained=None):
        if isinstance(pretrained, str):
            logger = 1
            # load_checkpoint(self, pretrained, map_location='cpu', strict=False, logger=logger)

    def reset_drop_path(self, drop_path_rate):
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(self.depths))]
        cur = 0
        for i in range(self.depths[0]):
            self.block1[i].drop_path.drop_prob = dpr[cur + i]

        cur += self.depths[0]
        for i in range(self.depths[1]):
            self.block2[i].drop_path.drop_prob = dpr[cur + i]

        cur += self.depths[1]
        for i in range(self.depths[2]):
            self.block3[i].drop_path.drop_prob = dpr[cur + i]

        cur += self.depths[2]
        for i in range(self.depths[3]):
            self.block4[i].drop_path.drop_prob = dpr[cur + i]

    def freeze_patch_emb(self):
        self.patch_embed1.requires_grad = False

    @torch.jit.ignore
    def no_weight_decay(self):
        return {'pos_embed1', 'pos_embed2', 'pos_embed3', 'pos_embed4', 'cls_token'}  # has pos_embed may be better

    def get_classifier(self):
        return self.head

    def reset_classifier(self, num_classes, global_pool=''):
        self.num_classes = num_classes
        self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()

    # def _get_pos_embed(self, pos_embed, patch_embed, H, W):
    #     if H * W == self.patch_embed1.num_patches:
    #         return pos_embed
    #     else:
    #         return F.interpolate(
    #             pos_embed.reshape(1, patch_embed.H, patch_embed.W, -1).permute(0, 3, 1, 2),
    #             size=(H, W), mode='bilinear').reshape(1, -1, H * W).permute(0, 2, 1)

    def forward_features(self, x):
        B = x.shape[0]
        outs = []

        # stage 1
        x, H, W = self.patch_embed1(x)
        for i, blk in enumerate(self.block1):
            x = blk(x, H, W)
        x = self.norm1(x)
        x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
        outs.append(x)

        # stage 2
        x, H, W = self.patch_embed2(x)
        for i, blk in enumerate(self.block2):
            x = blk(x, H, W)
        x = self.norm2(x)
        x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
        outs.append(x)

        # stage 3
        x, H, W = self.patch_embed3(x)
        for i, blk in enumerate(self.block3):
            x = blk(x, H, W)
        x = self.norm3(x)
        x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
        outs.append(x)

        # stage 4
        x, H, W = self.patch_embed4(x)
        for i, blk in enumerate(self.block4):
            x = blk(x, H, W)
        x = self.norm4(x)
        x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
        outs.append(x)

        return outs

    def forward(self, x):
        x = self.forward_features(x)
        # x = self.head(x)
        return x


class DWConv(nn.Module):
    def __init__(self, dim=768):
        super(DWConv, self).__init__()
        self.dwconv = nn.Conv2d(dim, dim, 3, 1, 1, bias=True, groups=dim)

    def forward(self, x, H, W):
        B, N, C = x.shape
        x = x.transpose(1, 2).view(B, C, H, W)
        x = self.dwconv(x)
        x = x.flatten(2).transpose(1, 2)
        return x


def _conv_filter(state_dict, patch_size=16):
    """convert patch embedding weight from manual patchify + linear proj to conv"""
    out_dict = {}
    for k, v in state_dict.items():
        if 'patch_embed.proj.weight' in k:
            v = v.reshape((v.shape[0], 3, patch_size, patch_size))
        out_dict[k] = v
    return out_dict


class pvt_v2_b0(PyramidVisionTransformerImpr):
    def __init__(self, **kwargs):
        super(pvt_v2_b0, self).__init__(
            patch_size=4, embed_dims=[32, 64, 160, 256], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4],
            qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1],
            drop_rate=0.0, drop_path_rate=0.1)


class pvt_v2_b1(PyramidVisionTransformerImpr):
    def __init__(self, **kwargs):
        super(pvt_v2_b1, self).__init__(
            patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4],
            qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1],
            drop_rate=0.0, drop_path_rate=0.1)


class pvt_v2_b2(PyramidVisionTransformerImpr):
    def __init__(self, **kwargs):
        super(pvt_v2_b2, self).__init__(
            patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4],
            qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1],
            drop_rate=0.0, drop_path_rate=0.1)


class pvt_v2_b3(PyramidVisionTransformerImpr):
    def __init__(self, **kwargs):
        super(pvt_v2_b3, self).__init__(
            patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4],
            qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 4, 18, 3], sr_ratios=[8, 4, 2, 1],
            drop_rate=0.0, drop_path_rate=0.1)


class pvt_v2_b4(PyramidVisionTransformerImpr):
    def __init__(self, **kwargs):
        super(pvt_v2_b4, self).__init__(
            patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4],
            qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 8, 27, 3], sr_ratios=[8, 4, 2, 1],
            drop_rate=0.0, drop_path_rate=0.1)


class pvt_v2_b5(PyramidVisionTransformerImpr):
    def __init__(self, **kwargs):
        super(pvt_v2_b5, self).__init__(
            patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4],
            qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 6, 40, 3], sr_ratios=[8, 4, 2, 1],
            drop_rate=0.0, drop_path_rate=0.1)
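Before wiring anything into YOLOv8, a quick shape check is worthwhile. Here is a minimal smoke test, assuming the file location from step 1 and run from the repo root (this snippet is mine, not from the original post):

import torch
from ultralytics.nn.modules.UNetV2 import pvt_v2_b1

m = pvt_v2_b1()
feats = m(torch.randn(1, 3, 640, 640))
print([tuple(f.shape) for f in feats])  # four feature maps at strides 4/8/16/32
print(m.width_list)                     # [64, 128, 320, 512] for pvt_v2_b1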
2.2 Step 2

In task.py, import the new module:

from .modules.UNetV2 import *

2.3 Step 3

Modify the parse_model method in task.py as shown in the original post's screenshot (the image is not reproduced here; the note after step 4 reconstructs the likely change).

2.4 Step 4

In the parse_model method in task.py, add the following branch:

        elif m in {pvt_v2_b0, pvt_v2_b1, pvt_v2_b2, pvt_v2_b3, pvt_v2_b4, pvt_v2_b5}:
            m = m(*args)
            c2 = m.width_list
            backbone = True
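One practical note, since the step-3 screenshot is missing: the backbone flag set above must already exist when step 5 first reads it, so parse_model presumably initializes it near the top of the function. A hypothetical one-line reconstruction:

# near the top of parse_model, before the layer loop (assumed from context:
# step 5 evaluates `i + 4 if backbone else i`, which needs a default value)
backbone = False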
2.5 Step 5

In the parse_model method in task.py, change the module-construction code to read as follows:

        if isinstance(c2, list):
            m_ = m
            m_.backbone = True
        else:
            m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args)  # module
        t = str(m)[8:-2].replace('__main__.', '')  # module type
        m.np = sum(x.numel() for x in m_.parameters())  # number params
        m_.i, m_.f, m_.type = i + 4 if backbone else i, f, t  # attach index, 'from' index, type

2.6 Step 6

Immediately after the block above, modify the code as shown in the original post's screenshot (not reproduced here; see the sketch below).
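Since the step-6 image did not survive, here is a hypothetical reconstruction based on how comparable multi-output backbones are usually registered in parse_model: the save list must use the shifted layer indices, and the channel list ch must absorb the backbone's whole width_list. Treat every line as an assumption to verify against your ultralytics version:

        save.extend(x % (i + 4 if backbone else i) for x in ([f] if isinstance(f, int) else f) if x != -1)  # savelist
        layers.append(m_)
        if i == 0:
            ch = []
        if isinstance(c2, list):   # the backbone reported one width per stage
            ch.extend(c2)
            if len(c2) != 5:
                ch.insert(0, 0)    # pad so later 'from' indices still line up
        else:
            ch.append(c2)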
2.7 Step 7

In the BaseModel class in task.py, modify _predict_once as follows:

def _predict_once(self, x, profile=False, visualize=False, embed=None):
    """
    Perform a forward pass through the network.

    Args:
        x (torch.Tensor): The input tensor to the model.
        profile (bool): Print the computation time of each layer if True, defaults to False.
        visualize (bool): Save the feature maps of the model if True, defaults to False.
        embed (list, optional): A list of feature vectors/embeddings to return.

    Returns:
        (torch.Tensor): The last output of the model.
    """
    y, dt, embeddings = [], [], []  # outputs
    for m in self.model:
        if m.f != -1:  # if not from previous layer
            x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f]  # from earlier layers
        if profile:
            self._profile_one_layer(m, x, dt)
        if hasattr(m, 'backbone'):
            x = m(x)
            if len(x) != 5:  # 0 - 5
                x.insert(0, None)
            for index, i in enumerate(x):
                if index in self.save:
                    y.append(i)
                else:
                    y.append(None)
            x = x[-1]  # pass the last output on to the next layer
        else:
            x = m(x)  # run
            y.append(x if m.i in self.save else None)  # save output
        if visualize:
            feature_visualization(x, m.type, m.i, save_dir=visualize)
        if embed and m.i in embed:
            embeddings.append(nn.functional.adaptive_avg_pool2d(x, (1, 1)).squeeze(-1).squeeze(-1))  # flatten
            if m.i == max(embed):
                return torch.unbind(torch.cat(embeddings, 1), dim=0)
    return x

2.8 Step 8

In DetectionModel in task.py (roughly 200-odd lines in), change the parameter shown in the original post's screenshot to 640; a guess at the concrete change follows.
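The step-8 screenshot is also missing. In all likelihood the parameter is the dummy input size that DetectionModel.__init__ uses to probe layer strides, since the PVT backbone above builds its width_list with a 640x640 input. This is an assumption, not a confirmed quote of the original edit:

# in DetectionModel.__init__ in task.py, where strides are derived from a dummy forward pass
s = 640  # assumed: was 256 (2x min stride); the transformer backbone expects a 640x640 probe input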
2.9 Step 9

An extra step: if the GFLOPs figure fails to print, open utils/torch_utils.py and change the parameter values shown in the original post's screenshot.

Note: this step touches quite a few places and it is easy to get one wrong, so double-check as you go.

That completes the modifications. Copy the yaml file below and run it.

The yaml file:

# Ultralytics YOLO 🚀, AGPL-3.0 license
# YOLOv8 object detection model with P3-P5 outputs. For Usage examples see https://docs.ultralytics.com/tasks/detect

# Parameters
nc: 80  # number of classes
scales:  # model compound scaling constants, i.e. 'model=yolov8n.yaml' will call yolov8.yaml with scale 'n'
  # [depth, width, max_channels]
  n: [0.33, 0.25, 1024]  # YOLOv8n summary: 225 layers, 3157200 parameters, 3157184 gradients, 8.9 GFLOPs
  s: [0.33, 0.50, 1024]  # YOLOv8s summary: 225 layers, 11166560 parameters, 11166544 gradients, 28.8 GFLOPs
  m: [0.67, 0.75, 768]  # YOLOv8m summary: 295 layers, 25902640 parameters, 25902624 gradients, 79.3 GFLOPs
  l: [1.00, 1.00, 512]  # YOLOv8l summary: 365 layers, 43691520 parameters, 43691504 gradients, 165.7 GFLOPs
  x: [1.00, 1.25, 512]  # YOLOv8x summary: 365 layers, 68229648 parameters, 68229632 gradients, 258.5 GFLOPs

# YOLOv8.0n backbone
backbone:
  # [from, repeats, module, args]
  - [-1, 1, pvt_v2_b1, []]  # 4
  - [-1, 1, SPPF, [1024, 5]]  # 5

# YOLOv8.0n head
head:
  - [-1, 1, nn.Upsample, [None, 2, 'nearest']]  # 6
  - [[-1, 3], 1, Concat, [1]]  # 7 cat backbone P4
  - [-1, 3, C2f, [512]]  # 8

  - [-1, 1, nn.Upsample, [None, 2, 'nearest']]  # 9
  - [[-1, 2], 1, Concat, [1]]  # 10 cat backbone P3
  - [-1, 3, C2f, [256]]  # 11 (P3/8-small)

  - [-1, 1, Conv, [256, 3, 2]]  # 12
  - [[-1, 8], 1, Concat, [1]]  # 13 cat head P4
  - [-1, 3, C2f, [512]]  # 14 (P4/16-medium)

  - [-1, 1, Conv, [512, 3, 2]]  # 15
  - [[-1, 5], 1, Concat, [1]]  # 16 cat head P5
  - [-1, 3, C2f, [1024]]  # 17 (P5/32-large)

  - [[11, 14, 17], 1, Detect, [nc]]  # Detect(P3, P4, P5)

As for the backbone, feel free to swap in another pvt_v2 variant; results differ from dataset to dataset.
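To round things off, training the modified model uses the standard ultralytics entry point. The yaml filename and dataset below are placeholders of my choosing, not names from the original post:

from ultralytics import YOLO

# 'yolov8-unetv2.yaml' is an assumed name for the config above
model = YOLO('yolov8-unetv2.yaml')
model.train(data='coco128.yaml', imgsz=640, epochs=100, batch=16)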
And just like that, you have read the whole thing. Go on, give it a like! --_--
