        self.relu = nn.ReLU(inplace=True)
        self.conv3 = nn.Conv2d(out_channels, out_channels, 3, padding=1)
        self.bn2 = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        # x: (batch, channels, height, width), e.g. (b, 10, 8, 1024)
        batch_size = x.shape[0]
        # F.pad takes (left, right, top, bottom); each branch shifts the padding
        # by one pixel so the four conv outputs can be interleaved into a 2x map.
        out1_1 = self.conv1_1(nn.functional.pad(x, (1, 1, 1, 1)))
        out1_2 = self.conv1_2(nn.functional.pad(x, (1, 1, 0, 1)))  # right interleaving padding
        # out1_2 = self.conv1_2(nn.functional.pad(x, (1, 1, 1, 0)))  # author's interleaving padding on GitHub
        out1_3 = self.conv1_3(nn.functional.pad(x, (0, 1, 1, 1)))  # right interleaving padding
        # out1_3 = self.conv1_3(nn.functional.pad(x, (1, 0, 1, 1)))  # author's interleaving padding on GitHub
        out1_4 = self.conv1_4(nn.functional.pad(x, (0, 1, 0, 1)))  # right interleaving padding
        # out1_4 = self.conv1_4(nn.functional.pad(x, (1, 0, 1, 0)))  # author's interleaving padding on GitHub
        out2_1 = self.conv2_1(nn.functional.pad(x, (1, 1, 1, 1)))
        out2_2 = self.conv2_2(nn.functional.pad(x, (1, 1, 0, 1)))  # right interleaving padding
        # out2_2 = self.conv2_2(nn.functional.pad(x, (1, 1, 1, 0)))  # author's interleaving padding on GitHub
        out2_3 = self.conv2_3(nn.functional.pad(x, (0, 1, 1, 1)))  # right interleaving padding
        # out2_3 = self.conv2_3(nn.functional.pad(x, (1, 0, 1, 1)))  # author's interleaving padding on GitHub
        out2_4 = self.conv2_4(nn.functional.pad(x, (0, 1, 0, 1)))  # right interleaving padding
        # out2_4 = self.conv2_4(nn.functional.pad(x, (1, 0, 1, 0)))  # author's interleaving padding on GitHub
        height = out1_1.size()[2]
        width = out1_1.size()[3]
        # Interleave columns first (width * 2), then rows (height * 2): a manual
        # pixel-shuffle-style 2x upsampling of the four branch outputs.
        out1_1_2 = torch.stack((out1_1, out1_2), dim=-3).permute(0, 1, 3, 4, 2).contiguous().view(
            batch_size, -1, height, width * 2)
        out1_3_4 = torch.stack((out1_3, out1_4), dim=-3).permute(0, 1, 3, 4, 2).contiguous().view(
            batch_size, -1, height, width * 2)
        out1_1234 = torch.stack((out1_1_2, out1_3_4), dim=-3).permute(0, 1, 3, 2, 4).contiguous().view(
            batch_size, -1, height * 2, width * 2)
        out2_1_2 = torch.stack((out2_1, out2_2), dim=-3).permute(0, 1, 3, 4, 2).contiguous().view(
            batch_size, -1, height, width * 2)
        out2_3_4 = torch.stack((out2_3, out2_4), dim=-3).permute(0, 1, 3, 4, 2).contiguous().view(
            batch_size, -1, height, width * 2)
        out2_1234 = torch.stack((out2_1_2, out2_3_4), dim=-3).permute(0, 1, 3, 2, 4).contiguous().view(
            batch_size, -1, height * 2, width * 2)
        out1 = self.bn1_1(out1_1234)
        out1 = self.relu(out1)
        out1 = self.conv3(out1)
        out1 = self.bn2(out1)
        out2 = self.bn1_2(out2_1234)
        out = out1 + out2  # residual sum of the two upsampled branches
        out = self.relu(out)
        return out
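Read as a whole, the two stack/permute/view chains interleave the four padded-conv outputs column by column and then row by row, which is a hand-rolled 2x pixel shuffle. The minimal sketch below (toy constant tensors of my own, not the author's code) makes the pattern visible and checks it against `nn.PixelShuffle(2)`:

```python
import torch

# Four constant maps stand in for out*_1..out*_4; values 0..3 make the
# interleaving pattern visible in the printed output.
b, c, h, w = 1, 1, 2, 2
maps = [torch.full((b, c, h, w), float(k)) for k in range(4)]

def interleave_cols(m1, m2):
    # stack -> (b, c, 2, h, w); permute -> (b, c, h, w, 2); view alternates columns
    return torch.stack((m1, m2), dim=-3).permute(0, 1, 3, 4, 2).contiguous().view(b, -1, h, w * 2)

top = interleave_cols(maps[0], maps[1])
bottom = interleave_cols(maps[2], maps[3])
# Second stack/permute/view alternates the rows of the two half-results.
full = torch.stack((top, bottom), dim=-3).permute(0, 1, 3, 2, 4).contiguous().view(b, -1, h * 2, w * 2)
print(full[0, 0])
# tensor([[0., 1., 0., 1.],
#         [2., 3., 2., 3.],
#         [0., 1., 0., 1.],
#         [2., 3., 2., 3.]])

# The same 2x rearrangement via PixelShuffle on the channel-concatenated maps.
assert torch.equal(torch.nn.PixelShuffle(2)(torch.cat(maps, dim=1)), full)
```

The commented-out `pad` variants in the block above only change which side of the input each branch is aligned to; the interleaving logic itself is unchanged.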
# Encoder: downsampling
class Fcrn_encode(nn.Module):
    def __init__(self, dim=opt.dim):
        super(Fcrn_encode, self).__init__()
        self.dim = dim
        self.conv_1 = nn.Conv2d(in_channels=3, out_channels=dim, kernel_size=3, stride=1, padding=1)
        self.residual_block_1_down_1 = ResidualBlockClass('Detector.Res1', 1 * dim, 2 * dim, resample='down', activate='leaky_relu')
        # 128x128
        self.residual_block_2_down_1 = ResidualBlockClass('Detector.Res2', 2 * dim, 4 * dim, resample='down', activate='leaky_relu')
        # 64x64
        self.residual_block_3_down_1 = ResidualBlockClass('Detector.Res3', 4 * dim, 4 * dim, resample='down', activate='leaky_relu')
        # 32x32
        self.residual_block_4_down_1 = ResidualBlockClass('Detector.Res4', 4 * dim, 6 * dim, resample='down', activate='leaky_relu')
        # 16x16
        self.residual_block_5_none_1 = ResidualBlockClass('Detector.Res5', 6 * dim, 6 * dim, resample=None, activate='leaky_relu')

    def forward(self, x, n1=0, n2=0, n3=0):
        x1 = self.conv_1(x)                    # x1: dim x 256 x 256
        x2 = self.residual_block_1_down_1(x1)  # x2: 2*dim x 128 x 128
        # Blend each intermediate feature with an optional noise map before the next downsample.
        x3 = self.residual_block_2_down_1((1 - opt.alpha) * x2 + opt.alpha * n1)  # x3: 4*dim x 64 x 64
        x4 = self.residual_block_3_down_1((1 - opt.alpha) * x3 + opt.alpha * n2)  # x4: 4*dim x 32 x 32
        x = self.residual_block_4_down_1((1 - opt.alpha) * x4 + opt.alpha * n3)
        feature = self.residual_block_5_none_1(x)
        x = torch.tanh(feature)  # torch.tanh replaces the deprecated F.tanh
        return x, x2, x3, x4
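For orientation, here is a hypothetical usage sketch, assuming `opt.dim` and `opt.alpha` come from the script's config object and the 256x256 input implied by the shape comments above:

```python
encoder = Fcrn_encode(dim=opt.dim)
img = torch.randn(4, 3, 256, 256)  # batch of four 256x256 RGB images
code, x2, x3, x4 = encoder(img)    # no noise maps supplied
# code: (4, 6*dim, 16, 16) -- tanh-squashed bottleneck feature
# x2: (4, 2*dim, 128, 128), x3: (4, 4*dim, 64, 64), x4: (4, 4*dim, 32, 32)
# x2/x3/x4 are returned so the decoder below can reuse them as skip features.
```

When no noise maps are passed, `n1 = n2 = n3 = 0` and each blend reduces to scaling the features by `1 - opt.alpha`.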
### 3.10 Decoder: upsampling