How to write a blog post on CSDN

Steps for writing a CSDN blog post

  1. Write it well
  2. Write it slowly
  3. Write it carefully

# Showing only ENet (note: displaying a code block)

```python
import torch.nn as nn
import torch
from torchsummary import summary


class InitialBlock(nn.Module):
    """ENet initial block: a 3x3 stride-2 convolution branch concatenated with
    a max-pooling branch, followed by batch norm and an activation."""

    def __init__(self,
                 in_channels,
                 out_channels,
                 bias=False,
                 relu=True):
        super().__init__()

        if relu:
            activation = nn.ReLU
        else:
            activation = nn.PReLU

        # Main branch - 3x3 convolution with stride 2
        self.main_branch = nn.Conv2d(
            in_channels,
            out_channels - 3,
            kernel_size=3,
            stride=2,
            padding=1,
            bias=bias)

        # Extension branch - max pooling of the input
        self.ext_branch = nn.MaxPool2d(3, stride=2, padding=1)

        # Initialize batch normalization to be used after concatenation
        self.batch_norm = nn.BatchNorm2d(out_channels)

        # PReLU/ReLU layer to apply after concatenating the branches
        self.out_activation = activation()

    def forward(self, x):
        main = self.main_branch(x)
        ext = self.ext_branch(x)

        # Concatenate branches
        out = torch.cat((main, ext), 1)

        # Apply batch normalization
        out = self.batch_norm(out)

        return self.out_activation(out)
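# Shape note (illustrative, not from the original post): with a 3-channel
# 512x512 input, InitialBlock(3, 16) concatenates 13 conv feature maps with
# the 3 max-pooled input channels, so the output is (N, 16, 256, 256).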

class RegularBottleneck(nn.Module):
    """Regular/dilated/asymmetric ENet bottleneck that keeps the spatial size
    and channel count; the extension branch output is added to the input."""

    def __init__(self,
                 channels,
                 internal_ratio=4,
                 kernel_size=3,
                 padding=0,
                 dilation=1,
                 asymmetric=False,
                 dropout_prob=0,
                 bias=False,
                 relu=True):
        super().__init__()

        internal_channels = channels // internal_ratio

        if relu:
            activation = nn.ReLU
        else:
            activation = nn.PReLU

        # 1x1 projection convolution
        self.ext_conv1 = nn.Sequential(
            nn.Conv2d(
                channels,
                internal_channels,
                kernel_size=1,
                stride=1,
                bias=bias), nn.BatchNorm2d(internal_channels), activation())

        # Main convolution: either a kx1 followed by a 1xk pair (asymmetric)
        # or a single kxk convolution, possibly dilated
        if asymmetric:
            self.ext_conv2 = nn.Sequential(
                nn.Conv2d(
                    internal_channels,
                    internal_channels,
                    kernel_size=(kernel_size, 1),
                    stride=1,
                    padding=(padding, 0),
                    dilation=dilation,
                    bias=bias), nn.BatchNorm2d(internal_channels), activation(),
                nn.Conv2d(
                    internal_channels,
                    internal_channels,
                    kernel_size=(1, kernel_size),
                    stride=1,
                    padding=(0, padding),
                    dilation=dilation,
                    bias=bias), nn.BatchNorm2d(internal_channels), activation())
        else:
            self.ext_conv2 = nn.Sequential(
                nn.Conv2d(
                    internal_channels,
                    internal_channels,
                    kernel_size=kernel_size,
                    stride=1,
                    padding=padding,
                    dilation=dilation,
                    bias=bias), nn.BatchNorm2d(internal_channels), activation())

        # 1x1 expansion convolution
        self.ext_conv3 = nn.Sequential(
            nn.Conv2d(
                internal_channels,
                channels,
                kernel_size=1,
                stride=1,
                bias=bias), nn.BatchNorm2d(channels), activation())

        # Spatial dropout as the regularizer
        self.ext_regul = nn.Dropout2d(p=dropout_prob)

        # PReLU/ReLU layer to apply after adding the branches
        self.out_activation = activation()

    def forward(self, x):
        # Main branch shortcut
        main = x

        # Extension branch
        ext = self.ext_conv1(x)
        ext = self.ext_conv2(ext)
        ext = self.ext_conv3(ext)
        ext = self.ext_regul(ext)

        # Add main and extension branches
        out = main + ext

        return self.out_activation(out)


class DownsamplingBottleneck(nn.Module):
    """ENet downsampling bottleneck: max pooling (optionally with indices) on
    the main branch, a strided 2x2 projection on the extension branch, and
    zero padding of the main-branch channels before the two are added."""

    def __init__(self,
                 in_channels,
                 out_channels,
                 internal_ratio=4,
                 return_indices=False,
                 dropout_prob=0,
                 bias=False,
                 relu=True):
        super().__init__()

        # Store parameters that are needed later
        self.return_indices = return_indices

        internal_channels = in_channels // internal_ratio

        if relu:
            activation = nn.ReLU
        else:
            activation = nn.PReLU

        # Main branch - max pooling followed by feature map (channels) padding
        self.main_max1 = nn.MaxPool2d(
            2,
            stride=2,
            return_indices=return_indices)

        # 2x2 projection convolution with stride 2
        self.ext_conv1 = nn.Sequential(
            nn.Conv2d(
                in_channels,
                internal_channels,
                kernel_size=2,
                stride=2,
                bias=bias), nn.BatchNorm2d(internal_channels), activation())

        # Convolution
        self.ext_conv2 = nn.Sequential(
            nn.Conv2d(
                internal_channels,
                internal_channels,
                kernel_size=3,
                stride=1,
                padding=1,
                bias=bias), nn.BatchNorm2d(internal_channels), activation())

        # 1x1 expansion convolution
        self.ext_conv3 = nn.Sequential(
            nn.Conv2d(
                internal_channels,
                out_channels,
                kernel_size=1,
                stride=1,
                bias=bias), nn.BatchNorm2d(out_channels), activation())

        # Spatial dropout as the regularizer
        self.ext_regul = nn.Dropout2d(p=dropout_prob)

        # PReLU/ReLU layer to apply after adding the branches
        self.out_activation = activation()

    def forward(self, x):
        # Main branch shortcut
        if self.return_indices:
            main, max_indices = self.main_max1(x)
        else:
            main = self.main_max1(x)
            max_indices = None  # no pooling indices requested

        # Extension branch
        ext = self.ext_conv1(x)
        ext = self.ext_conv2(ext)
        ext = self.ext_conv3(ext)
        ext = self.ext_regul(ext)

        # Main branch channel padding
        n, ch_ext, h, w = ext.size()
        ch_main = main.size()[1]
        padding = torch.zeros(n, ch_ext - ch_main, h, w)

        # Before concatenating, check if main is on the CPU or GPU and
        # convert padding accordingly
        if main.is_cuda:
            padding = padding.cuda()

        # Concatenate
        main = torch.cat((main, padding), 1)

        # Add main and extension branches
        out = main + ext

        return self.out_activation(out), max_indices
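# Note (descriptive, derived from the code above): the downsampling block
# halves the spatial resolution with max pooling, zero-pads the main branch
# from in_channels up to out_channels, and keeps the pooling indices so the
# decoder's UpsamplingBottleneck can unpool to the matching positions later.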

class UpsamplingBottleneck(nn.Module):
    """ENet upsampling bottleneck: a 1x1 convolution plus max unpooling on the
    main branch and a transposed convolution on the extension branch."""

    def __init__(self,
                 in_channels,
                 out_channels,
                 internal_ratio=4,
                 dropout_prob=0,
                 bias=False,
                 relu=True):
        super().__init__()

        internal_channels = in_channels // internal_ratio

        if relu:
            activation = nn.ReLU
        else:
            activation = nn.PReLU

        # Main branch - 1x1 convolution followed by max unpooling
        self.main_conv1 = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=bias),
            nn.BatchNorm2d(out_channels))

        # Remember that the stride is the same as the kernel_size, just like
        # the max pooling layers
        self.main_unpool1 = nn.MaxUnpool2d(kernel_size=2)

        # 1x1 projection convolution with stride 1
        self.ext_conv1 = nn.Sequential(
            nn.Conv2d(
                in_channels, internal_channels, kernel_size=1, bias=bias),
            nn.BatchNorm2d(internal_channels), activation())

        # Transposed convolution
        self.ext_tconv1 = nn.ConvTranspose2d(
            internal_channels,
            internal_channels,
            kernel_size=2,
            stride=2,
            bias=bias)
        self.ext_tconv1_bnorm = nn.BatchNorm2d(internal_channels)
        self.ext_tconv1_activation = activation()

        # 1x1 expansion convolution
        self.ext_conv2 = nn.Sequential(
            nn.Conv2d(
                internal_channels, out_channels, kernel_size=1, bias=bias),
            nn.BatchNorm2d(out_channels), activation())

        # Spatial dropout as the regularizer
        self.ext_regul = nn.Dropout2d(p=dropout_prob)

        # PReLU/ReLU layer to apply after adding the branches
        self.out_activation = activation()

    def forward(self, x, max_indices, output_size):
        # Main branch shortcut
        main = self.main_conv1(x)
        main = self.main_unpool1(
            main, max_indices, output_size=output_size)

        # Extension branch
        ext = self.ext_conv1(x)
        ext = self.ext_tconv1(ext, output_size=output_size)
        ext = self.ext_tconv1_bnorm(ext)
        ext = self.ext_tconv1_activation(ext)
        ext = self.ext_conv2(ext)
        ext = self.ext_regul(ext)

        # Add main and extension branches
        out = main + ext

        return self.out_activation(out)


class ENet(nn.Module):
    """ENet model: an InitialBlock, three encoder stages, two decoder stages,
    and a final transposed convolution producing per-pixel class logits."""

    def __init__(self, n_classes, encoder_relu=False, decoder_relu=True):
        super().__init__()

        self.initial_block = InitialBlock(3, 16, relu=encoder_relu)

        # Stage 1 - Encoder
        self.downsample1_0 = DownsamplingBottleneck(
            16,
            64,
            return_indices=True,
            dropout_prob=0.01,
            relu=encoder_relu)
        self.regular1_1 = RegularBottleneck(
            64, padding=1, dropout_prob=0.01, relu=encoder_relu)
        self.regular1_2 = RegularBottleneck(
            64, padding=1, dropout_prob=0.01, relu=encoder_relu)
        self.regular1_3 = RegularBottleneck(
            64, padding=1, dropout_prob=0.01, relu=encoder_relu)
        self.regular1_4 = RegularBottleneck(
            64, padding=1, dropout_prob=0.01, relu=encoder_relu)

        # Stage 2 - Encoder
        self.downsample2_0 = DownsamplingBottleneck(
            64,
            128,
            return_indices=True,
            dropout_prob=0.1,
            relu=encoder_relu)
        self.regular2_1 = RegularBottleneck(
            128, padding=1, dropout_prob=0.1, relu=encoder_relu)
        self.dilated2_2 = RegularBottleneck(
            128, dilation=2, padding=2, dropout_prob=0.1, relu=encoder_relu)
        self.asymmetric2_3 = RegularBottleneck(
            128,
            kernel_size=5,
            padding=2,
            asymmetric=True,
            dropout_prob=0.1,
            relu=encoder_relu)
        self.dilated2_4 = RegularBottleneck(
            128, dilation=4, padding=4, dropout_prob=0.1, relu=encoder_relu)
        self.regular2_5 = RegularBottleneck(
            128, padding=1, dropout_prob=0.1, relu=encoder_relu)
        self.dilated2_6 = RegularBottleneck(
            128, dilation=8, padding=8, dropout_prob=0.1, relu=encoder_relu)
        self.asymmetric2_7 = RegularBottleneck(
            128,
            kernel_size=5,
            asymmetric=True,
            padding=2,
            dropout_prob=0.1,
            relu=encoder_relu)
        self.dilated2_8 = RegularBottleneck(
            128, dilation=16, padding=16, dropout_prob=0.1, relu=encoder_relu)

        # Stage 3 - Encoder
        self.regular3_0 = RegularBottleneck(
            128, padding=1, dropout_prob=0.1, relu=encoder_relu)
        self.dilated3_1 = RegularBottleneck(
            128, dilation=2, padding=2, dropout_prob=0.1, relu=encoder_relu)
        self.asymmetric3_2 = RegularBottleneck(
            128,
            kernel_size=5,
            padding=2,
            asymmetric=True,
            dropout_prob=0.1,
            relu=encoder_relu)
        self.dilated3_3 = RegularBottleneck(
            128, dilation=4, padding=4, dropout_prob=0.1, relu=encoder_relu)
        self.regular3_4 = RegularBottleneck(
            128, padding=1, dropout_prob=0.1, relu=encoder_relu)
        self.dilated3_5 = RegularBottleneck(
            128, dilation=8, padding=8, dropout_prob=0.1, relu=encoder_relu)
        self.asymmetric3_6 = RegularBottleneck(
            128,
            kernel_size=5,
            asymmetric=True,
            padding=2,
            dropout_prob=0.1,
            relu=encoder_relu)
        self.dilated3_7 = RegularBottleneck(
            128, dilation=16, padding=16, dropout_prob=0.1, relu=encoder_relu)

        # Stage 4 - Decoder
        self.upsample4_0 = UpsamplingBottleneck(
            128, 64, dropout_prob=0.1, relu=decoder_relu)
        self.regular4_1 = RegularBottleneck(
            64, padding=1, dropout_prob=0.1, relu=decoder_relu)
        self.regular4_2 = RegularBottleneck(
            64, padding=1, dropout_prob=0.1, relu=decoder_relu)

        # Stage 5 - Decoder
        self.upsample5_0 = UpsamplingBottleneck(
            64, 16, dropout_prob=0.1, relu=decoder_relu)
        self.regular5_1 = RegularBottleneck(
            16, padding=1, dropout_prob=0.1, relu=decoder_relu)
        self.transposed_conv = nn.ConvTranspose2d(
            16,
            n_classes,
            kernel_size=3,
            stride=2,
            padding=1,
            bias=False)

    def forward(self, x):
        # Initial block
        input_size = x.size()
        x = self.initial_block(x)

        # Stage 1 - Encoder
        stage1_input_size = x.size()
        x, max_indices1_0 = self.downsample1_0(x)
        x = self.regular1_1(x)
        x = self.regular1_2(x)
        x = self.regular1_3(x)
        x = self.regular1_4(x)

        # Stage 2 - Encoder
        stage2_input_size = x.size()
        x, max_indices2_0 = self.downsample2_0(x)
        x = self.regular2_1(x)
        x = self.dilated2_2(x)
        x = self.asymmetric2_3(x)
        x = self.dilated2_4(x)
        x = self.regular2_5(x)
        x = self.dilated2_6(x)
        x = self.asymmetric2_7(x)
        x = self.dilated2_8(x)

        # Stage 3 - Encoder
        x = self.regular3_0(x)
        x = self.dilated3_1(x)
        x = self.asymmetric3_2(x)
        x = self.dilated3_3(x)
        x = self.regular3_4(x)
        x = self.dilated3_5(x)
        x = self.asymmetric3_6(x)
        x = self.dilated3_7(x)

        # Stage 4 - Decoder
        x = self.upsample4_0(x, max_indices2_0, output_size=stage2_input_size)
        x = self.regular4_1(x)
        x = self.regular4_2(x)

        # Stage 5 - Decoder
        x = self.upsample5_0(x, max_indices1_0, output_size=stage1_input_size)
        x = self.regular5_1(x)
        x = self.transposed_conv(x, output_size=input_size)

        return x


# Model debugging
if __name__ == '__main__':
    inputs = torch.randn((1, 3, 512, 512))
    model = ENet(n_classes=4)
    out = model(inputs)
    print(out.size())

    # Use device to specify whether the network runs on the GPU or the CPU
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = ENet(n_classes=4).to(device)
    summary(model, input_size=(3, 64, 64), device=str(device))
```
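
Once the shape check above passes, the model can be dropped into an ordinary segmentation training loop. The snippet below is a minimal sketch that is not part of the original post: it assumes the ENet class defined above, 4 classes, a dummy batch in place of a real DataLoader, and per-pixel targets given as an (N, H, W) tensor of class indices, optimized with plain cross-entropy.

```python
import torch
import torch.nn as nn

# Minimal training-step sketch (assumed setup, not from the original post):
# dummy tensors stand in for a real DataLoader; ENet is the class defined above.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = ENet(n_classes=4).to(device)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=5e-4, weight_decay=2e-4)

images = torch.randn(2, 3, 256, 256, device=device)          # dummy image batch
targets = torch.randint(0, 4, (2, 256, 256), device=device)  # per-pixel class indices

model.train()
logits = model(images)             # (2, 4, 256, 256) per-pixel class logits
loss = criterion(logits, targets)  # cross-entropy averaged over every pixel

optimizer.zero_grad()
loss.backward()
optimizer.step()
print(loss.item())
```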

# Model diagram (not ENet)
(image placeholder)

What's new

We have extended the Markdown editor with some new features and syntax support. Beyond the standard Markdown editor functionality, we added the following to help you write your blog:

  1. A brand-new interface design for a brand-new writing experience;
  2. Set your favorite code-highlighting style in the Creation Center; Markdown code snippets will be displayed with the chosen highlighting;
  3. Image drag-and-drop: you can drag local images straight into the editing area to display them;
  4. Brand-new KaTeX math formula syntax;
  5. Support for mermaid syntax¹ for Gantt charts;
  6. Multi-screen editing of Markdown articles;
  7. Focused writing mode, preview mode, minimal writing mode, and synchronized scrolling of the left and right panes; the buttons are located between the editing and preview areas;
  8. Checklist support.

Keyboard shortcuts

Undo: Ctrl/Command + Z
Redo: Ctrl/Command + Y
Bold: Ctrl/Command + B
Italic: Ctrl/Command + I
Heading: Ctrl/Command + Shift + H
Unordered list: Ctrl/Command + Shift + U
Ordered list: Ctrl/Command + Shift + O
Checklist: Ctrl/Command + Shift + C
Insert code: Ctrl/Command + Shift + K
Insert link: Ctrl/Command + Shift + L
Insert image: Ctrl/Command + Shift + G
Find: Ctrl/Command + F
Replace: Ctrl/Command + G

Well-structured headings help generate the table of contents

Type one # followed by a space to create a level-1 heading.
Type two # followed by a space to create a level-2 heading.
And so on; up to six heading levels are supported, which helps the TOC syntax generate a complete table of contents.

How to change text styles

Emphasized text emphasized text

Bold text bold text

Marked text

Strikethrough text

Quoted text

H₂O is a liquid.

2¹⁰ equals 1024.

Inserting links and images

Link: link.

Image: Alt

Image with specified size: Alt

Centered image: Alt

Centered image with specified size: Alt

Of course, to make things more convenient for users, we added the image drag-and-drop feature.

How to insert a nice code snippet

On the blog settings page, choose a code-highlighting style you like; code snippets, such as the one below, will be shown with that highlighting.

```javascript
// A highlighted block
var foo = 'bar';
```

Generating a list that suits you

  • Item
    • Item
      • Item
  1. Item 1
  2. Item 2
  3. Item 3
  • Planned task
  • Completed task

Creating a table

A simple table is created like this:

| Item     | Value |
| -------- | ----- |
| Computer | $1600 |
| Phone    | $12   |
| Pipe     | $1    |

Setting content to be centered, left-aligned, or right-aligned

Use :---------: to center, :---------- to left-align, and ----------: to right-align.

| First column                       | Second column                           | Third column                          |
| :--------------------------------: | ---------------------------------------: | :------------------------------------ |
| Text in the first column, centered | Text in the second column, right-aligned | Text in the third column, left-aligned |

SmartyPants

SmartyPants converts ASCII punctuation characters into "smart" typographic punctuation HTML entities. For example:

| TYPE             | ASCII                         | HTML                       |
| ---------------- | ----------------------------- | -------------------------- |
| Single backticks | 'Isn't this fun?'             | ‘Isn’t this fun?’          |
| Quotes           | "Isn't this fun?"             | “Isn’t this fun?”          |
| Dashes           | -- is en-dash, --- is em-dash | – is en-dash, — is em-dash |

Creating a custom list

Markdown
: Text-to-HTML conversion tool

Authors
: John
: Luke

How to create a footnote

A piece of text with a footnote.²

Comments are also essential

Markdown converts text into HTML.

KaTeX math formulas

You can render LaTeX math expressions using KaTeX:

The Gamma function $\Gamma(n) = (n-1)!\quad\forall n\in\mathbb N$ is defined via the Euler integral

$$\Gamma(z) = \int_0^\infty t^{z-1}e^{-t}\,dt\,.$$

You can find more information about LaTeX math expressions here.

New Gantt chart functionality to enrich your articles

(Gantt chart rendering: completed and in-progress tasks for "Adding GANTT diagram functionality to mermaid")

  • For Gantt chart syntax, see here.

UML diagrams

UML diagrams can be rendered with Mermaid. For example, the following sequence diagram:

(Sequence diagram rendering: a short conversation among Zhang San, Li Si, and Wang Wu)

And this produces a flowchart:

(Flowchart rendering: link, rectangle, rounded rectangle, diamond)

  • For Mermaid syntax, see here.

Flowchart diagrams

We still support flowchart-style flowcharts:

(Flowchart rendering, created with Raphaël 2.3.0: start, my operation, confirm?, end, yes/no)

  • For Flowchart syntax, see here.

Export and import

Export

If you want to try out this editor, you can edit anywhere in this article. When you have finished writing an article, find Export article in the toolbar above to generate a .md or .html file and save it locally.

Import

If you want to load a .md file you wrote before, you can use the import function in the toolbar above to import a file with the corresponding extension
and continue your writing.


  1. mermaid syntax notes ↩︎

  2. Explanation of the footnote ↩︎
