Analysis of the distribution source code during docker push


Following up on the previous article, "distribution structure and start up analysis", this article analyzes how distribution handles a docker push. Everything here is my own understanding of the distribution registry; if you find mistakes, please point them out so I can correct them.
The storage details in this article are analyzed using the local filesystem backend as the example.

Before the analysis, let me briefly describe distribution's storage directory structure, based on my understanding and using local storage as the example.

Analysis of the distribution storage directory

Here I use a registry I set up earlier as the example. It holds one image with two tags, which is enough to illustrate distribution's storage directory layout:

    ├── blobs
    │   └── sha256
    │       ├── 12
    │       │   └── 124a9bb461f0106581e9e0e76a1edd39076b62ff80c3608676f5a6e057326b10
    │       │       └── data
    │       ├── 19
    │       │   └── 194529caae55f1ec32d99ae611bd52a5a25b2d20f4c49094598b3cfecb459f55
    │       │       └── data
    │       ├── 27
    │       │   └── 276470c32842665461864314d57dd44bb3c56f074794ac3535493f329c258239
    │       │       └── data
    │       ├── 29
    │       │   └── 298839ed00f4384909cc62cb14ea994a6d170efac760c304f7e28662b970ba0d
    │       │       └── data
    │       ├── 34
    │       │   └── 3458458e7203e6bdc1e1ae94d9ba554af73db82334c5851d599dd435cc6bd301
    │       │       └── data
    │       ├── 3c
    │       │   ├── 3ca07c56cae5ba32650b237f4263cb382241373ef8bdb4fdf3159f8f7aa222eb
    │       │   │   └── data
    │       │   ├── 3cb08336d912fcc35c3cd618459c2e6803b20f505da1255a75b7fea4819b8a1d
    │       │   │   └── data
    │       │   └── 3cebd0f4bc415e136cda07dd211c759deadec4e95ee0740fefce5c0b70b28a55
    │       │       └── data
    │       ├── 42
    │       │   ├── 42a9aa022025cae4f696bb839f61adbf04fe7e4fbd9e600ed6fd784c4e27ef5f
    │       │   │   └── data
    │       │   └── 42ec43196b29fd4e3b17c6d04ff2fdb1069f8f7d21a2c44da9cc29d401f751a1
    │       │       └── data
    │       ├── 4f
    │       │   └── 4fd5388148cb60d9180d862df0440aa66cb1e330aab7cce785218f761ef24061
    │       │       └── data
    │       ├── 53
    │       │   └── 53b2590d0c3fef44882e645c79542b05caab1b5acf21968ba162e28a1b02d1a4
    │       │       └── data
    │       ├── 54
    │       │   └── 5461c243803b776a461d0eac87018660c977af1d10bf4e5ce95911b82054685d
    │       │       └── data
    │       ├── 65
    │       │   └── 65e4379b47f129da1cc399e68388c590ef024e55290a0ef5b82b63a1f18baf13
    │       │       └── data
    │       ├── 67
    │       │   └── 67e8f3430c3f11993877ca7fcf200fcb6b6514020c5f202a700dd28b6a122a2a
    │       │       └── data
    │       ├── 79
    │       │   └── 796c35f1fde2fcf458b1f25ebb68534f6bc028bad43c58172b240420b254b75d
    │       │       └── data
    │       ├── 8f
    │       │   └── 8f8a7dd64b5eabfb8aa1aa20ac6cb5db1ae79769f7bb69177c35979fae8714fa
    │       │       └── data
    │       ├── 94
    │       │   └── 941ac47011daa91b282b2a1b5e2cfd31a2a533295aa0d01e48fc41484658d764
    │       │       └── data
    │       ├── a1
    │       │   └── a1cd489c1ab8e51df8547a534524fd557ef83c00da88d970f241432347fb4bfe
    │       │       └── data
    │       ├── a5
    │       │   └── a52d7cc561ce3f840778d712a9458b3bd9f4f5cf9d9a0fa421d58eae10a46be4
    │       │       └── data
    │       ├── b2
    │       │   └── b296475482e5a3cbf3b2d21a4eb3c198dca9a638d1ad7cb548fd5eb9df38fb18
    │       │       └── data
    │       ├── b4
    │       │   └── b4fa55a79843e5e711d313127221832295cfc62a2b2de3f8e72c77c851b65201
    │       │       └── data
    │       ├── b5
    │       │   └── b5fa36c7626337446af4dd66d6ca7040c5061a708502b25c7e2a315b0dc0fd2d
    │       │       └── data
    │       ├── b9
    │       │   └── b975216c4476e4120537f4b04c080fbf8f71e5a8a303d920d23ed43d1a291723
    │       │       └── data
    │       ├── c2
    │       │   └── c2a8c6084659ec5616e58986f94f59fc0f8d1537300854b70420262d370a14df
    │       │       └── data
    │       ├── c3
    │       │   ├── c30f55aa7914fdce4ccca196601df27b168f04bdd26f067fb2be55e9ca7d3cc2
    │       │   │   └── data
    │       │   └── c3683c0954721b5a7f7932673024f818a6b1dd09f3d923d11f80710d22a37a8b
    │       │       └── data
    │       ├── c4
    │       │   └── c4db43cd7e2080e418138b4fd25e10e540f48eee55906b1332df7986bfc07320
    │       │       └── data
    │       ├── c5
    │       │   └── c5ee94bc35bc65a59cf7fb650a9a57ce9fdb79176c2f707251cb4cdd9f1ead0a
    │       │       └── data
    │       ├── c6
    │       │   └── c6d4161c3d2db5a0dff55e67665ad252dc6a244745d4d28748a33b0eb9016ac5
    │       │       └── data
    │       ├── d8
    │       │   └── d83791fefdeb24a924f16bf4b1e4e6a555f68a167e730a5a61e5b123d31114f4
    │       │       └── data
    │       ├── dd
    │       │   └── dd5c8c14a21cf99b3a69ba520d71058630d35ad474251358700830a4596c62aa
    │       │       └── data
    │       ├── e7
    │       │   └── e7c637000dfa093cd047d63321b0d722540152f2fe23edc4726893dc55afc89f
    │       │       └── data
    │       ├── f1
    │       │   └── f158dd228292a8c1eab511d3f05d50260a589d12f21c1a960e91de8dac600d6d
    │       │       └── data
    │       └── fc
    │           └── fce479c28d5681384afbf8e7c4bb5cb3e41c20b6e8d359eb4b8d7527f79e1002
    │               └── data
    └── repositories
        └── ddcmao
            └── java8
                ├── _layers
                │   └── sha256
                │       ├── 124a9bb461f0106581e9e0e76a1edd39076b62ff80c3608676f5a6e057326b10
                │       │   └── link
                │       ├── 194529caae55f1ec32d99ae611bd52a5a25b2d20f4c49094598b3cfecb459f55
                │       │   └── link
                │       ├── 276470c32842665461864314d57dd44bb3c56f074794ac3535493f329c258239
                │       │   └── link
                │       ├── 298839ed00f4384909cc62cb14ea994a6d170efac760c304f7e28662b970ba0d
                │       │   └── link
                │       ├── 3458458e7203e6bdc1e1ae94d9ba554af73db82334c5851d599dd435cc6bd301
                │       │   └── link
                │       ├── 3ca07c56cae5ba32650b237f4263cb382241373ef8bdb4fdf3159f8f7aa222eb
                │       │   └── link
                │       ├── 3cb08336d912fcc35c3cd618459c2e6803b20f505da1255a75b7fea4819b8a1d
                │       │   └── link
                │       ├── 3cebd0f4bc415e136cda07dd211c759deadec4e95ee0740fefce5c0b70b28a55
                │       │   └── link
                │       ├── 42a9aa022025cae4f696bb839f61adbf04fe7e4fbd9e600ed6fd784c4e27ef5f
                │       │   └── link
                │       ├── 42ec43196b29fd4e3b17c6d04ff2fdb1069f8f7d21a2c44da9cc29d401f751a1
                │       │   └── link
                │       ├── 4fd5388148cb60d9180d862df0440aa66cb1e330aab7cce785218f761ef24061
                │       │   └── link
                │       ├── 53b2590d0c3fef44882e645c79542b05caab1b5acf21968ba162e28a1b02d1a4
                │       │   └── link
                │       ├── 5461c243803b776a461d0eac87018660c977af1d10bf4e5ce95911b82054685d
                │       │   └── link
                │       ├── 65e4379b47f129da1cc399e68388c590ef024e55290a0ef5b82b63a1f18baf13
                │       │   └── link
                │       ├── 67e8f3430c3f11993877ca7fcf200fcb6b6514020c5f202a700dd28b6a122a2a
                │       │   └── link
                │       ├── 796c35f1fde2fcf458b1f25ebb68534f6bc028bad43c58172b240420b254b75d
                │       │   └── link
                │       ├── 8f8a7dd64b5eabfb8aa1aa20ac6cb5db1ae79769f7bb69177c35979fae8714fa
                │       │   └── link
                │       ├── 941ac47011daa91b282b2a1b5e2cfd31a2a533295aa0d01e48fc41484658d764
                │       │   └── link
                │       ├── a1cd489c1ab8e51df8547a534524fd557ef83c00da88d970f241432347fb4bfe
                │       │   └── link
                │       ├── a52d7cc561ce3f840778d712a9458b3bd9f4f5cf9d9a0fa421d58eae10a46be4
                │       │   └── link
                │       ├── b296475482e5a3cbf3b2d21a4eb3c198dca9a638d1ad7cb548fd5eb9df38fb18
                │       │   └── link
                │       ├── b4fa55a79843e5e711d313127221832295cfc62a2b2de3f8e72c77c851b65201
                │       │   └── link
                │       ├── b5fa36c7626337446af4dd66d6ca7040c5061a708502b25c7e2a315b0dc0fd2d
                │       │   └── link
                │       ├── b975216c4476e4120537f4b04c080fbf8f71e5a8a303d920d23ed43d1a291723
                │       │   └── link
                │       ├── c2a8c6084659ec5616e58986f94f59fc0f8d1537300854b70420262d370a14df
                │       │   └── link
                │       ├── c30f55aa7914fdce4ccca196601df27b168f04bdd26f067fb2be55e9ca7d3cc2
                │       │   └── link
                │       ├── c3683c0954721b5a7f7932673024f818a6b1dd09f3d923d11f80710d22a37a8b
                │       │   └── link
                │       ├── c4db43cd7e2080e418138b4fd25e10e540f48eee55906b1332df7986bfc07320
                │       │   └── link
                │       ├── c5ee94bc35bc65a59cf7fb650a9a57ce9fdb79176c2f707251cb4cdd9f1ead0a
                │       │   └── link
                │       ├── c6d4161c3d2db5a0dff55e67665ad252dc6a244745d4d28748a33b0eb9016ac5
                │       │   └── link
                │       ├── d83791fefdeb24a924f16bf4b1e4e6a555f68a167e730a5a61e5b123d31114f4
                │       │   └── link
                │       ├── dd5c8c14a21cf99b3a69ba520d71058630d35ad474251358700830a4596c62aa
                │       │   └── link
                │       ├── e7c637000dfa093cd047d63321b0d722540152f2fe23edc4726893dc55afc89f
                │       │   └── link
                │       ├── f158dd228292a8c1eab511d3f05d50260a589d12f21c1a960e91de8dac600d6d
                │       │   └── link
                │       └── fce479c28d5681384afbf8e7c4bb5cb3e41c20b6e8d359eb4b8d7527f79e1002
                │           └── link
                ├── _manifests
                │   ├── revisions
                │   │   └── sha256
                │   │       ├── 08ffaab8c710dd9cf03d880e11e8f3def1907fe53ea57e198f7f1ac7a19e4848
                │   │       │   └── link
                │   │       ├── 24256410da5fedb15166c150e98ff9e348dd029bfe35077a3862051594f29919
                │   │       │   └── link
                │   │       ├── 400462eab4cbbb446e676e07af9df536afc59e7f47f1f2441d3241b1942d7b84
                │   │       │   └── link
                │   │       ├── 92b5198f48e84aa0d5212715550d2761368207f8522b107e63c74cd43c6e8ec3
                │   │       │   └── link
                │   │       └── f415c274bbc1da0847014ffd37aaeff1385b9234504a6804e7e88f0fe310dd1c
                │   │           └── link
                │   └── tags
                │       ├── 0.1
                │       │   ├── current
                │       │   │   └── link
                │       │   └── index
                │       │       └── sha256
                │       │           ├── 08ffaab8c710dd9cf03d880e11e8f3def1907fe53ea57e198f7f1ac7a19e4848
                │       │           │   └── link
                │       │           └── 400462eab4cbbb446e676e07af9df536afc59e7f47f1f2441d3241b1942d7b84
                │       │               └── link
                │       └── 0.2
                │           ├── current
                │           │   └── link
                │           └── index
                │               └── sha256
                │                   ├── 24256410da5fedb15166c150e98ff9e348dd029bfe35077a3862051594f29919
                │                   │   └── link
                │                   ├── 92b5198f48e84aa0d5212715550d2761368207f8522b107e63c74cd43c6e8ec3
                │                   │   └── link
                │                   └── f415c274bbc1da0847014ffd37aaeff1385b9234504a6804e7e88f0fe310dd1c
                │                       └── link
                └── _uploads

The structure above is the output of running tree on the registry's root directory. Reduced to its essentials, it looks like this:

├── blobs
│   └── sha256
│       ├── 12
│       │   └── 124a9bb461f0106581e9e0e76a1edd39076b62ff80c3608676f5a6e057326b10
│       │       └── data
│       └── 19
│           └── 194529caae55f1ec32d99ae611bd52a5a25b2d20f4c49094598b3cfecb459f55
│               └── data
└── repositories
    └── ddcmao
        └── java8
            ├── _layers
            │   └── sha256
            │       ├── 124a9bb461f0106581e9e0e76a1edd39076b62ff80c3608676f5a6e057326b10
            │       │   └── link
                ……
            │       └── 194529caae55f1ec32d99ae611bd52a5a25b2d20f4c49094598b3cfecb459f55
            │           └── link
            ├── _manifests
            │   ├── revisions
            │   │   └── sha256
                    ……
            │   │       └── 08ffaab8c710dd9cf03d880e11e8f3def1907fe53ea57e198f7f1ac7a19e4848
            │   │           └── link
            │   └── tags
            │       ├── 0.1
            │       │   ├── current
            │       │   │   └── link
            │       │   └── index
            │       │       └── sha256
            │       │           └── 400462eab4cbbb446e676e07af9df536afc59e7f47f1f2441d3241b1942d7b84
            │       │               └── link
            │       └── 0.2
            │           ├── current
            │           │   └── link
            │           └── index
            │               └── sha256
            │                   └── f415c274bbc1da0847014ffd37aaeff1385b9234504a6804e7e88f0fe310dd1c
            │                       └── link
            └── _uploads

The above is the basic directory tree of a registry. The root holds two directories, blobs and repositories.
The blobs directory stores the actual files: the data of each layer as well as the manifest content of each image. Its layout is straightforward; each data file holds the real content.
The repositories directory stores how the images in the registry are organized, roughly the metadata in a distributed-storage sense. Its layout is not complicated either:

  1. Directly under repositories sits the ddcmao directory, a logical grouping inside the registry. It is optional; if you want grouped management, this directory is the group, which I will call a project here.
  2. Under the project is the java8 directory, which is the directory of a single image repository.
  3. Under java8 are the _layers, _manifests and _uploads directories. _uploads is a temporary directory used while an image is being uploaded; once the upload completes, the files under it are removed.
  4. _layers is similar to blobs, but it stores no real data: it only keeps a link file holding each layer's sha256 digest, recording the digests of every layer that has ever been uploaded to this repository.
  5. _manifests stores the manifest information of every uploaded version (tag) of this repository. It contains the revisions and tags directories.
  6. tags holds, unsurprisingly, one entry per tag, e.g. 0.1 and 0.2. Each tag has a current directory and an index directory: the link file under current records the sha256 digest of the manifest the tag currently points to, while index lists the digests of every revision the tag has ever been pushed with.
  7. revisions stores the sha256 digests of every manifest revision ever uploaded to this repository.

Using the first tree above as the example: our image is named ddcmao/java8 and currently has two tags, 0.1 and 0.2. The index directory under tag 0.1 has two records while the one under tag 0.2 has three, meaning two revisions have been pushed for tag 0.1 and three for tag 0.2. Which revision tag 0.1 currently uses is determined by the link file in the current directory under 0.1: it holds the sha256 digest of the tag's manifest and thus identifies the tag's actual revision. The same goes for tag 0.2.

Looking at the revisions directory, there are five records in total, and their digests are exactly the union of those under the index subdirectories of tags 0.1 and 0.2. This means five revisions of java8 have been pushed; since only two tags exist now, three of those records are stale, but because of the registry's layer-sharing mechanism they are not deleted for the time being.
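
To make the layout concrete, here is a small sketch that rebuilds the three kinds of paths shown above from a digest. It is illustrative only, not the registry's actual pathFor implementation; the storage root is a placeholder, and the repository name and digest are taken from the example tree:

package main

import "fmt"

// blobDataPath mirrors blobs/sha256/<first two hex chars>/<hex>/data.
func blobDataPath(root, hex string) string {
    return fmt.Sprintf("%s/blobs/sha256/%s/%s/data", root, hex[:2], hex)
}

// layerLinkPath mirrors repositories/<name>/_layers/sha256/<hex>/link.
func layerLinkPath(root, repo, hex string) string {
    return fmt.Sprintf("%s/repositories/%s/_layers/sha256/%s/link", root, repo, hex)
}

// tagCurrentLinkPath mirrors repositories/<name>/_manifests/tags/<tag>/current/link.
func tagCurrentLinkPath(root, repo, tag string) string {
    return fmt.Sprintf("%s/repositories/%s/_manifests/tags/%s/current/link", root, repo, tag)
}

func main() {
    const (
        root = "/path/to/storage/root" // placeholder for the registry storage root shown in the tree above
        repo = "ddcmao/java8"
        hex  = "124a9bb461f0106581e9e0e76a1edd39076b62ff80c3608676f5a6e057326b10"
    )
    fmt.Println(blobDataPath(root, hex))
    fmt.Println(layerLinkPath(root, repo, hex))
    fmt.Println(tagCurrentLinkPath(root, repo, "0.1"))
    // A link file contains just the digest string, e.g. "sha256:" + hex.
}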

How distribution handles a docker push

distribution is essentially an HTTP service, so during a docker push it is really handling HTTP requests. Let's walk through an example; first look at the tcpdump capture we took during a docker push.
Taking docker push library/busybox as the example, the capture is as follows:


(Figure: tcpdump capture of the HTTP requests sent during docker push library/busybox)

From the capture above, the core flow of an image push is: first check whether each layer of the image already exists, then push each layer via POST, PATCH and PUT blob requests, and finally PUT the manifest. We can therefore analyze each stage on its own.
We break the flow into check (HEAD blobs), POST blobs, PATCH blobs, PUT blobs and PUT manifests, and analyze these steps one by one.

check (HEAD blob) analysis

As the capture shows, before uploading each layer the client first sends a HEAD blobs request. This HTTP request checks whether the layer already exists in the registry: if it does, the layer is not uploaded again; if it does not, the upload proceeds.
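
From the client's point of view, this existence check is roughly the following request. This is only a sketch; the registry host and digest are the placeholder values from the request example below:

package main

import (
    "fmt"
    "net/http"
)

func main() {
    // Host, repository and digest match the request example below.
    url := "http://lalalala.com/v2/library/busybox/blobs/" +
        "sha256:04176c8b224aa0eb9942af765f66dae866f436e75acef028fe44b8a98e045515"

    resp, err := http.Head(url)
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    switch resp.StatusCode {
    case http.StatusOK:
        // The layer already exists: the client skips uploading this blob.
        fmt.Println("blob exists, size:", resp.Header.Get("Content-Length"))
    case http.StatusNotFound:
        // The layer is missing: the client proceeds with POST / PATCH / PUT.
        fmt.Println("blob unknown, start upload")
    default:
        fmt.Println("unexpected status:", resp.Status)
    }
}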

request example:
http://lalalala.com/v2/library/busybox/blobs/sha256:04176c8b224aa0eb9942af765f66dae866f436e75acef028fe44b8a98e045515

The corresponding handling:

  • request method: HEAD

  • request URL: /v2/*/blobs/sha256:*********

  • request handler dispatch:func blobDispatcher(ctx *Context, r *http.Request)

  • request handler: blobHandler.GetBlob

According to app.register(v2.RouteNameBlob, blobDispatcher), /v2/*/blobs/sha256:*** (with no uploads segment) is dispatched by blobDispatcher.
func blobDispatcher(ctx *Context, r *http.Request) merely assigns the corresponding http.HandlerFunc for each method, as follows:

mhandler := handlers.MethodHandler{
            "GET":  http.HandlerFunc(blobHandler.GetBlob),
            "HEAD": http.HandlerFunc(blobHandler.GetBlob),
        }

Let's look at the corresponding func (bh *blobHandler) GetBlob(w http.ResponseWriter, r *http.Request):

func (bh *blobHandler) GetBlob(w http.ResponseWriter, r *http.Request) {
    context.GetLogger(bh).Debug("GetBlob")
    blobs := bh.Repository.Blobs(bh)
    desc, err := blobs.Stat(bh, bh.Digest)
    if err != nil {
        if err == distribution.ErrBlobUnknown {
            bh.Errors = append(bh.Errors, v2.ErrorCodeBlobUnknown.WithDetail(bh.Digest))
        } else {
            bh.Errors = append(bh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
        }
        return
    }

    if err := blobs.ServeBlob(bh, w, r, desc.Digest); err != nil {
        context.GetLogger(bh).Debugf("unexpected error getting blob HTTP handler: %v", err)
        bh.Errors = append(bh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
        return
    }
}

This function first obtains desc via blobs.Stat, i.e. the descriptor (organizational metadata) of the blob.

func (bs *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
    return bs.statter.Stat(ctx, dgst)

}

func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
    path, err := pathFor(blobDataPathSpec{
        digest: dgst,
    })

    if err != nil {
        return distribution.Descriptor{}, err
    }

    fi, err := bs.driver.Stat(ctx, path)
    if err != nil {
        switch err := err.(type) {
        case driver.PathNotFoundError:
            return distribution.Descriptor{}, distribution.ErrBlobUnknown
        default:
            return distribution.Descriptor{}, err
        }
    }

    if fi.IsDir() {
        // NOTE(stevvooe): This represents a corruption situation. Somehow, we
        // calculated a blob path and then detected a directory. We log the
        // error and then error on the side of not knowing about the blob.
        context.GetLogger(ctx).Warnf("blob path should not be a directory: %q", path)
        return distribution.Descriptor{}, distribution.ErrBlobUnknown
    }

    // TODO(stevvooe): Add method to resolve the mediatype. We can store and
    // cache a "global" media type for the blob, even if a specific repo has a
    // mediatype that overrides the main one.

    return distribution.Descriptor{
        Size: fi.Size(),

        // NOTE(stevvooe): The central blob store firewalls media types from
        // other users. The caller should look this up and override the value
        // for the specific repository.
        MediaType: "application/octet-stream",
        Digest:    dgst,
    }, nil
}

The call to bs.driver.Stat(ctx, path) eventually reaches the filesystem driver's Stat, which checks the file information at that path: if the file exists its info is returned, otherwise an error is returned.
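
As a rough illustration of what the filesystem driver's Stat boils down to, here is a simplified sketch. It is not the actual driver code; the root and sub-path are placeholders built from the example digest above:

package main

import (
    "fmt"
    "os"
    "path/filepath"
)

// statBlob checks whether a blob data file exists under the driver's root
// directory and returns its size, loosely mirroring driver.Stat on the
// blobDataPathSpec path. Error handling is reduced to the essentials.
func statBlob(root, subPath string) (int64, error) {
    fi, err := os.Stat(filepath.Join(root, subPath))
    if err != nil {
        if os.IsNotExist(err) {
            // Corresponds to driver.PathNotFoundError -> distribution.ErrBlobUnknown.
            return 0, fmt.Errorf("blob unknown: %s", subPath)
        }
        return 0, err
    }
    if fi.IsDir() {
        // The same corruption case noted in blobStatter.Stat above.
        return 0, fmt.Errorf("blob path is a directory: %s", subPath)
    }
    return fi.Size(), nil
}

func main() {
    size, err := statBlob("/path/to/storage/root",
        "blobs/sha256/12/124a9bb461f0106581e9e0e76a1edd39076b62ff80c3608676f5a6e057326b10/data")
    fmt.Println(size, err)
}
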
GetBlob then calls blobs.ServeBlob(bh, w, r, desc.Digest); reaching this point means the file exists, though whether its content matches still has to be established. The function looks like this:

func (bs *blobServer) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
    desc, err := bs.statter.Stat(ctx, dgst)
    if err != nil {
        return err
    }

    path, err := bs.pathFn(desc.Digest)
    if err != nil {
        return err
    }

    if bs.redirect {
        redirectURL, err := bs.driver.URLFor(ctx, path, map[string]interface{}{"method": r.Method})
        switch err.(type) {
        case nil:
            // Redirect to storage URL.
            http.Redirect(w, r, redirectURL, http.StatusTemporaryRedirect)
            return err

        case driver.ErrUnsupportedMethod:
            // Fallback to serving the content directly.
        default:
            // Some unexpected error.
            return err
        }
    }

    br, err := newFileReader(ctx, bs.driver, path, desc.Size)
    if err != nil {
        return err
    }
    defer br.Close()

    w.Header().Set("ETag", fmt.Sprintf(`"%s"`, desc.Digest)) // If-None-Match handled by ServeContent
    w.Header().Set("Cache-Control", fmt.Sprintf("max-age=%.f", blobCacheControlMaxAge.Seconds()))

    if w.Header().Get("Docker-Content-Digest") == "" {
        w.Header().Set("Docker-Content-Digest", desc.Digest.String())
    }

    if w.Header().Get("Content-Type") == "" {
        // Set the content type if not already set.
        w.Header().Set("Content-Type", desc.MediaType)
    }

    if w.Header().Get("Content-Length") == "" {
        // Set the content length if not already set.
        w.Header().Set("Content-Length", fmt.Sprint(desc.Size))
    }

    http.ServeContent(w, r, desc.Digest.String(), time.Time{}, br)
    return nil
}

It runs bs.statter.Stat once more, then resolves the file path. If redirects are enabled, driver.URLFor is used to send the client straight to the backend storage URL (useful for remote backends such as object storage); otherwise it creates a fileReader and calls http.ServeContent, which handles conditional requests, the content length and the digest headers in the HTTP response. See the source for the details of ServeContent; roughly, it serves the file while tracking its modification time, size and sha256 digest.
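
http.ServeContent is a standard library function; a minimal standalone example of the conditional-request and range handling it provides looks like this (the ETag value and content are placeholders):

package main

import (
    "log"
    "net/http"
    "strings"
    "time"
)

func main() {
    data := "hello blob"
    http.HandleFunc("/blob", func(w http.ResponseWriter, r *http.Request) {
        // ServeContent needs an io.ReadSeeker; it handles HEAD, Range and
        // If-None-Match (against the ETag we set) on our behalf.
        w.Header().Set("ETag", `"sha256:placeholder-digest"`)
        http.ServeContent(w, r, "data", time.Time{}, strings.NewReader(data))
    })
    log.Fatal(http.ListenAndServe(":8080", nil))
}
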
If the layer's file is not found here, i.e. there is no corresponding sha256 directory and data file under blobs, the POST blobs/uploads, PATCH and PUT requests below are needed; if it fully exists, the client moves on to the next layer.

Uploading a layer

Uploading a layer consists of three steps: POST, PATCH and PUT. We analyze them one by one.

POST blobs/uploads analysis

Continuing the analysis above: when pushing a layer, if the registry turns out not to have that layer, the client issues a POST blobs request.

request example:
http://reg.lalalalal.com/v2/library/busybox/blobs/uploads/?from=library%2Fbusybox&mount=sha256%3A04176c8b224aa0eb9942af765f66dae866f436e75acef028fe44b8a98e045515

The corresponding handling:

  • request method: POST

  • request URL: /v2/*/blobs/uploads/?:*********

  • request handler dispatch:func blobUploadDispatcher(ctx *Context, r *http.Request)

  • request handler: buh.StartBlobUpload

    buh : blobUploadHandler
    According to app.register(v2.RouteNameBlobUpload, blobUploadDispatcher), this request is dispatched by blobUploadDispatcher.
    func blobUploadDispatcher(ctx *Context, r *http.Request) merely assigns the corresponding http.HandlerFunc for each method, as follows:

    buh := &blobUploadHandler{
        Context: ctx,
        UUID:    getUploadUUID(ctx),
    }

    handler := handlers.MethodHandler{
        "GET":  http.HandlerFunc(buh.GetUploadStatus),
        "HEAD": http.HandlerFunc(buh.GetUploadStatus),
    }
    
    if !ctx.readOnly {
        handler["POST"] = http.HandlerFunc(buh.StartBlobUpload)
        handler["PATCH"] = http.HandlerFunc(buh.PatchBlobData)
        handler["PUT"] = http.HandlerFunc(buh.PutBlobUploadComplete)
        handler["DELETE"] = http.HandlerFunc(buh.CancelBlobUpload)
    }
    

    ……

The code above builds the blobUploadHandler and assigns the http.HandlerFunc that matches the request method.
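
MethodHandler here is essentially a map from HTTP method to handler. With only the standard library, the same dispatch idea can be sketched as follows; this is a toy example, not the registry's code:

package main

import (
    "fmt"
    "log"
    "net/http"
)

// methodHandler picks an http.Handler by request method and answers 405 for
// anything not registered, the same idea as the MethodHandler used above.
type methodHandler map[string]http.Handler

func (m methodHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
    if h, ok := m[r.Method]; ok {
        h.ServeHTTP(w, r)
        return
    }
    w.WriteHeader(http.StatusMethodNotAllowed)
}

func main() {
    h := methodHandler{
        "GET":  http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { fmt.Fprintln(w, "upload status") }),
        "POST": http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { fmt.Fprintln(w, "start upload") }),
    }
    http.Handle("/v2/demo/blobs/uploads/", h)
    log.Fatal(http.ListenAndServe(":8080", nil))
}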

Let's look at buh.StartBlobUpload:

// StartBlobUpload begins the blob upload process and allocates a server-side
// blob writer session, optionally mounting the blob from a separate repository.

func (buh *blobUploadHandler) StartBlobUpload(w http.ResponseWriter, r *http.Request) {
    var options []distribution.BlobCreateOption

    fromRepo := r.FormValue("from")
    mountDigest := r.FormValue("mount")

    if mountDigest != "" && fromRepo != "" {
        opt, err := buh.createBlobMountOption(fromRepo, mountDigest)
        if opt != nil && err == nil {
            options = append(options, opt)
        }
    }

    blobs := buh.Repository.Blobs(buh)
    upload, err := blobs.Create(buh, options...)

    if err != nil {
        if ebm, ok := err.(distribution.ErrBlobMounted); ok {
            if err := buh.writeBlobCreatedHeaders(w, ebm.Descriptor); err != nil {
                buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
            }
        } else if err == distribution.ErrUnsupported {
            buh.Errors = append(buh.Errors, errcode.ErrorCodeUnsupported)
        } else {
            buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
        }
        return
    }

    buh.Upload = upload

    if err := buh.blobUploadResponse(w, r, true); err != nil {
        buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
        return
    }

    w.Header().Set("Docker-Upload-UUID", buh.Upload.ID())
    w.WriteHeader(http.StatusAccepted)
}

First, if the from and mount parameters are present, a blob-mount option is created.
Next it obtains the Blobs object and calls blobs.Create to produce the upload session. Let's look at Blobs and blobs.Create:

func (repo *repository) Blobs(ctx context.Context) distribution.BlobStore {
    var statter distribution.BlobDescriptorService = &linkedBlobStatter{
        blobStore:   repo.blobStore,
        repository:  repo,
        linkPathFns: []linkPathFunc{blobLinkPath},
    }

    if repo.descriptorCache != nil {
        statter = cache.NewCachedBlobStatter(repo.descriptorCache, statter)
    }

    if repo.registry.blobDescriptorServiceFactory != nil {
        statter = repo.registry.blobDescriptorServiceFactory.BlobAccessController(statter)
    }

    return &linkedBlobStore{
        registry:             repo.registry,
        blobStore:            repo.blobStore,
        blobServer:           repo.blobServer,
        blobAccessController: statter,
        repository:           repo,
        ctx:                  ctx,

        // TODO(stevvooe): linkPath limits this blob store to only layers.
        // This instance cannot be used for manifest checks.
        linkPathFns:            []linkPathFunc{blobLinkPath},
        deleteEnabled:          repo.registry.deleteEnabled,
        resumableDigestEnabled: repo.resumableDigestEnabled,
    }
}

func (lbs *linkedBlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) {
    context.GetLogger(ctx).Debug("(*linkedBlobStore).Writer")

    var opts createOptions

    for _, option := range options {
        err := option.Apply(&opts)
        if err != nil {
            return nil, err
        }
    }

    if opts.Mount.ShouldMount {
        desc, err := lbs.mount(ctx, opts.Mount.From, opts.Mount.From.Digest())
        if err == nil {
            // Mount successful, no need to initiate an upload session
            return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc}
        }
    }

    uuid := uuid.Generate().String()
    startedAt := time.Now().UTC()

    path, err := pathFor(uploadDataPathSpec{
        name: lbs.repository.Named().Name(),
        id:   uuid,
    })

    if err != nil {
        return nil, err
    }

    startedAtPath, err := pathFor(uploadStartedAtPathSpec{
        name: lbs.repository.Named().Name(),
        id:   uuid,
    })

    if err != nil {
        return nil, err
    }

    // Write a startedat file for this upload
    if err := lbs.blobStore.driver.PutContent(ctx, startedAtPath, []byte(startedAt.Format(time.RFC3339))); err != nil {
        return nil, err
    }

    return lbs.newBlobUpload(ctx, uuid, path, startedAt, false)
}

Here a linkedBlobStore is built according to the configuration, and its Create method is called. Create handles the mount option, generates a UUID and the upload paths, writes to the backend via PutContent, and then calls newBlobUpload.
Handling the mount is actually quite simple: it just records the layer's link under the _layers directory of the target repository.
The startedAtPath is repoPrefix/<image name>/_uploads/<upload uuid>/startedat, i.e. inside the _uploads directory under the repository we analyzed earlier.
lbs.blobStore.driver.PutContent calls the backend storage driver's PutContent to actually write data; at this point there is no real payload yet, so it just writes []byte(startedAt.Format(time.RFC3339)).
Finally lbs.newBlobUpload produces and returns the distribution.BlobWriter, whose fileWriter field is the writer obtained from the driver. Its content is as follows:

func (lbs *linkedBlobStore) newBlobUpload(ctx context.Context, uuid, path string, startedAt time.Time, append bool) (distribution.BlobWriter, error) {
    fw, err := lbs.driver.Writer(ctx, path, append)
    if err != nil {
        return nil, err
    }

    bw := &blobWriter{
        ctx:        ctx,
        blobStore:  lbs,
        id:         uuid,
        startedAt:  startedAt,
        digester:   digest.Canonical.New(),
        fileWriter: fw,
        driver:     lbs.driver,
        path:       path,
        resumableDigestEnabled: lbs.resumableDigestEnabled,
    }

    return bw, nil
}
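
On a local filesystem backend, the session created above boils down to two files under _uploads. Below is a rough sketch with placeholder paths (the repository directory is invented and the UUID is the one from the PATCH example later); in the real code the paths come from pathFor and every write goes through the storage driver:

package main

import (
    "fmt"
    "os"
    "path/filepath"
    "time"
)

func main() {
    // Placeholder repository directory and upload UUID.
    repoDir := "/tmp/registry-demo/repositories/library/busybox"
    uploadID := "75816304-18cb-4fcf-af57-98f5508f0eb8"

    uploadDir := filepath.Join(repoDir, "_uploads", uploadID)
    if err := os.MkdirAll(uploadDir, 0o755); err != nil {
        panic(err)
    }

    // startedat records when the session began, mirroring the PutContent call above.
    startedAt := time.Now().UTC().Format(time.RFC3339)
    if err := os.WriteFile(filepath.Join(uploadDir, "startedat"), []byte(startedAt), 0o644); err != nil {
        panic(err)
    }

    // data is the file the blobWriter's fileWriter will append layer bytes to.
    f, err := os.OpenFile(filepath.Join(uploadDir, "data"), os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o644)
    if err != nil {
        panic(err)
    }
    defer f.Close()

    fmt.Println("upload session at", uploadDir)
}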

Back in StartBlobUpload, buh.Upload = upload is set and buh.blobUploadResponse(w, r, true) is called before returning. That function actually generates the HTTP response information; it is fairly involved, so we postpone its analysis.

PATCH blobs/uploads analysis

Continuing the analysis above: once the POST has opened an upload session for a layer the registry does not yet have, the client sends the PATCH blobs request to transfer the layer data.

request example:
http://reg.lalalala.com/v2/library/busybox/blobs/uploads/75816304-18cb-4fcf-af57-98f5508f0eb8?_state=nCtc8O54DzVTJSg5WI-WHVO8qF-F23Q9Murrfv67QqR7Ik5hbWUiOiJsaWJyYXJ5L2J1c3lib3giLCJVVUlEIjoiNzU4MTYzMDQtMThjYi00ZmNmLWFmNTctOThmNTUwOGYwZWI4IiwiT2Zmc2V0IjowLCJTdGFydGVkQXQiOiIyMDE3LTA1LTI0VDA4OjA1OjQ3Ljk3NjQxMDU3OFoifQ%3D%3D

The corresponding handling:

  • request method: PATCH

  • request URL: /v2/*/blobs/uploads/{uuid:}?:*********

  • request handler dispatch:func blobUploadDispatcher(ctx *Context, r *http.Request)

  • request handler: buh.PatchBlobData

    buh : blobUploadHandler
    According to app.register(v2.RouteNameBlobUploadChunk, blobUploadDispatcher), this request is dispatched by blobUploadDispatcher.
    func blobUploadDispatcher(ctx *Context, r *http.Request) merely assigns the corresponding http.HandlerFunc; it has already been analyzed above, so we do not repeat it here.

Because this request carries a UUID, blobUploadDispatcher does some extra processing and wrapping after selecting the request handler. Let's take a quick look:

if buh.UUID != "" {
    state, err := hmacKey(ctx.Config.HTTP.Secret).unpackUploadState(r.FormValue("_state"))
    if err != nil {
        return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            ctxu.GetLogger(ctx).Infof("error resolving upload: %v", err)
            buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err))
        })
    }
    buh.State = state

    if state.Name != ctx.Repository.Named().Name() {
        return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            ctxu.GetLogger(ctx).Infof("mismatched repository name in upload state: %q != %q", state.Name, buh.Repository.Named().Name())
            buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err))
        })
    }

    if state.UUID != buh.UUID {
        return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            ctxu.GetLogger(ctx).Infof("mismatched uuid in upload state: %q != %q", state.UUID, buh.UUID)
            buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err))
        })
    }

    blobs := ctx.Repository.Blobs(buh)
    upload, err := blobs.Resume(buh, buh.UUID)
    if err != nil {
        ctxu.GetLogger(ctx).Errorf("error resolving upload: %v", err)
        if err == distribution.ErrBlobUploadUnknown {
            return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
                buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown.WithDetail(err))
            })
        }

        return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
        })
    }
    buh.Upload = upload

    if size := upload.Size(); size != buh.State.Offset {
        defer upload.Close()
        ctxu.GetLogger(ctx).Errorf("upload resumed at wrong offest: %d != %d", size, buh.State.Offset)
        return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err))
            upload.Cancel(buh)
        })
    }
    return closeResources(handler, buh.Upload)
}

The code above mainly unpacks the _state information from the HTTP request, which is the state handed back to the client at the end of the previous request. After validating it, blobs.Resume(buh, buh.UUID) is executed, and then closeResources(handler, buh.Upload) wraps (effectively redirects) the HTTP handler.
Let's look at the wrapping handler first; we come back to blobs.Resume(buh, buh.UUID) right after it:

func closeResources(handler http.Handler, closers ...io.Closer) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        for _, closer := range closers {
            defer closer.Close()
        }
        handler.ServeHTTP(w, r)
    })
}

This wrapper merely defers closing the original upload: buh.Upload.Close() -> func (bw *blobWriter) Close() -> bw.fileWriter.Close(), which ultimately closes the file on the backend storage.

Now for blobs.Resume(buh, buh.UUID): Resume is very similar to Create. It calls newBlobUpload to build a new BlobWriter, but the file path changes from repoPrefix/<image name>/_uploads/<upload uuid>/startedat to repoPrefix/<image name>/_uploads/<upload uuid>/data, and the append flag, previously false, is now true, in preparation for the data writes that follow.

Next we return to PatchBlobData, the handler selected by the dispatcher:

// PatchBlobData writes data to an upload.
func (buh *blobUploadHandler) PatchBlobData(w http.ResponseWriter, r *http.Request) {
    if buh.Upload == nil {
        buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown)
        return
    }

    ct := r.Header.Get("Content-Type")
    if ct != "" && ct != "application/octet-stream" {
        buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(fmt.Errorf("Bad Content-Type")))
        // TODO(dmcgowan): encode error
        return
    }

    // TODO(dmcgowan): support Content-Range header to seek and write range

    if err := copyFullPayload(w, r, buh.Upload, buh, "blob PATCH", &buh.Errors); err != nil {
        // copyFullPayload reports the error if necessary
        return
    }

    if err := buh.blobUploadResponse(w, r, false); err != nil {
        buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
        return
    }

    w.WriteHeader(http.StatusAccepted)
}

The code above mainly makes two calls, copyFullPayload and blobUploadResponse. copyFullPayload simply copies the data carried by the request into buh.Upload via io.Copy(destWriter, r.Body); since the destination is the backend blobWriter, anyone writing a storage driver should study this path carefully.
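
Stripped of its error reporting, the copy amounts to streaming the request body into a writer. Here is a toy endpoint showing the same io.Copy pattern; the destination file path is a placeholder and this is not the actual copyFullPayload:

package main

import (
    "io"
    "log"
    "net/http"
    "os"
)

func main() {
    // A toy PATCH-like endpoint: stream the request body into a local file,
    // the way copyFullPayload streams it into the blobWriter (and ultimately
    // into the _uploads/<uuid>/data file on the backend).
    http.HandleFunc("/upload", func(w http.ResponseWriter, r *http.Request) {
        dst, err := os.OpenFile("/tmp/chunk.data", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o644)
        if err != nil {
            http.Error(w, err.Error(), http.StatusInternalServerError)
            return
        }
        defer dst.Close()

        n, err := io.Copy(dst, r.Body)
        if err != nil {
            http.Error(w, err.Error(), http.StatusInternalServerError)
            return
        }
        log.Printf("received %d bytes", n)
        w.WriteHeader(http.StatusAccepted)
    })
    log.Fatal(http.ListenAndServe(":8080", nil))
}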

blobUploadResponse is then called; its content is as follows:

// blobUploadResponse provides a standard request for uploading blobs and
// chunk responses. This sets the correct headers but the response status is
// left to the caller. The fresh argument is used to ensure that new blob
// uploads always start at a 0 offset. This allows disabling resumable push by
// always returning a 0 offset on check status.
func (buh *blobUploadHandler) blobUploadResponse(w http.ResponseWriter, r *http.Request, fresh bool) error {
    // TODO(stevvooe): Need a better way to manage the upload state automatically.
    buh.State.Name = buh.Repository.Named().Name()
    buh.State.UUID = buh.Upload.ID()
    buh.Upload.Close()
    buh.State.Offset = buh.Upload.Size()
    buh.State.StartedAt = buh.Upload.StartedAt()

    token, err := hmacKey(buh.Config.HTTP.Secret).packUploadState(buh.State)
    if err != nil {
        ctxu.GetLogger(buh).Infof("error building upload state token: %s", err)
        return err
    }

    uploadURL, err := buh.urlBuilder.BuildBlobUploadChunkURL(
        buh.Repository.Named(), buh.Upload.ID(),
        url.Values{
            "_state": []string{token},
        })
    if err != nil {
        ctxu.GetLogger(buh).Infof("error building upload url: %s", err)
        return err
    }

    endRange := buh.Upload.Size()
    if endRange > 0 {
        endRange = endRange - 1
    }

    w.Header().Set("Docker-Upload-UUID", buh.UUID)
    w.Header().Set("Location", uploadURL)

    w.Header().Set("Content-Length", "0")
    w.Header().Set("Range", fmt.Sprintf("0-%d", endRange))

    return nil
}

The notable part is the call chain buh.Upload.Close() -> func (bw *blobWriter) Close() -> bw.fileWriter.Close(), which closes the file on the backend storage, flushing buffers as it does so.
The rest generates the response information. The important piece is building uploadURL, the URL for the next HTTP request: the _state mentioned earlier is exactly what the previous response carried back. A token is packed first (packUploadState) and then fed into BuildBlobUploadChunkURL.
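
The _state token itself is just the upload state serialized and protected with an HMAC, so the client can carry it between requests without being able to tamper with it. The following pack/unpack pair is a simplified sketch in the same spirit, not the exact hmacKey implementation; the field set and encoding are assumptions based on the decoded examples above:

package main

import (
    "crypto/hmac"
    "crypto/sha256"
    "encoding/base64"
    "encoding/json"
    "errors"
    "fmt"
    "time"
)

// uploadState mirrors the fields visible in the decoded _state examples above.
type uploadState struct {
    Name      string
    UUID      string
    Offset    int64
    StartedAt time.Time
}

// pack signs the JSON-encoded state and base64-encodes signature || payload.
func pack(secret string, s uploadState) (string, error) {
    payload, err := json.Marshal(s)
    if err != nil {
        return "", err
    }
    mac := hmac.New(sha256.New, []byte(secret))
    mac.Write(payload)
    return base64.URLEncoding.EncodeToString(append(mac.Sum(nil), payload...)), nil
}

// unpack verifies the signature and decodes the state.
func unpack(secret, token string) (uploadState, error) {
    var s uploadState
    raw, err := base64.URLEncoding.DecodeString(token)
    if err != nil || len(raw) < sha256.Size {
        return s, errors.New("malformed state token")
    }
    sig, payload := raw[:sha256.Size], raw[sha256.Size:]
    mac := hmac.New(sha256.New, []byte(secret))
    mac.Write(payload)
    if !hmac.Equal(sig, mac.Sum(nil)) {
        return s, errors.New("invalid state signature")
    }
    return s, json.Unmarshal(payload, &s)
}

func main() {
    tok, _ := pack("http-secret", uploadState{
        Name: "library/busybox", UUID: "75816304-18cb-4fcf-af57-98f5508f0eb8",
        Offset: 701102, StartedAt: time.Now().UTC(),
    })
    fmt.Println(tok)
    fmt.Println(unpack("http-secret", tok))
}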

This completes the analysis of the PATCH request.

PUT blobs/uploads analysis

Continuing the analysis above: after the layer data has been transferred by PATCH, the client issues the PUT blobs request to complete the upload.

request example:
http://reglalalala.com/v2/library/busybox/blobs/uploads/75816304-18cb-4fcf-af57-98f5508f0eb8?_state=4GUuNB-bRVYFnJ36MKo7aMCDdcTg8NX0e0OAt7CNR1N7Ik5hbWUiOiJsaWJyYXJ5L2J1c3lib3giLCJVVUlEIjoiNzU4MTYzMDQtMThjYi00ZmNmLWFmNTctOThmNTUwOGYwZWI4IiwiT2Zmc2V0Ijo3MDExMDIsIlN0YXJ0ZWRBdCI6IjIwMTctMDUtMjRUMDg6MDU6NDdaIn0%3D&digest=sha256%3A04176c8b224aa0eb9942af765f66dae866f436e75acef028fe44b8a98e045515

The corresponding handling:

  • request method: PUT

  • request URL: /v2/*/blobs/uploads/{uuid:}?:*********

  • request handler dispatch:func blobUploadDispatcher(ctx *Context, r *http.Request)

  • request handler: buh.PutBlobUploadComplete

    buh : blobUploadHandler
    According to app.register(v2.RouteNameBlobUploadChunk, blobUploadDispatcher), this request is dispatched by blobUploadDispatcher.
    func blobUploadDispatcher(ctx *Context, r *http.Request) assigns the corresponding http.HandlerFunc and performs the same wrapping; it has already been analyzed above, so we do not repeat it here.

Here we go straight to PutBlobUploadComplete:

// PutBlobUploadComplete takes the final request of a blob upload. The
// request may include all the blob data or no blob data. Any data
// provided is received and verified. If successful, the blob is linked
// into the blob store and 201 Created is returned with the canonical
// url of the blob.
func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *http.Request) {
    if buh.Upload == nil {
        buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown)
        return
    }

    dgstStr := r.FormValue("digest") // TODO(stevvooe): Support multiple digest parameters!

    if dgstStr == "" {
        // no digest? return error, but allow retry.
        buh.Errors = append(buh.Errors, v2.ErrorCodeDigestInvalid.WithDetail("digest missing"))
        return
    }

    dgst, err := digest.ParseDigest(dgstStr)
    if err != nil {
        // no digest? return error, but allow retry.
        buh.Errors = append(buh.Errors, v2.ErrorCodeDigestInvalid.WithDetail("digest parsing failed"))
        return
    }

    if err := copyFullPayload(w, r, buh.Upload, buh, "blob PUT", &buh.Errors); err != nil {
        // copyFullPayload reports the error if necessary
        return
    }

    desc, err := buh.Upload.Commit(buh, distribution.Descriptor{
        Digest: dgst,

        // TODO(stevvooe): This isn't wildly important yet, but we should
        // really set the mediatype. For now, we can let the backend take care
        // of this.
    })

    if err != nil {
        switch err := err.(type) {
        case distribution.ErrBlobInvalidDigest:
            buh.Errors = append(buh.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err))
        case errcode.Error:
            buh.Errors = append(buh.Errors, err)
        default:
            switch err {
            case distribution.ErrAccessDenied:
                buh.Errors = append(buh.Errors, errcode.ErrorCodeDenied)
            case distribution.ErrUnsupported:
                buh.Errors = append(buh.Errors, errcode.ErrorCodeUnsupported)
            case distribution.ErrBlobInvalidLength, distribution.ErrBlobDigestUnsupported:
                buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err))
            default:
                ctxu.GetLogger(buh).Errorf("unknown error completing upload: %#v", err)
                buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
            }

        }

        // Clean up the backend blob data if there was an error.
        if err := buh.Upload.Cancel(buh); err != nil {
            // If the cleanup fails, all we can do is observe and report.
            ctxu.GetLogger(buh).Errorf("error canceling upload after error: %v", err)
        }

        return
    }
    if err := buh.writeBlobCreatedHeaders(w, desc); err != nil {
        buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
        return
    }
}

This time the blob's sha256 digest is needed, and it is already present in the request. The handler then calls copyFullPayload and buh.Upload.Commit. We skip copyFullPayload, look at writeBlobCreatedHeaders first, and then analyze buh.Upload.Commit.
First, writeBlobCreatedHeaders:

func (buh *blobUploadHandler) writeBlobCreatedHeaders(w http.ResponseWriter, desc distribution.Descriptor) error {
    ref, err := reference.WithDigest(buh.Repository.Named(), desc.Digest)
    if err != nil {
        return err
    }
    blobURL, err := buh.urlBuilder.BuildBlobURL(ref)
    if err != nil {
        return err
    }

    w.Header().Set("Location", blobURL)
    w.Header().Set("Content-Length", "0")
    w.Header().Set("Docker-Content-Digest", desc.Digest.String())
    w.WriteHeader(http.StatusCreated)
    return nil
}

It is very simple: it just builds the URL for the blob and writes the 201 Created headers.
Now buh.Upload.Commit:

func (bw *blobWriter) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) {
    context.GetLogger(ctx).Debug("(*blobWriter).Commit")

    if err := bw.fileWriter.Commit(); err != nil {
        return distribution.Descriptor{}, err
    }

    bw.Close()
    desc.Size = bw.Size()

    canonical, err := bw.validateBlob(ctx, desc)
    if err != nil {
        return distribution.Descriptor{}, err
    }

    if err := bw.moveBlob(ctx, canonical); err != nil {
        return distribution.Descriptor{}, err
    }

    if err := bw.blobStore.linkBlob(ctx, canonical, desc.Digest); err != nil {
        return distribution.Descriptor{}, err
    }

    if err := bw.removeResources(ctx); err != nil {
        return distribution.Descriptor{}, err
    }

    err = bw.blobStore.blobAccessController.SetDescriptor(ctx, canonical.Digest, canonical)
    if err != nil {
        return distribution.Descriptor{}, err
    }

    bw.committed = true
    return canonical, nil
}

The code first calls bw.fileWriter.Commit() to flush the buffered file content, then verifies that the sha256 digest matches the file itself, then calls bw.moveBlob and bw.blobStore.linkBlob to update and organize the files on the storage backend, and finally calls bw.removeResources to remove the temporary files (under _uploads).

After a set of checks, bw.moveBlob calls bw.blobStore.driver.Move(ctx, bw.path, blobPath) to move the data file under _uploads to blobs/sha256/<hex[0:2]>/<hex>/data.
bw.blobStore.linkBlob creates the repositories/<image name>/_layers/sha256/<hex>/link file, whose content is the corresponding sha256:<hex>.
At this point one layer has been uploaded successfully.
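
On the local filesystem backend, the commit therefore amounts to: recompute and verify the digest of the uploaded data, move it into the content-addressed blobs tree, and drop a link file under _layers. The following is a condensed sketch with placeholder paths and no driver abstraction:

package main

import (
    "crypto/sha256"
    "fmt"
    "io"
    "os"
    "path/filepath"
)

// commitLayer verifies and "promotes" an uploaded data file the way
// Commit/moveBlob/linkBlob do, but directly on the local filesystem.
func commitLayer(root, repo, uploadData, expectedHex string) error {
    // 1. Recompute the sha256 of the uploaded data (validateBlob).
    f, err := os.Open(uploadData)
    if err != nil {
        return err
    }
    defer f.Close()
    h := sha256.New()
    if _, err := io.Copy(h, f); err != nil {
        return err
    }
    hex := fmt.Sprintf("%x", h.Sum(nil))
    if hex != expectedHex {
        return fmt.Errorf("digest mismatch: got %s, want %s", hex, expectedHex)
    }

    // 2. Move the data into blobs/sha256/<hex[0:2]>/<hex>/data (moveBlob).
    blobPath := filepath.Join(root, "blobs", "sha256", hex[:2], hex, "data")
    if err := os.MkdirAll(filepath.Dir(blobPath), 0o755); err != nil {
        return err
    }
    if err := os.Rename(uploadData, blobPath); err != nil {
        return err
    }

    // 3. Write repositories/<repo>/_layers/sha256/<hex>/link (linkBlob).
    linkPath := filepath.Join(root, "repositories", repo, "_layers", "sha256", hex, "link")
    if err := os.MkdirAll(filepath.Dir(linkPath), 0o755); err != nil {
        return err
    }
    return os.WriteFile(linkPath, []byte("sha256:"+hex), 0o644)
}

func main() {
    // Placeholder arguments; expectedHex would come from the ?digest= parameter.
    err := commitLayer("/tmp/registry-demo", "library/busybox",
        "/tmp/registry-demo/repositories/library/busybox/_uploads/some-uuid/data",
        "04176c8b224aa0eb9942af765f66dae866f436e75acef028fe44b8a98e045515")
    fmt.Println(err)
}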

After all of the layers above have been uploaded, the image's organizing information, the manifest file, still has to be uploaded. All of the layers may be skipped (for example when only a new tag is added), but the manifest must be uploaded.

PUT manifests analysis

Continuing the analysis above: after all layers of the image have been pushed, the image's description and organizing file, the manifest, still has to be uploaded; otherwise neither the image nor the layout of its layers can be found.

request example:
http://reg.lalalala.com/v2/library/busybox/manifests/latest

The corresponding handling:

  • request method: PUT

  • request URL: /v2/**/manifests/$taginfo

  • request handler dispatch:func imageManifestDispatcher(ctx *Context, r *http.Request)

  • request handler: imageManifestHandler.PutImageManifest

According to app.register(v2.RouteNameManifest, imageManifestDispatcher), this request is dispatched by imageManifestDispatcher.

Here we look directly at PutImageManifest:

// PutImageManifest validates and stores an image in the registry.
func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http.Request) {
    ctxu.GetLogger(imh).Debug("PutImageManifest")
    manifests, err := imh.Repository.Manifests(imh)
    if err != nil {
        imh.Errors = append(imh.Errors, err)
        return
    }

    var jsonBuf bytes.Buffer
    if err := copyFullPayload(w, r, &jsonBuf, imh, "image manifest PUT", &imh.Errors); err != nil {
        // copyFullPayload reports the error if necessary
        return
    }

    mediaType := r.Header.Get("Content-Type")
    manifest, desc, err := distribution.UnmarshalManifest(mediaType, jsonBuf.Bytes())
    if err != nil {
        imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err))
        return
    }

    if imh.Digest != "" {
        if desc.Digest != imh.Digest {
            ctxu.GetLogger(imh).Errorf("payload digest does match: %q != %q", desc.Digest, imh.Digest)
            imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid)
            return
        }
    } else if imh.Tag != "" {
        imh.Digest = desc.Digest
    } else {
        imh.Errors = append(imh.Errors, v2.ErrorCodeTagInvalid.WithDetail("no tag or digest specified"))
        return
    }

    var options []distribution.ManifestServiceOption
    if imh.Tag != "" {
        options = append(options, distribution.WithTag(imh.Tag))
    }
    _, err = manifests.Put(imh, manifest, options...)
    if err != nil {
        ……
        return
    }

    // Tag this manifest
    if imh.Tag != "" {
        tags := imh.Repository.Tags(imh)
        err = tags.Tag(imh, imh.Tag, desc)
        if err != nil {
            imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
            return
        }

    }

    // Construct a canonical url for the uploaded manifest.
    ref, err := reference.WithDigest(imh.Repository.Named(), imh.Digest)
    if err != nil {
        imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
        return
    }

    location, err := imh.urlBuilder.BuildManifestURL(ref)
    if err != nil {
        // NOTE(stevvooe): Given the behavior above, this absurdly unlikely to
        // happen. We'll log the error here but proceed as if it worked. Worst
        // case, we set an empty location header.
        ctxu.GetLogger(imh).Errorf("error building manifest url from digest: %v", err)
    }

    w.Header().Set("Location", location)
    w.Header().Set("Docker-Content-Digest", imh.Digest.String())
    w.WriteHeader(http.StatusCreated)
}

From the code above: it first calls manifests, err := imh.Repository.Manifests(imh) to build the manifests service, then distribution.UnmarshalManifest(mediaType, jsonBuf.Bytes()) to parse the manifest (schema1 and schema2 are handled differently), then manifests.Put(imh, manifest, options…) to write the manifest file and create the link file under the revisions directory, and finally obtains the tag service via tags := imh.Repository.Tags(imh) and calls tags.Tag(imh, imh.Tag, desc) to write the link files under the tags directory inside _manifests.
imh.Repository.Manifests(imh) is crucial here, because it determines the entry points of many of the functions that follow. Let's look at it:

func (repo *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) {
    manifestLinkPathFns := []linkPathFunc{
        // NOTE(stevvooe): Need to search through multiple locations since
        // 2.1.0 unintentionally linked into  _layers.
        manifestRevisionLinkPath,
        blobLinkPath,
    }

    manifestDirectoryPathSpec := manifestRevisionsPathSpec{name: repo.name.Name()}

    var statter distribution.BlobDescriptorService = &linkedBlobStatter{
        blobStore:   repo.blobStore,
        repository:  repo,
        linkPathFns: manifestLinkPathFns,
    }

    if repo.registry.blobDescriptorServiceFactory != nil {
        statter = repo.registry.blobDescriptorServiceFactory.BlobAccessController(statter)
    }

    blobStore := &linkedBlobStore{
        ctx:                  ctx,
        blobStore:            repo.blobStore,
        repository:           repo,
        deleteEnabled:        repo.registry.deleteEnabled,
        blobAccessController: statter,

        // TODO(stevvooe): linkPath limits this blob store to only
        // manifests. This instance cannot be used for blob checks.
        linkPathFns:           manifestLinkPathFns,
        linkDirectoryPathSpec: manifestDirectoryPathSpec,
    }

    ms := &manifestStore{
        ctx:        ctx,
        repository: repo,
        blobStore:  blobStore,
        schema1Handler: &signedManifestHandler{
            ctx:        ctx,
            repository: repo,
            blobStore:  blobStore,
        },
        schema2Handler: &schema2ManifestHandler{
            ctx:        ctx,
            repository: repo,
            blobStore:  blobStore,
        },
        manifestListHandler: &manifestListHandler{
            ctx:        ctx,
            repository: repo,
            blobStore:  blobStore,
        },
    }

    // Apply options
    for _, option := range options {
        err := option.Apply(ms)
        if err != nil {
            return nil, err
        }
    }

    return ms, nil
}

The function is long but straightforward, so we won't go through it in detail.
Next, manifests.Put(imh, manifest, options…):

Here we take schema2 as the example.

func (ms *schema2ManifestHandler) Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) {
    context.GetLogger(ms.ctx).Debug("(*schema2ManifestHandler).Put")

    m, ok := manifest.(*schema2.DeserializedManifest)
    if !ok {
        return "", fmt.Errorf("non-schema2 manifest put to schema2ManifestHandler: %T", manifest)
    }

    if err := ms.verifyManifest(ms.ctx, *m, skipDependencyVerification); err != nil {
        return "", err
    }

    mt, payload, err := m.Payload()
    if err != nil {
        return "", err
    }

    revision, err := ms.blobStore.Put(ctx, mt, payload)
    if err != nil {
        context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err)
        return "", err
    }

    // Link the revision into the repository.
    if err := ms.blobStore.linkBlob(ctx, revision); err != nil {
        return "", err
    }

    return revision.Digest, nil
}

ms.blobStore.Put(ctx, mt, payload) writes the manifest content into the blobs directory, and ms.blobStore.linkBlob(ctx, revision) creates the link file under repositories/<image name>/_manifests/revisions/.
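
In terms of files, putting a manifest thus produces a blob keyed by the sha256 of the manifest JSON plus a revisions link. Here is a small sketch of the digest and the two paths involved; the payload and paths are illustrative, not the exact pathFor output:

package main

import (
    "crypto/sha256"
    "fmt"
)

func main() {
    // Placeholder manifest payload; in reality this is the canonical manifest JSON.
    payload := []byte(`{"schemaVersion":2,"mediaType":"application/vnd.docker.distribution.manifest.v2+json"}`)

    hex := fmt.Sprintf("%x", sha256.Sum256(payload))

    // blobStore.Put stores the manifest content like any other blob ...
    fmt.Printf("blobs/sha256/%s/%s/data\n", hex[:2], hex)
    // ... and linkBlob records it as a revision of this repository.
    fmt.Printf("repositories/library/busybox/_manifests/revisions/sha256/%s/link\n", hex)
}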

Now let's look at tags.Tag(imh, imh.Tag, desc):

func (ts *tagStore) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error {
    currentPath, err := pathFor(manifestTagCurrentPathSpec{
        name: ts.repository.Named().Name(),
        tag:  tag,
    })

    if err != nil {
        return err
    }

    lbs := ts.linkedBlobStore(ctx, tag)

    // Link into the index
    if err := lbs.linkBlob(ctx, desc); err != nil {
        return err
    }

    // Overwrite the current link
    return ts.blobStore.link(ctx, currentPath, desc.Digest)
}

The function calls lbs := ts.linkedBlobStore(ctx, tag) to set up path resolution for the index directory under the tag, and lbs.linkBlob(ctx, desc) to create the link entry in that index directory.
ts.blobStore.link(ctx, currentPath, desc.Digest) then writes the link file in the current directory under the tag, overwriting the previous one.
At this point the manifest upload is complete.

With that, all the requests involved in pushing an image have been handled.

After a push completes, distribution sends notification events; that is left for a future article.
