kubelet sandbox创建与calico cni网络配置流程 (三)

4 篇文章 0 订阅
3 篇文章 0 订阅

转载链接https://blog.csdn.net/weixin_39961559/article/details/82706931

上一篇文章分析了calico cni的调用流程,通过分析calico cni的代码,我们知道kubelet在创建sandbox容器后对该容器的网络配置其实是通过调用calico cni的二进制文件进行配置的,而上一节的代码中我们遗留了calico-ipam没有深入分析。作为整个操作的一部分,CNI插件需要给interface分配并维护一个IP地址,并且还要安装一些和该interface有关的必要的路由。这给了CNI插件很大的灵活性同时也给它造成了很大的负担。许多插件需要重复编写多种用户想要的IP管理框架(例如,dhcp, host-local)。为了减轻各个插件的负担,并且将IP管理的功能独立出来,定义了第二种插件类型 – IP Address Management 插件(IPAM插件)。此时,其他插件的任务就是在适当的执行过程中调用相应的IPAM插件。IPAM插件用于确定interface的IP/子网,网关,路由并且将这些信息返回"main" plugin去执行。IPAM插件可以从一个协议(如dhcp)中,或者从本地文件系统存储的数据中,或者network configuration file中的"ipam"字段,或者上述这些方式的组合中获取信息。calico有自己的ipam,这节我们将分析它的代码,看它的主要功能和返回的结果。
calico-ipam源码基于1.11.6,分析calico-ipam.go文件,这里就直奔主题了,我们直接分析cmdAdd函数

// cmdAdd implements the CNI ADD command for the calico-ipam plugin.  It
// parses the network configuration from stdin and either assigns the specific
// IP requested via the CNI args (the "IP=x.x.x.x" entry populated from the
// cni.projectcalico.org/ipAddrs annotation) or auto-assigns IPv4/IPv6
// addresses from the configured Calico pools.  The resulting IPs are printed
// to stdout in the format defined by the requested CNI version.
func cmdAdd(args *skel.CmdArgs) error {
    conf := utils.NetConf{}
    if err := json.Unmarshal(args.StdinData, &conf); err != nil {
        return fmt.Errorf("failed to load netconf: %v", err)
    }

    nodename := determineNodename(conf)
    cniVersion := conf.CNIVersion

    utils.ConfigureLogging(conf.LogLevel)

    calicoClient, err := utils.CreateClient(conf)
    if err != nil {
        return err
    }

    workloadID, _, err := utils.GetIdentifiers(args)
    if err != nil {
        return err
    }

    // The handle ties all addresses assigned for this container together so
    // they can be released as a unit on DEL.
    handleID, err := utils.GetHandleID(conf.Name, args.ContainerID, workloadID)
    if err != nil {
        return err
    }
    logger := log.WithFields(log.Fields{
        "workloadID": workloadID,
        "handleID":   handleID,
    })

    ipamArgs := ipamArgs{}
    if err = types.LoadArgs(args.Args, &ipamArgs); err != nil {
        return err
    }

    r := &current.Result{}
    if ipamArgs.IP != nil {
        // A specific IP was requested - claim exactly that address.
        fmt.Fprintf(os.Stderr, "Calico CNI IPAM request IP: %v\n", ipamArgs.IP)

        // The hostname will be defaulted to the actual hostname if conf.Nodename is empty
        // (Keyed literal: the unkeyed form `cnet.IP{ipamArgs.IP}` trips `go vet composites`.)
        assignArgs := client.AssignIPArgs{IP: cnet.IP{IP: ipamArgs.IP}, HandleID: &handleID, Hostname: nodename}
        logger.WithField("assignArgs", assignArgs).Info("Assigning provided IP")
        err := calicoClient.IPAM().AssignIP(assignArgs)
        if err != nil {
            return err
        }

        var ipNetwork net.IPNet

        if ipamArgs.IP.To4() == nil {
            // It's an IPv6 address - report it as a /128 host route.
            ipNetwork = net.IPNet{IP: ipamArgs.IP, Mask: net.CIDRMask(128, 128)}
            r.IPs = append(r.IPs, &current.IPConfig{
                Version: "6",
                Address: ipNetwork,
            })

            logger.WithField("result.IPs", ipamArgs.IP).Info("Appending an IPv6 address to the result")
        } else {
            // It's an IPv4 address - report it as a /32 host route.
            ipNetwork = net.IPNet{IP: ipamArgs.IP, Mask: net.CIDRMask(32, 32)}
            r.IPs = append(r.IPs, &current.IPConfig{
                Version: "4",
                Address: ipNetwork,
            })

            logger.WithField("result.IPs", ipamArgs.IP).Info("Appending an IPv4 address to the result")
        }
    } else {
        // Default to assigning an IPv4 address
        num4 := 1
        if conf.IPAM.AssignIpv4 != nil && *conf.IPAM.AssignIpv4 == "false" {
            num4 = 0
        }

        // Default to NOT assigning an IPv6 address
        num6 := 0
        if conf.IPAM.AssignIpv6 != nil && *conf.IPAM.AssignIpv6 == "true" {
            num6 = 1
        }

        fmt.Fprintf(os.Stderr, "Calico CNI IPAM request count IPv4=%d IPv6=%d\n", num4, num6)

        // Pool restrictions come from the cni.projectcalico.org/ipv4pools /
        // ipv6pools pod annotations, passed through the netconf.
        v4pools, err := utils.ParsePools(conf.IPAM.IPv4Pools, true)
        if err != nil {
            return err
        }

        v6pools, err := utils.ParsePools(conf.IPAM.IPv6Pools, false)
        if err != nil {
            return err
        }

        assignArgs := client.AutoAssignArgs{
            Num4:      num4,
            Num6:      num6,
            HandleID:  &handleID,
            Hostname:  nodename,
            IPv4Pools: v4pools,
            IPv6Pools: v6pools,
        }
        logger.WithField("assignArgs", assignArgs).Info("Auto assigning IP")
        assignedV4, assignedV6, err := calicoClient.IPAM().AutoAssign(assignArgs)
        fmt.Fprintf(os.Stderr, "Calico CNI IPAM assigned addresses IPv4=%v IPv6=%v\n", assignedV4, assignedV6)
        if err != nil {
            return err
        }

        if num4 == 1 {
            if len(assignedV4) != num4 {
                return fmt.Errorf("Failed to request %d IPv4 addresses. IPAM allocated only %d.", num4, len(assignedV4))
            }
            ipV4Network := net.IPNet{IP: assignedV4[0].IP, Mask: net.CIDRMask(32, 32)}
            r.IPs = append(r.IPs, &current.IPConfig{
                Version: "4",
                Address: ipV4Network,
            })
        }

        if num6 == 1 {
            if len(assignedV6) != num6 {
                return fmt.Errorf("Failed to request %d IPv6 addresses. IPAM allocated only %d.", num6, len(assignedV6))
            }
            ipV6Network := net.IPNet{IP: assignedV6[0].IP, Mask: net.CIDRMask(128, 128)}
            r.IPs = append(r.IPs, &current.IPConfig{
                Version: "6",
                Address: ipV6Network,
            })
        }
        logger.WithFields(log.Fields{"result.IPs": r.IPs}).Info("IPAM Result")
    }

    // Print result to stdout, in the format defined by the requested cniVersion.
    return types.PrintResult(r, cniVersion)
}
 
 
  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7
  • 8
  • 9
  • 10
  • 11
  • 12
  • 13
  • 14
  • 15
  • 16
  • 17
  • 18
  • 19
  • 20
  • 21
  • 22
  • 23
  • 24
  • 25
  • 26
  • 27
  • 28
  • 29
  • 30
  • 31
  • 32
  • 33
  • 34
  • 35
  • 36
  • 37
  • 38
  • 39
  • 40
  • 41
  • 42
  • 43
  • 44
  • 45
  • 46
  • 47
  • 48
  • 49
  • 50
  • 51
  • 52
  • 53
  • 54
  • 55
  • 56
  • 57
  • 58
  • 59
  • 60
  • 61
  • 62
  • 63
  • 64
  • 65
  • 66
  • 67
  • 68
  • 69
  • 70
  • 71
  • 72
  • 73
  • 74
  • 75
  • 76
  • 77
  • 78
  • 79
  • 80
  • 81
  • 82
  • 83
  • 84
  • 85
  • 86
  • 87
  • 88
  • 89
  • 90
  • 91
  • 92
  • 93
  • 94
  • 95
  • 96
  • 97
  • 98
  • 99
  • 100
  • 101
  • 102
  • 103
  • 104
  • 105
  • 106
  • 107
  • 108
  • 109
  • 110
  • 111
  • 112
  • 113
  • 114
  • 115
  • 116
  • 117
  • 118
  • 119
  • 120
  • 121
  • 122
  • 123
  • 124
  • 125
  • 126
  • 127
  • 128
  • 129
  • 130
  • 131
  • 132
  • 133
  • 134
  • 135

calico-ipam的参数和calico cni差不多,在第二节中我们看到k8s会根据pod的注解cni.projectcalico.org/ipv4pools和cni.projectcalico.org/ipv6pools(如果有)的值配置ipv4地址池和ipv6地址池配置到args.StdinData参数,然后根据cni.projectcalico.org/ipAddrs(如果有)的值配置ipAddrs参数到环境变量,以IP=加ip的字符串形式。LoadArgs函数将ipam的Args解析到ipamArgs,其中args的形式为”K=V;K2=V2;…”的k/v相似,以”;”隔开。
看这里,如果ipamArgs参数的IP值不为空,这个也就是 我们在pod的注解里设定了cni.projectcalico.org/ipAddrs,那么calico-ipam将使用我们设定的这个值去分配pod的ip,查看AssignIP函数

// AssignIP assigns the provided IP address to the provided host.  The IP address
// must fall within a configured pool.  AssignIP will claim block affinity as needed
// in order to satisfy the assignment.  An error will be returned if the IP address
// is already assigned, or if StrictAffinity is enabled and the address is within
// a block that does not have affinity for the given host.
// AssignIP assigns the provided IP address to the provided host.  The IP address
// must fall within a configured pool.  AssignIP will claim block affinity as needed
// in order to satisfy the assignment.  An error will be returned if the IP address
// is already assigned, or if StrictAffinity is enabled and the address is within
// a block that does not have affinity for the given host.
func (c ipams) AssignIP(args AssignIPArgs) error {
    // Prefer the caller-supplied hostname; decideHostname falls back to an
    // OS-derived name when it is empty.
    hostname := decideHostname(args.Hostname)
    log.Infof("Assigning IP %s to host: %s", args.IP, hostname)

    // Reject addresses that are outside every enabled pool up front.
    if !c.blockReaderWriter.withinConfiguredPools(args.IP) {
        return goerrors.New("The provided IP address is not in a configured pool\n")
    }

    // Work out which allocation block owns this address, then run a bounded
    // read-modify-write (compare-and-swap) retry loop against the datastore.
    blockCIDR := getBlockCIDRForAddress(args.IP)
    log.Debugf("IP %s is in block '%s'", args.IP.String(), blockCIDR.String())
    for i := 0; i < ipamEtcdRetries; i++ {
        obj, err := c.client.Backend.Get(model.BlockKey{blockCIDR})
        if err != nil {
            if _, ok := err.(errors.ErrorResourceDoesNotExist); ok {
                // Block doesn't exist, we need to create it.  First,
                // validate the given IP address is within a configured pool.
                if !c.blockReaderWriter.withinConfiguredPools(args.IP) {
                    estr := fmt.Sprintf("The given IP address (%s) is not in any configured pools", args.IP.String())
                    log.Errorf(estr)
                    return goerrors.New(estr)
                }
                log.Debugf("Block for IP %s does not yet exist, creating", args.IP)
                cfg, err := c.GetIPAMConfig()
                if err != nil {
                    log.Errorf("Error getting IPAM Config: %s", err)
                    return err
                }
                // Claim affinity for the new block.  If another host wins the
                // race, retry the loop and read the block they created.
                err = c.blockReaderWriter.claimBlockAffinity(blockCIDR, hostname, *cfg)
                if err != nil {
                    if _, ok := err.(*affinityClaimedError); ok {
                        log.Warningf("Someone else claimed block %s before us", blockCIDR.String())
                        continue
                    } else {
                        return err
                    }
                }
                log.Infof("Claimed new block: %s", blockCIDR)
                continue
            } else {
                // Unexpected error
                return err
            }
        }
        // Mark the address as allocated in the in-memory copy of the block.
        block := allocationBlock{obj.Value.(*model.AllocationBlock)}
        err = block.assign(args.IP, args.HandleID, args.Attrs, hostname)
        if err != nil {
            log.Errorf("Failed to assign address %s: %s", args.IP, err)
            return err
        }

        // Increment handle.
        // NOTE(review): the error returned by incrementHandle is ignored here -
        // presumably best-effort bookkeeping; confirm against upstream intent.
        if args.HandleID != nil {
            c.incrementHandle(*args.HandleID, blockCIDR, 1)
        }

        // Update the block using the original KVPair to do a CAS.  No need to
        // update the Value since we have been manipulating the Value pointed to
        // in the KVPair.
        _, err = c.client.Backend.Update(obj)
        if err != nil {
            // Roll back the handle count so it stays in sync with the block.
            log.Warningf("Update failed on block %s", block.CIDR.String())
            if args.HandleID != nil {
                c.decrementHandle(*args.HandleID, blockCIDR, 1)
            }
            return err
        }
        return nil
    }
    // Every CAS attempt lost the race.
    return goerrors.New("Max retries hit")
}
 
 
  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7
  • 8
  • 9
  • 10
  • 11
  • 12
  • 13
  • 14
  • 15
  • 16
  • 17
  • 18
  • 19
  • 20
  • 21
  • 22
  • 23
  • 24
  • 25
  • 26
  • 27
  • 28
  • 29
  • 30
  • 31
  • 32
  • 33
  • 34
  • 35
  • 36
  • 37
  • 38
  • 39
  • 40
  • 41
  • 42
  • 43
  • 44
  • 45
  • 46
  • 47
  • 48
  • 49
  • 50
  • 51
  • 52
  • 53
  • 54
  • 55
  • 56
  • 57
  • 58
  • 59
  • 60
  • 61
  • 62
  • 63
  • 64
  • 65
  • 66
  • 67
  • 68
  • 69
  • 70
  • 71
  • 72
  • 73
  • 74
  • 75

我们分析AssignIP函数,首先它会通过withinConfiguredPools函数检测你指定的ip是否在地址池里,所以如果你想指定pod的ip,你要确保你的ip是在calico规划的地址池里

// withinConfiguredPools returns true if the given IP is within a configured
// Calico pool, and false otherwise.
// withinConfiguredPools returns true if the given IP is within a configured
// Calico pool, and false otherwise.  Disabled pools are not considered.
func (rw blockReaderWriter) withinConfiguredPools(ip cnet.IP) bool {
    allPools, err := rw.client.IPPools().List(api.IPPoolMetadata{})
    if err != nil {
        // The original discarded this error with `_`; on failure allPools is
        // nil and dereferencing .Items would panic.  If we cannot list the
        // pools we cannot prove membership, so report "not in a pool".
        log.Errorf("Error listing IP pools: %s", err)
        return false
    }
    for _, p := range allPools.Items {
        // Compare any enabled pools.
        if !p.Spec.Disabled && p.Metadata.CIDR.Contains(ip.IP) {
            return true
        }
    }
    return false
}
 
 
  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7
  • 8
  • 9
  • 10
  • 11
  • 12

getBlockCIDRForAddress函数会计算所指定ip的blockCIDR,然后根据这个blockCIDR去查找该block,这个block包含了这部分地址池内ip的分配情况。带着例子也许会通俗易懂一点:比如我们找到的blockCIDR为192.168.102.128/26,那么我们就会去查找这个BlockKey,它的值如下所示

{
    "cidr":"192.168.102.128/26",
    "affinity":"host:localhost.localdomain",
    "strictAffinity":false,
    "allocations":[
        0,
        null,
        null,
        null,
        null,
        null,
        1,
        null,
        2,
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        null
    ],
    "unallocated":[
        9,
        10,
        11,
        12,
        13,
        14,
        15,
        16,
        17,
        18,
        19,
        20,
        21,
        22,
        23,
        24,
        25,
        26,
        27,
        28,
        29,
        30,
        31,
        32,
        33,
        34,
        35,
        36,
        37,
        38,
        39,
        40,
        41,
        42,
        43,
        44,
        45,
        46,
        47,
        48,
        49,
        50,
        51,
        52,
        53,
        54,
        55,
        56,
        57,
        58,
        59,
        60,
        61,
        62,
        63,
        1,
        2,
        5,
        4,
        3,
        7
    ],
    "attributes":[
        {
            "handle_id":null,
            "secondary":null
        },
        {
            "handle_id":"k8s-pod-network.2c528435a732f35594fd54229172be1cc0b70748ee5c2e7e30850135d219703f",
            "secondary":null
        },
        {
            "handle_id":"k8s-pod-network.10f9fc77f8ebc30365ab2d481f9e755cf315bf85dbd7a25657b30ac016c4b51b",
            "secondary":null
        }
    ]
}
 
 
  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7
  • 8
  • 9
  • 10
  • 11
  • 12
  • 13
  • 14
  • 15
  • 16
  • 17
  • 18
  • 19
  • 20
  • 21
  • 22
  • 23
  • 24
  • 25
  • 26
  • 27
  • 28
  • 29
  • 30
  • 31
  • 32
  • 33
  • 34
  • 35
  • 36
  • 37
  • 38
  • 39
  • 40
  • 41
  • 42
  • 43
  • 44
  • 45
  • 46
  • 47
  • 48
  • 49
  • 50
  • 51
  • 52
  • 53
  • 54
  • 55
  • 56
  • 57
  • 58
  • 59
  • 60
  • 61
  • 62
  • 63
  • 64
  • 65
  • 66
  • 67
  • 68
  • 69
  • 70
  • 71
  • 72
  • 73
  • 74
  • 75
  • 76
  • 77
  • 78
  • 79
  • 80
  • 81
  • 82
  • 83
  • 84
  • 85
  • 86
  • 87
  • 88
  • 89
  • 90
  • 91
  • 92
  • 93
  • 94
  • 95
  • 96
  • 97
  • 98
  • 99
  • 100
  • 101
  • 102
  • 103
  • 104
  • 105
  • 106
  • 107
  • 108
  • 109
  • 110
  • 111
  • 112
  • 113
  • 114
  • 115
  • 116
  • 117
  • 118
  • 119
  • 120
  • 121
  • 122
  • 123
  • 124
  • 125
  • 126
  • 127
  • 128
  • 129
  • 130
  • 131
  • 132
  • 133
  • 134
  • 135
  • 136
  • 137
  • 138
  • 139
  • 140
  • 141
  • 142
  • 143
  • 144
  • 145
  • 146
  • 147
  • 148

根据block返回结果,然后再执行assign函数

// assign marks the given address as allocated in this block on behalf of
// host, recording handleID/attrs, and removes its ordinal from the
// unallocated list.  It returns an error if strict affinity is enabled and
// the host does not match, if the address lies outside the block, or if the
// address is already assigned.
func (b *allocationBlock) assign(address cnet.IP, handleID *string, attrs map[string]string, host string) error {
    if b.StrictAffinity && b.Affinity != nil && !hostAffinityMatches(host, b.AllocationBlock) {
        // Affinity check is enabled but the host does not match - error.
        return errors.New("Block host affinity does not match")
    }

    // Convert to an ordinal - the address's index within the block.  Valid
    // ordinals are 0..blockSize-1.  The original upper-bound test used
    // `> blockSize`, which let ordinal == blockSize through and would panic
    // on the Allocations index below; use >= instead.
    ordinal := ipToOrdinal(address, *b)
    if (ordinal < 0) || (ordinal >= blockSize) {
        return errors.New("IP address not in block")
    }

    // Check if already allocated.
    if b.Allocations[ordinal] != nil {
        return errors.New("Address already assigned in block")
    }

    // Set up attributes.
    attrIndex := b.findOrAddAttribute(handleID, attrs)
    b.Allocations[ordinal] = &attrIndex

    // Remove from unallocated.
    for i, unallocated := range b.Unallocated {
        if unallocated == ordinal {
            b.Unallocated = append(b.Unallocated[:i], b.Unallocated[i+1:]...)
            break
        }
    }
    return nil
}
 
 
  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7
  • 8
  • 9
  • 10
  • 11
  • 12
  • 13
  • 14
  • 15
  • 16
  • 17
  • 18
  • 19
  • 20
  • 21
  • 22
  • 23
  • 24
  • 25
  • 26
  • 27
  • 28
  • 29
  • 30

ipToOrdinal函数会根据我们指定的ip地址计算出该ip在block的allocations字段中所占的位置ordinal,findOrAddAttribute会计算当前所要分配的ip地址是该block中第几个分配的,其值为attrIndex,然后根据这两个值去填充allocations。既然我们分配了ip,那么相应地我们得在未分配列表中把已分配的占位符去掉,assign函数的操作是这样的:如果是分配操作,就相应地从unallocated字段(它的值是一个数组)中去掉值为ordinal的数。对照前面那个例子查看这两个函数就会比较清晰明了了。
在assign函数里我们只看到它的逻辑都是在处理block,好像并没有涉及到ip的分配。因为ip你已经指定了,那它当然不会再给你分配ip了。它只是根据ip在block中做一些类似于占位符的操作,告诉别人这个ip已被使用,以后给其它pod分配ip时不要再拿这个ip去分配就行了。当然,block的更新目前只发生在我们本机节点的逻辑函数里,我们还要把block update到etcd,其它节点的calico在分配ip时才能知晓这个操作,所以你也看到了它的update操作。
好了,分析完我们自定义的ip,现在分析一下我们未指定ip的情况,calico-ipam默认是分配ipv4地址而不分配ipv6地址,除非我们指定,这里只分析ipv4的情况,先看分配参数assignArgs

// Parameters for Calico IPAM's AutoAssign: how many v4/v6 addresses to
// allocate, the handle used to release them later, the owning node, and the
// candidate pools taken from the pod annotations (if any).
assignArgs := client.AutoAssignArgs{
    Num4:      num4,
    Num6:      num6,
    HandleID:  &handleID,
    Hostname:  nodename,
    IPv4Pools: v4pools,
    IPv6Pools: v6pools,
}
 
 
  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7
  • 8

这里我们看到的v4pools和v6pools就是我们在pod中定义的v4和v6地址池,接下来我们分析AutoAssign函数

// AutoAssign automatically assigns one or more IP addresses as specified by the
// provided AutoAssignArgs.  AutoAssign returns the list of the assigned IPv4 addresses,
// and the list of the assigned IPv6 addresses.
// AutoAssign automatically assigns one or more IP addresses as specified by
// the provided AutoAssignArgs.  It returns the list of assigned IPv4
// addresses followed by the list of assigned IPv6 addresses.
func (c ipams) AutoAssign(args AutoAssignArgs) ([]net.IP, []net.IP, error) {
    // Determine the hostname to use - prefer the provided hostname if
    // non-nil, otherwise use the hostname reported by os.
    hostname := decideHostname(args.Hostname)
    log.Infof("Auto-assign %d ipv4, %d ipv6 addrs for host '%s'", args.Num4, args.Num6, hostname)

    var v4Addrs, v6Addrs []net.IP

    if args.Num4 != 0 {
        // Assign IPv4 addresses.
        log.Debugf("Assigning IPv4 addresses")
        // Reject any IPv6 CIDR that was smuggled into the IPv4 pool list.
        for _, pool := range args.IPv4Pools {
            if pool.IP.To4() == nil {
                return nil, nil, fmt.Errorf("provided IPv4 IPPools list contains one or more IPv6 IPPools")
            }
        }
        addrs, err := c.autoAssign(args.Num4, args.HandleID, args.Attrs, args.IPv4Pools, ipv4, hostname)
        if err != nil {
            log.Errorf("Error assigning IPV4 addresses: %s", err)
            return nil, nil, err
        }
        v4Addrs = addrs
    }

    if args.Num6 != 0 {
        // If no err assigning V4, try to assign any V6.
        log.Debugf("Assigning IPv6 addresses")
        // Symmetric validation: no IPv4 CIDRs in the IPv6 pool list.
        for _, pool := range args.IPv6Pools {
            if pool.IP.To4() != nil {
                return nil, nil, fmt.Errorf("provided IPv6 IPPools list contains one or more IPv4 IPPools")
            }
        }
        addrs, err := c.autoAssign(args.Num6, args.HandleID, args.Attrs, args.IPv6Pools, ipv6, hostname)
        if err != nil {
            log.Errorf("Error assigning IPV6 addresses: %s", err)
            return nil, nil, err
        }
        v6Addrs = addrs
    }

    return v4Addrs, v6Addrs, nil
}
 
 
  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7
  • 8
  • 9
  • 10
  • 11
  • 12
  • 13
  • 14
  • 15
  • 16
  • 17
  • 18
  • 19
  • 20
  • 21
  • 22
  • 23
  • 24
  • 25
  • 26
  • 27
  • 28
  • 29
  • 30
  • 31
  • 32
  • 33
  • 34
  • 35
  • 36
  • 37
  • 38
  • 39
  • 40
  • 41
  • 42
  • 43
  • 44

该函数里只有autoAssign是主要逻辑,查看autoAssign函数

// autoAssign allocates up to num addresses of the given IP version for host:
// first from blocks already affine to the host, then (if the IPAM config
// allows) by claiming new affine blocks, and finally (if strict affinity is
// disabled) by hunting through non-affine blocks in the configured pools.
// It may return fewer addresses than requested if the pools are exhausted.
func (c ipams) autoAssign(num int, handleID *string, attrs map[string]string, pools []net.IPNet, version ipVersion, host string) ([]net.IP, error) {

    // Start by trying to assign from one of the host-affine blocks.  We
    // always do strict checking at this stage, so it doesn't matter whether
    // globally we have strict_affinity or not.
    log.Debugf("Looking for addresses in current affine blocks for host '%s'", host)
    affBlocks, err := c.blockReaderWriter.getAffineBlocks(host, version, pools)
    if err != nil {
        return nil, err
    }
    log.Debugf("Found %d affine IPv%d blocks for host '%s': %v", len(affBlocks), version.Number, host, affBlocks)
    ips := []net.IP{}
    for len(ips) < num {
        if len(affBlocks) == 0 {
            log.Infof("Ran out of existing affine blocks for host '%s'", host)
            break
        }
        cidr := affBlocks[0]
        affBlocks = affBlocks[1:]
        // Ask the block only for the outstanding remainder and accumulate the
        // result.  (The original did `ips, _ = assignFromExistingBlock(cidr,
        // num, ...)`, which discarded addresses obtained from earlier blocks
        // and silently dropped the error.)
        newIPs, err := c.assignFromExistingBlock(cidr, num-len(ips), handleID, attrs, host, true)
        if err != nil {
            // Best effort - move on and try the next affine block.
            log.Warningf("Failed to assign from block '%s': %s", cidr.String(), err)
            continue
        }
        ips = append(ips, newIPs...)
        log.Debugf("Block '%s' provided addresses: %v", cidr.String(), newIPs)
    }

    // If there are still addresses to allocate, then we've run out of
    // blocks with affinity.  Before we can assign new blocks or assign in
    // non-affine blocks, we need to check that our IPAM configuration
    // allows that.
    config, err := c.GetIPAMConfig()
    if err != nil {
        return nil, err
    }
    log.Debugf("Allocate new blocks? Config: %+v", config)
    if config.AutoAllocateBlocks {
        rem := num - len(ips)
        retries := ipamEtcdRetries
        for rem > 0 && retries > 0 {
            // Claim a new block.
            log.Infof("Need to allocate %d more addresses - allocate another block", rem)
            retries = retries - 1
            b, err := c.blockReaderWriter.claimNewAffineBlock(host, version, pools, *config)
            if err != nil {
                // Error claiming new block.
                if _, ok := err.(noFreeBlocksError); ok {
                    // No free blocks.  Break.
                    break
                }
                log.Errorf("Error claiming new block: %s", err)
                return nil, err
            } else {
                // Claim successful.  Assign addresses from the new block.
                log.Infof("Claimed new block %s - assigning %d addresses", b.String(), rem)
                newIPs, err := c.assignFromExistingBlock(*b, rem, handleID, attrs, host, config.StrictAffinity)
                if err != nil {
                    // Original format string had no verb for err (go vet printf).
                    log.Warningf("Failed to assign IPs: %s", err)
                    break
                }
                log.Debugf("Assigned IPs from new block: %s", newIPs)
                ips = append(ips, newIPs...)
                rem = num - len(ips)
            }
        }

        if retries == 0 {
            return nil, goerrors.New("Max retries hit")
        }
    }

    // If there are still addresses to allocate, we've now tried all blocks
    // with some affinity to us, and tried (and failed) to allocate new
    // ones.  If we do not require strict host affinity, our last option is
    // a random hunt through any blocks we haven't yet tried.
    //
    // Note that this processing simply takes all of the IP pools and breaks
    // them up into block-sized CIDRs, then shuffles and searches through each
    // CIDR.  This algorithm does not work if we disallow auto-allocation of
    // blocks because the allocated blocks may be sparsely populated in the
    // pools resulting in a very slow search for free addresses.
    //
    // If we need to support non-strict affinity and no auto-allocation of
    // blocks, then we should query the actual allocation blocks and assign
    // from those.
    rem := num - len(ips)
    if !config.StrictAffinity && rem != 0 {
        log.Infof("Attempting to assign %d more addresses from non-affine blocks", rem)
        // Figure out the pools to allocate from.
        if len(pools) == 0 {
            // Default to all configured pools.
            allPools, err := c.client.IPPools().List(api.IPPoolMetadata{})
            if err != nil {
                log.Errorf("Error reading configured pools: %s", err)
                return ips, nil
            }

            // Grab all the IP networks in these pools.
            for _, p := range allPools.Items {
                // Don't include disabled pools.
                if !p.Spec.Disabled {
                    pools = append(pools, p.Metadata.CIDR)
                }
            }
        }

        // Iterate over pools and assign addresses until we either run out of pools,
        // or the request has been satisfied.
        for _, p := range pools {
            log.Debugf("Assigning from random blocks in pool %s", p.String())
            newBlock := randomBlockGenerator(p, host)
            for rem > 0 {
                // Grab a new random block.
                blockCIDR := newBlock()
                if blockCIDR == nil {
                    log.Warningf("All addresses exhausted in pool %s", p.String())
                    break
                }

                // Attempt to assign from the block.
                newIPs, err := c.assignFromExistingBlock(*blockCIDR, rem, handleID, attrs, host, false)
                if err != nil {
                    log.Warningf("Failed to assign IPs in pool %s: %s", p.String(), err)
                    break
                }
                ips = append(ips, newIPs...)
                rem = num - len(ips)
            }
        }
    }

    log.Infof("Auto-assigned %d out of %d IPv%ds: %v", len(ips), num, version.Number, ips)
    return ips, nil
}
 
 
  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7
  • 8
  • 9
  • 10
  • 11
  • 12
  • 13
  • 14
  • 15
  • 16
  • 17
  • 18
  • 19
  • 20
  • 21
  • 22
  • 23
  • 24
  • 25
  • 26
  • 27
  • 28
  • 29
  • 30
  • 31
  • 32
  • 33
  • 34
  • 35
  • 36
  • 37
  • 38
  • 39
  • 40
  • 41
  • 42
  • 43
  • 44
  • 45
  • 46
  • 47
  • 48
  • 49
  • 50
  • 51
  • 52
  • 53
  • 54
  • 55
  • 56
  • 57
  • 58
  • 59
  • 60
  • 61
  • 62
  • 63
  • 64
  • 65
  • 66
  • 67
  • 68
  • 69
  • 70
  • 71
  • 72
  • 73
  • 74
  • 75
  • 76
  • 77
  • 78
  • 79
  • 80
  • 81
  • 82
  • 83
  • 84
  • 85
  • 86
  • 87
  • 88
  • 89
  • 90
  • 91
  • 92
  • 93
  • 94
  • 95
  • 96
  • 97
  • 98
  • 99
  • 100
  • 101
  • 102
  • 103
  • 104
  • 105
  • 106
  • 107
  • 108
  • 109
  • 110
  • 111
  • 112
  • 113
  • 114
  • 115
  • 116
  • 117
  • 118
  • 119
  • 120
  • 121
  • 122
  • 123
  • 124
  • 125
  • 126
  • 127
  • 128
  • 129
  • 130

getAffineBlocks函数根据calico所在的host和给定的地址池返回给定的block列表

// getAffineBlocks returns the CIDRs of all allocation blocks with affinity to
// the given host and IP version, optionally filtered down to the supplied
// pools.  An empty slice is returned when no affinity records exist yet.
func (rw blockReaderWriter) getAffineBlocks(host string, ver ipVersion, pools []cnet.IPNet) ([]cnet.IPNet, error) {
    // List every affinity record for this host/version by providing an empty
    // BlockListOptions to the List operation.
    opts := model.BlockAffinityListOptions{Host: host, IPVersion: ver.Number}
    datastoreObjs, err := rw.client.Backend.List(opts)
    if err != nil {
        if _, ok := err.(errors.ErrorResourceDoesNotExist); !ok {
            log.Errorf("Error getting affine blocks: %s", err)
            return nil, err
        }
        // The block path does not exist yet.  This is OK - it means
        // there are no affine blocks.
        return []cnet.IPNet{}, nil
    }

    // Extract each block CIDR, filtering on the requested pools when given.
    ids := []cnet.IPNet{}
    for _, o := range datastoreObjs {
        k := o.Key.(model.BlockAffinityKey)
        if len(pools) == 0 {
            // No pool filter - take every affine block.
            ids = append(ids, k.CIDR)
            continue
        }
        for _, pool := range pools {
            if pool.Contains(k.CIDR.IPNet.IP) {
                ids = append(ids, k.CIDR)
                break
            }
        }
    }
    return ids, nil
}
 
 
  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7
  • 8
  • 9
  • 10
  • 11
  • 12
  • 13
  • 14
  • 15
  • 16
  • 17
  • 18
  • 19
  • 20
  • 21
  • 22
  • 23
  • 24
  • 25
  • 26
  • 27
  • 28
  • 29
  • 30
  • 31
  • 32
  • 33
  • 34
  • 35
  • 36
  • 37

获得block列表后,将循环block(如果当前block的地址已经分配完毕),直到ip已经分配好,那么他的ip是怎么分配的呢?我们查看assignFromExistingBlock函数

// assignFromExistingBlock attempts to assign up to num addresses from the
// block identified by blockCIDR, retrying the compare-and-swap update a
// bounded number of times.  It returns only addresses whose assignment was
// committed to the datastore; an empty slice means the block is full.
func (c ipams) assignFromExistingBlock(
    blockCIDR net.IPNet, num int, handleID *string, attrs map[string]string, host string, affCheck bool) ([]net.IP, error) {
    // Limit number of retries.
    for i := 0; i < ipamEtcdRetries; i++ {
        log.Debugf("Auto-assign from %s - retry %d", blockCIDR.String(), i)
        // Keyed literal: the unkeyed form trips `go vet composites`.
        obj, err := c.client.Backend.Get(model.BlockKey{CIDR: blockCIDR})
        if err != nil {
            log.Errorf("Error getting block: %s", err)
            return nil, err
        }

        // Pull out the block.
        b := allocationBlock{obj.Value.(*model.AllocationBlock)}

        log.Debugf("Got block: %+v", b)
        ips, err := b.autoAssign(num, handleID, host, attrs, affCheck)
        if err != nil {
            log.Errorf("Error in auto assign: %s", err)
            return nil, err
        }
        if len(ips) == 0 {
            log.Infof("Block %s is full", blockCIDR)
            return []net.IP{}, nil
        }

        // Increment handle count.
        // NOTE(review): the error from incrementHandle is ignored here -
        // presumably best-effort bookkeeping; confirm upstream intent.
        if handleID != nil {
            c.incrementHandle(*handleID, blockCIDR, num)
        }

        // Update the block using CAS by passing back the original
        // KVPair.
        obj.Value = b.AllocationBlock
        _, err = c.client.Backend.Update(obj)
        if err != nil {
            log.Infof("Failed to update block '%s' - try again", b.CIDR.String())
            if handleID != nil {
                c.decrementHandle(*handleID, blockCIDR, num)
            }
            continue
        }
        // Only return IPs whose block update actually committed.
        return ips, nil
    }
    // The original fell through here and returned the last tentative
    // assignment with a nil error even though it was never persisted;
    // surface the failure instead.
    return nil, goerrors.New("Max retries hit")
}
 
 
  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7
  • 8
  • 9
  • 10
  • 11
  • 12
  • 13
  • 14
  • 15
  • 16
  • 17
  • 18
  • 19
  • 20
  • 21
  • 22
  • 23
  • 24
  • 25
  • 26
  • 27
  • 28
  • 29
  • 30
  • 31
  • 32
  • 33
  • 34
  • 35
  • 36
  • 37
  • 38
  • 39
  • 40
  • 41
  • 42
  • 43
  • 44
  • 45
  • 46

我们看到assignFromExistingBlock函数也存在autoAssign函数,这个函数属于allocationBlock的,这里面这个函数是核心的,经过autoAssign函数后再更新block。我们看到这里,是不是觉得有点熟悉,好像和我们分析指定ip的时候,也是填充block,然后update。我们先做一个大胆的假设,既然这里也是填充block以及做update操作,唯一缺乏的是没有ip,那我们是否可以认为我们所需要的ip是不是通过占位这些block分配出来的,带着这个疑问我们分析一下autoAssign函数

// autoAssign hands out up to num free addresses from this block, recording
// handleID/attrs against each one.  It returns fewer than num addresses when
// the block runs out, and errors only when an enabled affinity check fails.
func (b *allocationBlock) autoAssign(
    num int, handleID *string, host string, attrs map[string]string, affinityCheck bool) ([]cnet.IP, error) {

    // Determine if we need to check for affinity.
    if (b.StrictAffinity || affinityCheck) && b.Affinity != nil && !hostAffinityMatches(host, b.AllocationBlock) {
        // Affinity check is enabled but the host does not match - error.
        return nil, fmt.Errorf("Block affinity (%s) does not match provided (%s)", *b.Affinity, host)
    }

    // Pop free ordinals off the front of the unallocated list until we have
    // enough or the block runs dry.
    ordinals := []int{}
    for len(b.Unallocated) > 0 && len(ordinals) < num {
        next := b.Unallocated[0]
        b.Unallocated = b.Unallocated[1:]
        ordinals = append(ordinals, next)
    }

    // Record each allocation and materialise the corresponding IP by adding
    // the ordinal to the block's base address.
    ips := []cnet.IP{}
    for _, ord := range ordinals {
        idx := b.findOrAddAttribute(handleID, attrs)
        b.Allocations[ord] = &idx
        ips = append(ips, incrementIP(cnet.IP{IP: b.CIDR.IP}, big.NewInt(int64(ord))))
    }

    log.Debugf("Block %s returned ips: %v", b.CIDR.String(), ips)
    return ips, nil
}
 
 
  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7
  • 8
  • 9
  • 10
  • 11
  • 12
  • 13
  • 14
  • 15
  • 16
  • 17
  • 18
  • 19
  • 20
  • 21
  • 22
  • 23
  • 24
  • 25
  • 26
  • 27
  • 28
  • 29

这里有个hostAffinityMatches函数,是为了检查所提供的host是否是我们block里面的host,因为在autoAssign函数中,block分配的地址是属于某个host的。

这里我们看到了ordinals和attrIndex,是不是很熟悉,似曾相识。是的,这个我们在分析指定ip的时候已经分析过了,只不过那个时候我们是通过ip去计算block里的allocations和unallocated占位符。而这里它选取了unallocated数组里的第一个数,然后根据这个数去占位allocations并填充它的值,最后根据这个值去生成ip。看出来了么,其实是指定ip的逆操作,通过block的unallocated占位去计算出ip,这个ip也就是我们容器的ip。

回到ipams的autoAssign函数,如果我们获得的block列表的地址池都被分配完了,那calico-ipam会再给该host分配一个block,在分配block的时候根据指定的ip地址池分配,然后再给容器分配剩下的ip。后面的操作和前面分析的一致了,也是通过调用assignFromExistingBlock给容器分配ip地址。

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值