Using the etcd clientv3 component

etcd lease: code example

package example

import (
	"context"
	"testing"
	"time"

	"github.com/coreos/etcd/clientv3"
)

var (
	client *clientv3.Client
	err    error
	conf   = clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	}
	getResp *clientv3.GetResponse
)

func Test_ExampleLease(t *testing.T) {
	if client, err = clientv3.New(conf); err != nil {
		t.Log(err)
		return
	}
	defer client.Close()

	// Grant a lease with a 10-second TTL.
	lease := clientv3.NewLease(client)
	resp, err := lease.Grant(context.TODO(), 10)
	if err != nil {
		t.Fatal(err)
	}
	t.Log(resp)
	var leaseID = resp.ID

	// Attach a key to the lease; the key is deleted automatically when the lease expires.
	kv := clientv3.NewKV(client)
	if resp, err := kv.Put(context.TODO(), "/tree/lock/job1", "bb", clientv3.WithLease(leaseID)); err != nil {
		t.Error(err)
		return
	} else {
		t.Log("put succeeded", resp)
	}

	// Poll the key every 2 seconds until the lease expires and the key disappears.
	for {
		if getResp, err = kv.Get(context.TODO(), "/tree/lock/job1"); err != nil {
			t.Error(err)
			return
		}
		if getResp.Count == 0 {
			t.Log("key expired")
			break
		} else {
			t.Log(getResp.Kvs)
		}

		time.Sleep(time.Second * 2)
	}
}
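Instead of polling the key, the remaining lifetime of a lease can be queried directly through the lease client's TimeToLive call. A minimal sketch, assuming the same lease and leaseID as above:

	// TTL is the remaining time in seconds; it is -1 once the lease has expired.
	ttlResp, err := lease.TimeToLive(context.TODO(), leaseID)
	if err != nil {
		t.Fatal(err)
	}
	if ttlResp.TTL == -1 {
		t.Log("lease already expired")
	} else {
		t.Logf("lease %v: %d of %d seconds left", ttlResp.ID, ttlResp.TTL, ttlResp.GrantedTTL)
	}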


Using etcd lease keep-alive

etcd's keep-alive interface renews a lease continuously, refreshing the keys attached to it so they do not expire.

type Lease interface {
	// Grant creates a new lease.
	Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error)

	// Revoke revokes the given lease.
	Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error)

	// TimeToLive retrieves the lease information of the given lease ID.
	TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error)

	// Leases retrieves all leases.
	Leases(ctx context.Context) (*LeaseLeasesResponse, error)

	// KeepAlive keeps the given lease alive forever. If the keepalive response
	// posted to the channel is not consumed immediately, the lease client will
	// continue sending keep alive requests to the etcd server at least every
	// second until latest response is consumed.
	//
	// The returned "LeaseKeepAliveResponse" channel closes if underlying keep
	// alive stream is interrupted in some way the client cannot handle itself;
	// given context "ctx" is canceled or timed out. "LeaseKeepAliveResponse"
	// from this closed channel is nil.
	//
	// If client keep alive loop halts with an unexpected error (e.g. "etcdserver:
	// no leader") or canceled by the caller (e.g. context.Canceled), the error
	// is returned. Otherwise, it retries.
	//
	// TODO(v4.0): post errors to last keep alive message before closing
	// (see https://github.com/coreos/etcd/pull/7866)
	KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error)

	// KeepAliveOnce renews the lease once. The response corresponds to the
	// first message from calling KeepAlive. If the response has a recoverable
	// error, KeepAliveOnce will retry the RPC with a new keep alive message.
	//
	// In most of the cases, Keepalive should be used instead of KeepAliveOnce.
	KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error)

	// Close releases all resources Lease keeps for efficient communication
	// with the etcd server.
	Close() error
}
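The test below exercises this interface end to end: it grants a 10-second lease, keeps it alive for 5 seconds through KeepAlive, and then polls the attached key until it expires.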



var (
	client *clientv3.Client
	err    error
	conf   = clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	}
	getResp *clientv3.GetResponse
	putResp *clientv3.PutResponse
	// lease   *clientv3.Lease

	keepResp <-chan *clientv3.LeaseKeepAliveResponse
)

func Test_ExampleLease(t *testing.T) {
	if client, err = clientv3.New(conf); err != nil {
		t.Log(err)
		return
	}
	defer client.Close()

	// Grant a lease with a 10-second TTL.
	lease := clientv3.NewLease(client)
	resp, err := lease.Grant(context.TODO(), 10)
	if err != nil {
		t.Fatal(err)
	}
	var leaseID = resp.ID

	// Log elapsed time once per second in the background.
	go func() {
		for j := 1; ; j++ {
			time.Sleep(time.Second * 1)
			t.Logf("%d seconds elapsed", j)
		}
	}()

	// Stop renewing after 5 seconds via the context timeout. KeepAlive renews
	// the lease until then; the last renewal resets the TTL to 10 seconds, so
	// the key survives roughly 5 + 10 = 15 seconds in total.
	timeoutCtx, cancelFunc := context.WithTimeout(context.TODO(), 5*time.Second)
	defer cancelFunc()
	if keepResp, err = lease.KeepAlive(timeoutCtx, leaseID); err != nil {
		t.Error(err)
		return
	}

	// Consume keep-alive responses from the receive-only channel. A nil
	// response (or a closed channel) means the lease is no longer renewed.
	go func() {
		for {
			keepresp, ok := <-keepResp
			if !ok || keepresp == nil {
				t.Log("lease is no longer being kept alive")
				return
			}
			t.Logf("received keep-alive response for lease %v", keepresp.ID)
		}
	}()

	kv := clientv3.NewKV(client)

	if resp, err := kv.Put(context.TODO(), "/tree/lock/job1", "bb", clientv3.WithLease(leaseID)); err != nil {
		t.Error(err)
		return
	} else {
		t.Log("put succeeded", resp)
	}

	// Poll the key every 2 seconds until the lease expires and the key disappears.
	for {
		if getResp, err = kv.Get(context.TODO(), "/tree/lock/job1"); err != nil {
			t.Error(err)
			return
		}
		if getResp.Count == 0 {
			t.Log("key expired")
			break
		} else {
			t.Log(getResp.Kvs)
		}

		time.Sleep(time.Second * 2)
	}
}
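For one-shot, manually scheduled renewal, the interface also exposes KeepAliveOnce, although as its documentation notes, KeepAlive is preferable in most cases. A minimal sketch, assuming the same lease and leaseID as above:

	// Renew the lease a single time; the response carries the refreshed TTL.
	onceResp, err := lease.KeepAliveOnce(context.TODO(), leaseID)
	if err != nil {
		t.Fatal(err)
	}
	t.Logf("lease %v renewed, TTL reset to %d seconds", onceResp.ID, onceResp.TTL)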

Using the watch interface to observe key changes

type Watcher interface {
	// Watch watches on a key or prefix. The watched events will be returned
	// through the returned channel. If revisions waiting to be sent over the
	// watch are compacted, then the watch will be canceled by the server, the
	// client will post a compacted error watch response, and the channel will close.
	// If the context "ctx" is canceled or timed out, returned "WatchChan" is closed,
	// and "WatchResponse" from this closed channel has zero events and nil "Err()".
	// The context "ctx" MUST be canceled, as soon as watcher is no longer being used,
	// to release the associated resources.
	//
	// If the context is "context.Background/TODO", returned "WatchChan" will
	// not be closed and block until event is triggered, except when server
	// returns a non-recoverable error (e.g. ErrCompacted).
	// For example, when context passed with "WithRequireLeader" and the
	// connected server has no leader (e.g. due to network partition),
	// error "etcdserver: no leader" (ErrNoLeader) will be returned,
	// and then "WatchChan" is closed with non-nil "Err()".
	// In order to prevent a watch stream being stuck in a partitioned node,
	// make sure to wrap context with "WithRequireLeader".
	//
	// Otherwise, as long as the context has not been canceled or timed out,
	// watch will retry on other recoverable errors forever until reconnected.
	//
	// TODO: explicitly set context error in the last "WatchResponse" message and close channel?
	// Currently, client contexts are overwritten with "valCtx" that never closes.
	// TODO(v3.4): configure watch retry policy, limit maximum retry number
	// (see https://github.com/etcd-io/etcd/issues/8980)
	Watch(ctx context.Context, key string, opts ...OpOption) WatchChan

	// RequestProgress requests a progress notify response be sent in all watch channels.
	RequestProgress(ctx context.Context) error

	// Close closes the watcher and cancels all watch requests.
	Close() error
}

Code example

// Requires "strconv" and "github.com/coreos/etcd/mvcc/mvccpb" in the import block.
func Test_ExampleWatch(t *testing.T) {
	if client, err = clientv3.New(conf); err != nil {
		t.Log(err)
		return
	}
	defer client.Close()
	kv := clientv3.NewKV(client)

	// Generate traffic: put and delete the key once per second for 10 seconds.
	go func() {
		for j := 0; j < 10; j++ {
			kv.Put(context.TODO(), "/tree/job2", strconv.Itoa(j))
			kv.Delete(context.TODO(), "/tree/job2")
			time.Sleep(time.Second * 1)
		}
	}()

	if getResp, err = kv.Get(context.TODO(), "/tree/job2"); err != nil {
		t.Error(err)
		return
	}
	t.Log("current value ", getResp.Kvs)

	// Revision is the cluster-wide transaction ID. It increases monotonically;
	// every modification (put, delete) increments it.
	revision := getResp.Header.Revision

	// Create a watcher. WithRev(revision) replays events starting at that
	// revision; use revision+1 to receive only changes made after the Get.
	watcher := clientv3.NewWatcher(client)
	t.Log("watching from revision ", revision)
	watchchan := watcher.Watch(context.TODO(), "/tree/job2", clientv3.WithRev(revision))
	for resp := range watchchan {
		// Events arrive batched in a single watch response.
		for _, evt := range resp.Events {
			switch evt.Type {
			case mvccpb.PUT:
				t.Logf("put: value %s, ModRevision %v", evt.Kv.Value, evt.Kv.ModRevision)
			case mvccpb.DELETE:
				t.Logf("delete event, ModRevision %v", evt.Kv.ModRevision)
			}
		}
	}
}
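As the Watcher documentation above advises, wrap the watch context with WithRequireLeader so a watch does not hang silently on a partitioned member. A minimal sketch (the "/tree/" prefix is illustrative), assuming the same watcher as above:

	// The watch fails fast instead of blocking if the member loses its leader.
	ctx, cancel := context.WithCancel(clientv3.WithRequireLeader(context.Background()))
	defer cancel()
	wch := watcher.Watch(ctx, "/tree/", clientv3.WithPrefix())
	for wresp := range wch {
		if err := wresp.Err(); err != nil {
			t.Log("watch error:", err) // e.g. "etcdserver: no leader"
			return
		}
		for _, evt := range wresp.Events {
			t.Logf("event %v on key %s", evt.Type, evt.Kv.Key)
		}
	}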
