MIT 6.824 2023 Lab 1 Notes

Part 1: The Coordinator

The coordinator struct is initialized as follows. It mainly records global state and coordinates the workers (mappers/reducers):

c := Coordinator{
		Stage:                     "Map",
		Map_num:                   len(files),
		Reduce_num:                nReduce,
		Tasks:                     make(map[string]Task),
		Internal_file:             make(map[string][]string),
		Available_tasks:           make(chan Task, int(len(files))),
		Task_finished_map:         make(map[int]bool),
		Task_finished_num:         0,
		Finished_reducce_task:     make(map[int]bool),
		Finished_reducce_task_num: 0,
		Map_timer:                 make(map[int]time.Time),
		Reduce_timer:              make(map[int]time.Time),
		Map_Task:                  make(map[int]Task),
		Reduce_Task:               make(map[int]Task),
	}

Because this is a distributed system, a background goroutine is needed for fault tolerance: it puts crashed tasks back into the channel. (In my first design a single channel held both Map and Reduce tasks, but that made races around the stage switch very likely, so I split it into two channels.) The design largely determines how many bugs you end up with and how hard they are to locate.

go func() {
		for {
			time.Sleep(10 * time.Second)
			c.Lock.Lock()
			log.Println("start dead task check")
			if nReduce == c.Finished_reducce_task_num {
				c.Lock.Unlock()
				break
			}
			for id, this_time := range c.Map_timer {
				if c.Map_Task[id].IsPush && !c.Task_finished_map[id] && time.Now().After(this_time.Add(10*time.Second)) {
					log.Println("the map is crash!!!!!!!!!")
					c.Lock.Unlock()
					c.Map_tasks_chan <- c.Map_Task[id]
					c.Lock.Lock()
					delete(c.Map_Task, id)
				}
			}
			log.Println(c.Reduce_timer)
			for id, this_time := range c.Reduce_timer {
				log.Println(c.Reduce_Task[id].IsPush, c.Finished_reducce_task[id])
				if c.Reduce_Task[id].IsPush && !c.Finished_reducce_task[id] && time.Now().After(this_time.Add(10*time.Second)) {
					log.Println("the reduce is crash!!!!!!!!!", id)
					c.Lock.Unlock()
					c.Reduce_tasks_chan <- c.Reduce_Task[id]
					c.Lock.Lock()
					delete(c.Reduce_Task, id)
				}
			}
			c.Lock.Unlock()
			log.Println("start dead task check finish")
		}
	}()

The goroutine that switches from the Map stage to the Reduce stage:

go func() {
		for {
			c.Lock.Lock()
			if c.Stage == "Reduce" {
				map_reduce_bucket := make(map[int]Key_and_files_set)
				for i := 0; i < c.Reduce_num; i++ {
					map1 := make(map[string]bool)
					map2 := make(map[string]bool)
					map_reduce_bucket[i] = Key_and_files_set{Key: map1, Files: map2}
				}
				for key, files := range c.Internal_file {
					reduce_id := ihash(key) % c.Reduce_num
					map_reduce_bucket[reduce_id].Key[key] = true
					for _, file := range files {
						map_reduce_bucket[reduce_id].Files[file] = true
					}

				}
				for i, Key_and_files := range map_reduce_bucket {
					log.Println("reduce task produce ", i)
					// log.Println(this_key_file)
					c.Reduce_tasks_chan <- Task{
						Task_type:  "Reduce",
						Num_reduce: c.Reduce_num,
						Task_id:    i,
						Files_set:  Key_and_files,
						IsPush:     false,
					}
					// c.Reduce_Task[i] = Task{
					// 	Task_type:  "Reduce",
					// 	Num_reduce: c.Reduce_num,
					// 	Task_id:    i,
					// 	Files_set:  Key_and_files,
					// 	IsPush:     true,
					// }
				}
				c.Lock.Unlock()
				break
			}
			c.Lock.Unlock()
			time.Sleep(1000 * time.Millisecond)
		}
	}()
	return &c

The RPC handler. Workers call it to request work; the coordinator uses it to assign tasks and to update global state:

func (c *Coordinator) ApplyForTask(args *RpcArgs, replys *RpcReply) error {
	c.Lock.Lock()
	log.Println("RPC Apply")
	if args.Task_finished {
		log.Println("receive reply")
		// conflict let the worker exit
		if args.Type_request != c.Stage || (c.Stage == "Map" && c.Task_finished_map[args.Task_id]) || (c.Stage == "Reduce" && c.Finished_reducce_task[args.Task_id]) {
			log.Println("the stage and task conflict")
			replys.Conflict = true
			c.Lock.Unlock()
			return nil
		}
		// map task finish, update the state
		if args.Type_request == "Map" {
			c.Task_finished_map[args.Task_id] = true
			c.Task_finished_num++
			log.Println("task num and map num", c.Task_finished_num, c.Map_num, args.Task_id)
			for key, files := range args.Internal_file {
				// log.Println(files)
				c.Internal_file[key] = append(c.Internal_file[key], files...)
			}
			if c.Task_finished_num == c.Map_num {
				log.Println("stage change Reduce")
				c.Stage = "Reduce"
			}
			c.Lock.Unlock()
			return nil
		}
		// reduce task finish, update the state
		if args.Type_request == "Reduce" {
			c.Finished_reducce_task[args.Task_id] = true
			c.Finished_reducce_task_num++
			log.Println("reduce finished task", c.Finished_reducce_task_num, args.Task_id)
			c.Lock.Unlock()
			return nil
		}
	}
	if !args.Task_finished {
		if c.Stage == "Map" {
			replys.Task.Task_type = "Map"
			c.Lock.Unlock()
			log.Println("the block is ", c.Task_finished_num)
			replys.Task = <-c.Map_tasks_chan
			log.Println("here1")
			c.Lock.Lock()
			log.Println("reach here")
			c.Map_timer[replys.Task.Task_id] = time.Now()
			replys.Task.IsPush = true
			c.Map_Task[replys.Task.Task_id] = replys.Task
			c.Lock.Unlock()

			log.Println("the finished block is ", c.Task_finished_num)
			replys.Is_success = true
			return nil
		}
		if c.Stage == "Reduce" {
			log.Println("Reduce dispatch")
			replys.Task.Task_type = "Reduce"
			c.Lock.Unlock()
			replys.Task = <-c.Reduce_tasks_chan
			log.Println("here1!")
			c.Lock.Lock()
			log.Println("reach here!!")
			log.Println("here is task id", replys.Task.Task_id)
			replys.Task.IsPush = true
			c.Reduce_timer[replys.Task.Task_id] = time.Now()
			c.Reduce_Task[replys.Task.Task_id] = replys.Task
			log.Println("the finished block is ", c.Finished_reducce_task_num)
			c.Lock.Unlock()
			replys.Is_success = true
			return nil
		}
	}
	c.Lock.Unlock()
	return nil
}

Part 2: The Worker

The worker runs in two phases: 1. request a task from the coordinator; 2. report completion (a sketch of this loop follows).
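The skeleton of that loop looks roughly like this. RpcArgs, RpcReply, Task, and call are the lab types used throughout this post; runTask is a hypothetical placeholder for the Map/Reduce branches shown in the full Worker further below:

func workerLoop(mapf func(string, string) []KeyValue,
	reducef func(string, []string) string) {
	for {
		// phase 1: ask the coordinator for a task
		args := RpcArgs{Task_finished: false}
		replys := RpcReply{}
		if !call("Coordinator.ApplyForTask", &args, &replys) {
			return // assume the coordinator is gone and the job is done
		}

		// hypothetical helper standing in for the Map/Reduce branches below
		runTask(replys.Task, mapf, reducef)

		// phase 2: report completion through the same RPC, with Task_finished set
		// (a finished map task would also attach Internal_file here)
		done := RpcArgs{
			Type_request:  replys.Task.Task_type,
			Task_finished: true,
			Task_id:       replys.Task.Task_id,
		}
		call("Coordinator.ApplyForTask", &done, &RpcReply{})
	}
}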

For Reduce, the worker first collects the keys it owns and then scans each relevant intermediate file only once (my earlier version scanned the files once per key, which was far too slow).

json.Encoder and json.Decoder make it easy to write and read the intermediate files as key/value records: each record is stored on its own line as {"Key": xxxx, "Value": xxxx}. The final mr-out-X files are written with Fprintf using the "%v %v" format.
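A minimal, self-contained sketch of that round trip, assuming a word-count style reduce and placeholder file names (mr-demo-0, mr-out-demo) instead of the lab's real mr-X-Y / mr-out-X naming:

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

type KeyValue struct {
	Key   string
	Value string
}

func main() {
	// write intermediate records: one JSON object {"Key":...,"Value":...} per line
	interm, _ := os.Create("mr-demo-0")
	enc := json.NewEncoder(interm)
	enc.Encode(&KeyValue{Key: "foo", Value: "1"})
	enc.Encode(&KeyValue{Key: "foo", Value: "1"})
	enc.Encode(&KeyValue{Key: "bar", Value: "1"})
	interm.Close()

	// read the file back once, grouping values by key (one pass over the file, not one pass per key)
	in, _ := os.Open("mr-demo-0")
	grouped := make(map[string][]string)
	dec := json.NewDecoder(in)
	for {
		var kv KeyValue
		if err := dec.Decode(&kv); err != nil {
			break
		}
		grouped[kv.Key] = append(grouped[kv.Key], kv.Value)
	}
	in.Close()

	// the final output file holds plain "key value" lines written with Fprintf
	out, _ := os.Create("mr-out-demo")
	for k, vs := range grouped {
		fmt.Fprintf(out, "%v %v\n", k, len(vs)) // word-count style: value is the number of occurrences
	}
	out.Close()
}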

func Worker(mapf func(string, string) []KeyValue,
	reducef func(string, []string) string) {
	log.Println("worker start")
	// Your worker implementation here.
	for {
		args := RpcArgs{
			Task_finished: false,
		}
		replys := RpcReply{}
		ok := call("Coordinator.ApplyForTask", &args, &replys)
		// the position may not be suitable
		if replys.Conflict {
			log.Println("conflict, worker exit")
			os.Exit(0)
		}
		if !ok {
			log.Println("RPC call failed, worker exit")
			os.Exit(0)
		}
		if !replys.Is_success {
			log.Println("reply is not success")
			os.Exit(0)
		}
		if replys.Task.Task_type == "Map" {
			log.Println("Start Map worker")
			intermediate := []KeyValue{}
			file, err := os.Open(replys.Task.File_name)
			log.Println(replys.Task.File_name)
			if err != nil {
				log.Fatalf("cannot open %v", replys.Task.File_name)
			}
			content, err := ioutil.ReadAll(file)
			if err != nil {
				log.Fatalf("cannot read %v", replys.Task.File_name)
			}
			file.Close()
			kva := mapf(replys.Task.File_name, string(content))
			intermediate = append(intermediate, kva...)
			sort.Sort(ByKey(intermediate))
			i := 0
			var key_and_file_name []Key_file
			log.Println("i max is", len(intermediate))
			reduce_file_map := make(map[int]bool)
			reduce_json := make(map[int]*json.Encoder)
			reduce_file := make(map[int]*os.File)
			for i < len(intermediate) {
				j := i + 1
				for j < len(intermediate) && intermediate[j].Key == intermediate[i].Key {
					j++
				}
				var files []string
				values := []string{}
				for k := i; k < j; k++ {
					values = append(values, intermediate[k].Value)
				}
				reduce_id := ihash(intermediate[i].Key) % replys.Task.Num_reduce
				file_name := fmt.Sprintf("mr-%d-%d", replys.Task.Task_id, reduce_id)
				var file *os.File
				var ok error
				if !reduce_file_map[reduce_id] {
					log.Println("reduce_id ", reduce_id)
					reduce_file_map[reduce_id] = true
					// file, ok = os.Create(file_name)
					file, ok = ioutil.TempFile(".", "*")
					reduce_file[reduce_id] = file
					// to do temp file
					if ok != nil {
						log.Fatalln("os Create fatal ", replys.Task.Task_id, err)
					}
					reduce_json[reduce_id] = json.NewEncoder(file)
				}
				files = append(files, file_name)
				//log.Printf("create mr-%d-%d", replys.Task.Task_id, reduce_id)
				enc := reduce_json[reduce_id]
				for _, value := range values {
					err := enc.Encode(&KeyValue{
						Key:   intermediate[i].Key,
						Value: value,
					})
					if err != nil {
						log.Fatalln("encode fail", err)
					}
				}
				append_item := Key_file{
					Key:   intermediate[i].Key,
					Files: files,
				}
				key_and_file_name = append(key_and_file_name, append_item)
				i = j
			}
			for reduce_this_id, is_true := range reduce_file_map {
				if is_true {
					reduce_file[reduce_this_id].Close()
				}
			}
			internal_files := make(map[string][]string)
			for _, this_kv := range key_and_file_name {
				key := this_kv.Key
				files := this_kv.Files
				internal_files[key] = append(internal_files[key], files...)
			}
			for my_reduce_id, file_io := range reduce_file {
				s := fmt.Sprintf("./mr-%d-%d", replys.Task.Task_id, my_reduce_id)
				log.Println(s)
				os.Rename(file_io.Name(), s)
			}
			args := RpcArgs{
				Type_request:  "Map",
				Task_finished: true,
				Task_id:       replys.Task.Task_id,
				Internal_file: internal_files,
			}
			// rename the file
			replys := RpcReply{}
			ok := call("Coordinator.ApplyForTask", &args, &replys)
			if !ok {
				log.Fatalln("rpc fail when map finish")
				os.Exit(0)
			}
			log.Println("map worker reply the master")
			continue

		}
		if replys.Task.Task_type == "Reduce" {
			log.Println("Start Reduce worker")
			var err error
			var file_io *os.File
			reduce_map := replys.Task.Files_set
			Key_Value := make(map[string][]string)
			reduce_id := replys.Task.Task_id
			// file_name := fmt.Sprintf("mr-out-%d", reduce_id)
			file_io, err = ioutil.TempFile(".", "*")
			// file_io, err = os.Create(file_name)
			if err != nil {
				log.Fatalln("create file fail")
			}
			log.Println("reduce mr-out succeed", reduce_id)
			for file := range reduce_map.Files {
				in_file, err := os.Open(file)
				if err != nil {
					log.Fatalln("open fail", file)
				}
				dec := json.NewDecoder(in_file)
				for {
					var kv KeyValue
					if err := dec.Decode(&kv); err != nil {
						break
					}
					// keep only the keys that hash to this reduce task
					if reduce_map.Key[kv.Key] {
						Key_Value[kv.Key] = append(Key_Value[kv.Key], kv.Value)
					}
				}
				in_file.Close()
			}
			for Key, Value := range Key_Value {
				result := reducef(Key, Value)
				fmt.Fprintf(file_io, "%v %v\n", Key, result)
			}
			// log.Println(Key, "+", values)
			// enc := json.NewEncoder(file_io)
			// result_kv := KeyValue{
			// 	Key:   Key,
			// 	Value: result,
			// }
			// enc.Encode(&result_kv)
			log.Println("the reduce rename !!!!  ", reduce_id)
			file_io.Close()
			os.Rename(file_io.Name(), fmt.Sprintf("./mr-out-%d", reduce_id))
			return_args := RpcArgs{
				Type_request:  "Reduce",
				Task_finished: true,
				Task_id:       replys.Task.Task_id,
			}
			log.Println("reduce finish")
			return_replys := RpcReply{}
			ok1 := call("Coordinator.ApplyForTask", &return_args, &return_replys)
			if !ok1 {
				log.Fatalln("rpc fail when Reduce finish")
				os.Exit(0)
			}
			log.Println("Reducer worker reply the master")
			// uncomment to send the Example RPC to the coordinator.
			// CallExample()
			continue
		}
	}

}

Part 3: Debugging Lessons and Unresolved Bugs

A channel receive blocks by nature, so do not block on it while holding the lock; otherwise you block inside the critical section and nobody else can acquire the lock. Also, when a Go map's value type is a struct and you need to modify a field of that struct, you cannot assign through the map index; either store a pointer to the struct in the map, or copy the struct out, modify it, and write the whole struct back (a sketch follows).
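A minimal sketch of the map-value pitfall, using an illustrative task struct rather than the lab's Task:

package main

import "fmt"

type task struct{ IsPush bool }

func main() {
	byValue := make(map[int]task)
	byValue[1] = task{}
	// byValue[1].IsPush = true // compile error: cannot assign to struct field byValue[1].IsPush in map

	// option 1: store pointers in the map, then fields can be mutated in place
	byPtr := make(map[int]*task)
	byPtr[1] = &task{}
	byPtr[1].IsPush = true

	// option 2: copy the struct out, modify the copy, and write the whole struct back
	t := byValue[1]
	t.IsPush = true
	byValue[1] = t

	fmt.Println(byPtr[1].IsPush, byValue[1].IsPush) // true true
}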

Resolved bug: multiple RPC handler goroutines could block forever on the (single) task channel. The fix is to receive from the channel with a timeout:

select {
case replys.Task = <-c.Reduce_tasks_chan:
case <-time.After(10 * time.Second):
	// no task within 10 seconds: set Conflict and return (the worker will see it and exit);
	// to retry inside the handler instead, change return to continue
	log.Println("More than 10 second no input, return!!!!!!!!!")
	replys.Conflict = true
	return nil
}

Still unclear: does reducef use the key to decide whether the parallelism reaches 2? (this relates to the "parallel reduce too less" test failure)

On a conflict, should the worker continue instead of exiting (fatal)? Probably either works; the effect is likely the same.

Reference: http://www.youngzy.com/blog/2022/07/mit-6-824-lab-mr-hints-2022/
