// Fans one request out to every leaf in |leafToReqMap| in parallel and
// gathers the per-leaf responses into a single Future.
//
// Each per-leaf RPC is made fault-tolerant: a timeout or an RPC error is
// converted into a default-constructed (empty) LeafResponse, so one slow
// or failing leaf cannot fail the whole fanout.
//
// @param leafToReqMap  which leaves to contact, and the request for each
// @param timeout       per-RPC deadline before substituting an empty response
// @return              Future resolving to one LeafResponse per leaf
Future<vector<LeafResponse>> fanout(const map<Leaf, LeafReq>& leafToReqMap,
                                    chrono::milliseconds timeout) {
  vector<Future<LeafResponse>> leafFutures;
  // One future per leaf; size is known up front, so allocate once.
  leafFutures.reserve(leafToReqMap.size());
  for (const auto& kv : leafToReqMap) {
    const auto& leaf = kv.first;
    const auto& leafReq = kv.second;
    leafFutures.push_back(
        // Get the client for this leaf and do the async RPC.
        getClient(leaf)->futureLeafRPC(leafReq)
            // If the request times out, use an empty response and move on.
            .onTimeout(timeout, [=] { return LeafResponse(); })
            // If there's an error (e.g. RPC exception),
            // use an empty response and move on.
            .onError([=](const exception& e) { return LeafResponse(); }));
  }
  // Collect all the individual leaf requests into one Future.
  return collect(leafFutures);
}

// Some sharding function; possibly dependent on previous responses.
map<Leaf, LeafReq> buildLeafToReqMap(const Request& request,
                                     const vector<LeafResponse>& responses);

// This function assembles our final response.
Response assembleResponse(const Request& request,
                          const vector<LeafResponse>& firstFanoutResponses,
                          const vector<LeafResponse>& secondFanoutResponses);
// Runs the full two-stage fanout for |request| and produces the final
// Response asynchronously.
//
// The Request arrives in a shared_ptr and is captured by copy ([=]) in each
// continuation so it stays alive across both asynchronous stages; the
// stage-1 responses are likewise moved into a shared_ptr so stage 3 can
// reuse them after the stage-2 fanout completes.
Future<Response> twoStageFanout(shared_ptr<Request> request) {
  // Stage 1: first fanout. No responses exist yet, so shard from an empty
  // response list.
  return fanout(buildLeafToReqMap(*request, {}), FIRST_FANOUT_TIMEOUT_MS)
      // Stage 2: With the first fanout completed, initiate the second fanout.
      .then([=](vector<LeafResponse>& responses) {
        // Move (not copy) the stage-1 results into shared ownership so they
        // outlive this callback and remain available to stage 3.
        auto firstFanoutResponses =
            std::make_shared<vector<LeafResponse>>(std::move(responses));
        // This time, sharding is dependent on the first fanout.
        return fanout(buildLeafToReqMap(*request, *firstFanoutResponses),
                      SECOND_FANOUT_TIMEOUT_MS)
            // Stage 3: Assemble and return the final response.
            .then([=](const vector<LeafResponse>& secondFanoutResponses) {
              return assembleResponse(*request, *firstFanoutResponses,
                                      secondFanoutResponses);
            });
      });
}