理解了 RPC 的实现原理,就可以很容易看懂 Hadoop RPC。Hadoop RPC 的相关类位于 hadoop-common 包的 org.apache.hadoop.ipc 下。下面是使用 Hadoop RPC 编写的一个例子:
package com.wf.hadoop.rpc;
/**
 * RPC protocol shared by the client and the server. Both sides must agree on
 * this interface and on {@code versionID} for the Hadoop RPC layer to accept
 * the connection.
 */
public interface ClientProtocol {
    /** Protocol version checked by the Hadoop RPC layer; bump on an incompatible change. */
    long versionID = 1;

    /**
     * Remote method invoked by the client and executed on the server.
     *
     * @param message text sent to the server
     * @return the server's reply string
     */
    String print(String message);
}
package com.wf.hadoop.rpc;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.RPC;
/**
 * Demo RPC client: obtains a {@link ClientProtocol} proxy for a server on
 * localhost:8090, performs one remote call, and prints the reply.
 */
public class Client {
    /**
     * Entry point.
     *
     * @param args unused command-line arguments
     * @throws IOException if the proxy cannot be created or the remote call fails
     */
    public static void main(String[] args) throws IOException {
        // 初始化socket连接,获取代理对象
        // Use the protocol's declared versionID rather than a magic 1L so the
        // version check cannot drift out of sync with ClientProtocol.
        ClientProtocol client = (ClientProtocol) RPC.getProxy(
                ClientProtocol.class,
                ClientProtocol.versionID,
                new InetSocketAddress("localhost", 8090),
                new Configuration());
        try {
            // 发送远程执行请求 — issue the remote invocation.
            String str = client.print("rpc request");
            System.out.println("client:= "+str);
        } finally {
            // Release the proxy's underlying connection resources;
            // the original leaked them.
            RPC.stopProxy(client);
        }
    }
}
package com.wf.hadoop.rpc;
import java.io.IOException;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.RPC;
public class RpcServer implements ClientProtocol{
/**
 * Entry point: configures and starts an RPC server on localhost:8090 that
 * serves the {@link ClientProtocol} interface.
 *
 * @param args unused command-line arguments
 * @throws HadoopIllegalArgumentException if the builder configuration is invalid
 * @throws IOException if the server cannot be created
 */
public static void main(String[] args) throws HadoopIllegalArgumentException, IOException {
    // Assemble the server step by step: bind address and port, the service
    // implementation instance, and the protocol interface it exposes.
    RPC.Builder builder = new RPC.Builder(new Configuration());
    builder.setBindAddress("localhost");
    builder.setPort(8090);
    builder.setInstance(new RpcServer());      // service implementation
    builder.setProtocol(ClientProtocol.class); // protocol interface
    RPC.Server server = builder.build();
    System.out.println("服务启动");
    server.start();
}
@Override
public String print(String message) {
System.out.println("服务端:"+message);
return "