在讨论 RPC 之前,必须先说 IPC,即进程间通信。常见的进程间通信手段包括管道、共享内存、消息队列、socket、信号等。
JDK 中的 RMI 可以算作进程间通信的一种标准模式。下面给出 RMI 的简单 Java 代码实现,然后简要分析其原理。
package com.dong.istudy.ipc;
import java.rmi.Remote;
import java.rmi.RemoteException;
/**
 * Remote service contract for the RMI demo.
 *
 * <p>Extending {@link Remote} marks this interface as remotely invocable;
 * every remote method must declare {@link RemoteException}, which the RMI
 * runtime raises on transport failures.
 */
public interface Hello extends Remote {
    /**
     * Performs a trivial remote computation.
     *
     * @param str input appended to a fixed greeting on the server
     * @return the greeting string produced by the server
     * @throws RemoteException on any RMI transport error
     */
    String doSomething(String str) throws RemoteException;
}
package com.dong.istudy.ipc;
import java.rmi.RemoteException;
import java.rmi.server.UnicastRemoteObject;
public class HelloImpl extends UnicastRemoteObject implements Hello {
private static final long serialVersionUID = 1L;
public HelloImpl() throws RemoteException {
super();
}
public String doSomething(String str) {
return "hello" + str;
}
}
package com.dong.istudy.ipc;

import java.rmi.Naming;
import java.rmi.RMISecurityManager;
import java.rmi.registry.LocateRegistry;
/**
 * Boots the RMI demo server: starts a registry in-process and publishes a
 * {@code HelloImpl} instance under the name "hello".
 */
public class RMIServer {
    /** Default RMI registry port. */
    private static final int REGISTRY_PORT = 1099;

    public static void main(String[] args) throws Exception {
        // Create the registry inside this JVM so the external `rmiregistry`
        // tool is not required; without a running registry, Naming.bind()
        // fails with a ConnectException.
        LocateRegistry.createRegistry(REGISTRY_PORT);

        // NOTE(review): the original installed an RMISecurityManager here. It
        // is only needed for dynamic remote class loading and has been
        // deprecated/removed in modern JDKs, so it is omitted.

        Hello service = new HelloImpl();
        // rebind() (instead of bind()) is idempotent, so restarting the
        // server does not fail with AlreadyBoundException.
        Naming.rebind("hello", service);
        System.out.println("rmi server start ...");
    }
}
package com.dong.istudy.ipc;
import java.net.MalformedURLException;
import java.rmi.Naming;
import java.rmi.NotBoundException;
import java.rmi.RMISecurityManager;
import java.rmi.RemoteException;
/**
 * RMI demo client: looks up the remote {@code Hello} stub by URL and invokes
 * it as if it were a local object.
 */
public class RMIClient {
    public static void main(String[] args) throws Exception {
        // NOTE(review): the original installed an RMISecurityManager; it is
        // only needed for dynamic code download and has been deprecated/
        // removed in modern JDKs, so it is omitted.
        try {
            // Look up the stub by URL; adjust the host to where RMIServer runs.
            Hello hello = (Hello) Naming.lookup("rmi://192.168.76.112/hello");
            String str = hello.doSomething("world");
            System.out.println("result : " + str);
        } catch (MalformedURLException | RemoteException | NotBoundException e) {
            // All three failures were handled identically, so multi-catch
            // replaces the three duplicated catch blocks.
            e.printStackTrace();
        }
    }
}
代码写完后,借助 JDK 提供的相关命令(如 rmiregistry)即可让客户端与服务器端进行通信,客户端调用远程方法就像调用本地方法一样。客户端与服务器端之间传输的数据,是值对象(VO)序列化/反序列化后的字节流。
再看 hadoop 的 RPC。在 HDFS 中,namenode 和 datanode 之间需要进行通信,例如 datanode 要定时向 namenode 发送心跳。HDFS 中利用 RPC 进行通信的 Java 写法和规则与 RMI 非常相似,极大降低了理解成本。不同之处在于,namenode 和 datanode 在 socket 之上传输的是方法的参数和返回值,其序列化协议采用 Google 的 Protocol Buffers(protobuf)。
package com.dong.hadoop.rpc;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RPC.Server;
/**
 * Hadoop-RPC demo server: exposes a {@code HelloImpl} instance on
 * 127.0.0.1:8787.
 */
public class RpcServer {
    public static void main(String[] args) throws IOException {
        Server server = new RPC.Builder(new Configuration())
                // BUG FIX: the protocol must be the interface the published
                // instance actually implements. HelloImpl implements Hello,
                // not ClientProtocol; advertising ClientProtocol here breaks
                // every client call.
                .setProtocol(Hello.class)
                .setInstance(new HelloImpl())
                .setBindAddress("127.0.0.1")
                .setPort(8787)
                .setNumHandlers(5) // worker threads servicing RPC calls
                .build();
        server.start();
    }
}
package com.dong.hadoop.rpc;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.ipc.RPC;
/**
 * Hadoop-RPC demo client: obtains a {@code Hello} proxy and invokes it.
 */
public class RPCClient {
    public static void main(String[] args) throws IOException {
        // BUG FIX: the original passed ClientProtocol.versionID, which does
        // not match the version the server reports (HelloImpl's
        // getProtocolVersion() returns 1). A version mismatch fails the RPC
        // handshake.
        Hello proxy = (Hello) RPC.getProxy(
                Hello.class,
                1L, // must equal the server-side protocol version
                new InetSocketAddress("127.0.0.1", 8787),
                new Configuration());
        try {
            String result = proxy.doSomething("world");
            System.out.println(result);
        } finally {
            // Release the underlying connection; proxies are not closed by GC.
            RPC.stopProxy(proxy);
        }
    }
}
package com.dong.hadoop.rpc;
import org.apache.hadoop.ipc.VersionedProtocol;
/**
 * Hadoop-RPC protocol interface for the demo.
 *
 * <p>Hadoop convention: a protocol interface declares a {@code versionID}
 * constant that clients pass to {@code RPC.getProxy} and the server reports
 * from {@code getProtocolVersion}. Declared here so both sides can share one
 * value instead of hard-coding it.
 */
public interface Hello extends VersionedProtocol {
    /** Protocol version; must match what the server implementation reports. */
    long versionID = 1L;

    /** Remote greeting call implemented by the server. */
    String doSomething(String str);
}
package com.dong.hadoop.rpc;
import java.io.IOException;
import org.apache.hadoop.ipc.ProtocolSignature;
/**
 * Server-side implementation of the Hadoop-RPC {@code Hello} protocol.
 */
public class HelloImpl implements Hello {
    // FIX: the original reused a field named serialVersionUID as the protocol
    // version, which is misleading (this class is not Serializable). The
    // value is unchanged (1L); only the name now says what it is.
    private static final long VERSION_ID = 1L;

    public HelloImpl() {
    }

    /** Builds the reply locally on the server. */
    @Override
    public String doSomething(String str) {
        return "hello" + str;
    }

    /**
     * Describes this protocol to the client during the RPC handshake.
     * A null method list means "all methods are supported".
     */
    @Override
    public ProtocolSignature getProtocolSignature(String protocol, long clientVersion,
            int clientMethodsHash) throws IOException {
        return new ProtocolSignature(VERSION_ID, null);
    }

    /** Reports the protocol version; must match the client-supplied version. */
    @Override
    public long getProtocolVersion(String protocol, long clientVersion) throws IOException {
        return VERSION_ID;
    }
}