I. Server-Side Code Implementation

1. Define the Interface

package cn.sjq.rpc.java;

import org.apache.hadoop.ipc.VersionedProtocol;

/**
 * The IMyNameNode interface extends org.apache.hadoop.ipc.VersionedProtocol.
 * It defines a custom Hadoop RPC protocol that simulates the NameNode creating
 * metadata (creating a directory) and browsing metadata (listing directories and files) on HDFS.
 *
 * @author songjq
 */
public interface IMyNameNode extends VersionedProtocol {

    /*
     * Protocol version ID, used as the protocol signature. The client uses this ID at call
     * time to pick the matching server-side implementation. The field must be named versionID.
     */
    public static long versionID = 1L;

    /*
     * Create a directory.
     */
    public String createForder(String dir) throws Exception;

    /*
     * List a directory, including its subdirectories.
     */
    public String listForder(String dir) throws Exception;
}

2. Implement the Interface

package cn.sjq.rpc.java;

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.ipc.ProtocolSignature;

/**
 * Implementation class of IMyNameNode.
 * Implements the methods declared by the IMyNameNode interface.
 *
 * @author songjq
 */
public class MyNameNodeImpl implements IMyNameNode {

    /*
     * Build a protocol signature from IMyNameNode.versionID.
     * (non-Javadoc)
     * @see org.apache.hadoop.ipc.VersionedProtocol#getProtocolSignature(java.lang.String, long, int)
     */
    @Override
    public ProtocolSignature getProtocolSignature(String protocol, long clientVersion, int clientMethodsHash) throws IOException {
        return new ProtocolSignature(IMyNameNode.versionID, null);
    }

    /*
     * Simply return IMyNameNode.versionID.
     * (non-Javadoc)
     * @see org.apache.hadoop.ipc.VersionedProtocol#getProtocolVersion(java.lang.String, long)
     */
    @Override
    public long getProtocolVersion(String protocol, long clientVersion) throws IOException {
        return IMyNameNode.versionID;
    }

    /*
     * Create a directory on HDFS.
     * (non-Javadoc)
     * @see cn.sjq.rpc.java.IMyNameNode#createForder(java.lang.String)
     */
    @Override
    public String createForder(String dir) throws Exception {
        // Obtain an HDFS client connection
        Configuration conf = new Configuration();
        conf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");
        FileSystem client = FileSystem.get(new URI("hdfs://hadoop-server01:9000"), conf, "root");
        // Create the directory
        boolean mkdirs = client.mkdirs(new Path(dir));
        client.close();
        // Return the result of the operation
        return "Directory ->\t\t" + dir + "\t\t" + (mkdirs ? "successfully created!" : "creation failed!");
    }

    /*
     * List a directory on HDFS.
     * (non-Javadoc)
     * @see cn.sjq.rpc.java.IMyNameNode#listForder(java.lang.String)
     */
    @Override
    public String listForder(String dir) throws Exception {
        // Obtain an HDFS client connection
        Configuration conf = new Configuration();
        conf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");
        FileSystem client = FileSystem.get(new URI("hdfs://hadoop-server01:9000"), conf, "root");
        RemoteIterator<LocatedFileStatus> listFiles = client.listFiles(new Path(dir), true);
        // A StringBuffer that collects the output
        StringBuffer filebuffer = new StringBuffer("Owner \t\t UserPerm \t\t BlockSize \t\t Path \t\t\n");
        // Iterate over listFiles
        while (listFiles.hasNext()) {
            LocatedFileStatus file = listFiles.next();
            String path = file.getPath().toString();
            String owner = file.getOwner();
            long blockSize = file.getBlockSize();
            FsPermission permission = file.getPermission();
            FsAction userAction = permission.getUserAction();
            filebuffer.append(owner).append(" \t\t").append(" ").append(userAction.toString()).append(" \t\t ")
                    .append(blockSize).append(" \t\t ").append(path).append(" \t\t").append("\n");
        }
        client.close();
        return filebuffer.toString();
    }
}
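Before wiring MyNameNodeImpl into the RPC server, it can be useful to call it directly and confirm that the HDFS operations work on their own. The following is only a minimal sketch, not part of the original code: the class name MyNameNodeImplSmokeTest and the test path /rpc/smoke-test are made up for illustration, and it assumes the NameNode at hdfs://hadoop-server01:9000 used above is reachable from the machine running it.

package cn.sjq.rpc.java;

/**
 * Hypothetical smoke test: calls the implementation directly, without going
 * through RPC, to verify that the HDFS operations themselves work.
 */
public class MyNameNodeImplSmokeTest {

    public static void main(String[] args) throws Exception {
        IMyNameNode nameNode = new MyNameNodeImpl();
        // Create a test directory, then list an existing one
        System.out.println(nameNode.createForder("/rpc/smoke-test"));
        System.out.println(nameNode.listForder("/user"));
    }
}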

3. Build the RPC Service

package cn.sjq.rpc.java;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RPC.Builder;
import org.apache.hadoop.ipc.RPC.Server;

/**
 * Build the RPC server and register the business class with it.
 *
 * @author songjq
 */
public class MyRpcInstance {

    public static void main(String[] args) throws Exception {
        // Create a Hadoop RPC builder
        Builder builder = new RPC.Builder(new Configuration());
        // Set the bind address of the RPC server
        builder.setBindAddress("hadoop-server01");
        // Set the RPC port
        builder.setPort(9090);
        // Deploy the IMyNameNode protocol on the RPC server
        builder.setProtocol(IMyNameNode.class);
        // Deploy the IMyNameNode implementation on the RPC server
        builder.setInstance(new MyNameNodeImpl());
        // Build the RPC server
        Server server = builder.build();
        // Start the RPC server
        server.start();
        System.out.println("******* RPC Server has been started...  *********");
    }
}
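main() above returns right after server.start(). If you prefer the main thread to block until the server is stopped, and to stop it cleanly when the JVM exits, org.apache.hadoop.ipc.Server provides join() and stop(). The variant below is only a sketch: the class name MyRpcInstanceBlocking and the shutdown hook are additions for illustration, not part of the original code.

package cn.sjq.rpc.java;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RPC.Server;

/**
 * Sketch of a variant of MyRpcInstance that blocks until the server is
 * stopped and stops it when the JVM is shut down (e.g. Ctrl-C).
 */
public class MyRpcInstanceBlocking {

    public static void main(String[] args) throws Exception {
        Server server = new RPC.Builder(new Configuration())
                .setBindAddress("hadoop-server01")
                .setPort(9090)
                .setProtocol(IMyNameNode.class)
                .setInstance(new MyNameNodeImpl())
                .build();
        server.start();
        System.out.println("******* RPC Server has been started...  *********");
        // Stop the server when the JVM is asked to shut down
        Runtime.getRuntime().addShutdownHook(new Thread(server::stop));
        // Block the main thread until the server has stopped
        server.join();
    }
}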

II. Client-Side Code Implementation

1. Define the Interface

package cn.sjq.rpc.java;

import org.apache.hadoop.ipc.VersionedProtocol;

/**
 * Interface defined on the RPC client side. It must match the IMyNameNode
 * interface defined on the server side exactly.
 */
public interface IMyNameNode extends VersionedProtocol {

    /*
     * Protocol version ID, used as the protocol signature. The client uses this ID at call
     * time to pick the matching server-side implementation. The field must be named versionID.
     */
    public static long versionID = 1L;

    /*
     * Create a directory.
     */
    public String createForder(String dir) throws Exception;

    /*
     * List a directory, including its subdirectories.
     */
    public String listForder(String dir) throws Exception;
}

2. Call the Server Through a Client Proxy

package cn.sjq.rpc.java;

import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.RPC;

/**
 * RPC client. Uses RPC's dynamic proxy to invoke the methods exposed by the RPC server.
 *
 * @author songjq
 */
public class MyRpcClientInstance {

    /**
     * Calling the server-side functionality through RPC returns a proxy object.
     * Parameters of RPC.getProxy:
     *   protocol      - the interface deployed on the server
     *   clientVersion - the version ID deployed on the server
     *   addr          - the address and port the RPC server listens on
     *   conf          - an HDFS Configuration instance
     *
     * @param args
     * @throws Exception
     */
    public static void main(String[] args) throws Exception {
        IMyNameNode proxy = RPC.getProxy(IMyNameNode.class,
                IMyNameNode.versionID,
                new InetSocketAddress("hadoop-server01", 9090),
                new Configuration());
        String createForder = proxy.createForder("/rpc/20180720");
        System.out.println("**************************** Create Directory **********************************");
        System.out.println(createForder);
        String listForder = proxy.listForder("/user");
        System.out.println("**************************** List Directory **********************************");
        System.out.println(listForder);
    }
}
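The proxy returned by RPC.getProxy holds an open connection to the server, which RPC.stopProxy releases. The variant below is only a sketch of one way to add that cleanup: the class name MyRpcClientInstanceWithCleanup is made up for illustration, and the calls simply mirror the client above.

package cn.sjq.rpc.java;

import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.RPC;

/**
 * Sketch of a client variant that releases the proxy connection when done.
 */
public class MyRpcClientInstanceWithCleanup {

    public static void main(String[] args) throws Exception {
        IMyNameNode proxy = RPC.getProxy(IMyNameNode.class,
                IMyNameNode.versionID,
                new InetSocketAddress("hadoop-server01", 9090),
                new Configuration());
        try {
            System.out.println(proxy.createForder("/rpc/20180720"));
            System.out.println(proxy.listForder("/user"));
        } finally {
            // Close the underlying RPC connection held by the proxy
            RPC.stopProxy(proxy);
        }
    }
}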