当NameNode为HA高可用集群时,客户端远程访问HDFS有两种实现方法: (1)将所有关于NameNode的参数写入Configuration对象中 (2)将配置文件core-site.xml和hdfs-site.xml复制到项目的src目录下
(1)样例代码
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
/**
 * Demo: connect to an HA (high-availability) HDFS cluster from a remote client
 * by setting every NameNode-related parameter programmatically on the
 * {@link Configuration} object instead of shipping core-site.xml/hdfs-site.xml.
 *
 * Creates the directory /a on HDFS, then uploads the local file D:\a.txt to /a/a.txt.
 */
public class HADemo {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Logical nameservice URI — the client resolves the active NameNode itself.
        conf.set("fs.defaultFS", "hdfs://my");
        conf.set("dfs.nameservices", "my");
        // The two NameNodes participating in the HA pair.
        conf.set("dfs.ha.namenodes.my", "nn1,nn2");
        conf.set("dfs.namenode.rpc-address.my.nn1", "192.168.12.156:8020");
        // Fixed: was "192.168.157:8020" (missing an octet), which would make
        // failover to nn2 fail with an unresolvable/wrong address.
        conf.set("dfs.namenode.rpc-address.my.nn2", "192.168.12.157:8020");
        // Proxy provider that performs client-side failover between nn1 and nn2.
        conf.set("dfs.client.failover.proxy.provider.my",
                "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");
        try {
            // Access the cluster as user "root"; FileSystem is Closeable,
            // so close it deterministically with try-with-resources.
            try (FileSystem fs = FileSystem.get(new URI("hdfs://my"), conf, "root")) {
                boolean created = fs.mkdirs(new Path("/a"));
                System.out.println(created);
                fs.copyFromLocalFile(new Path("D:\\a.txt"), new Path("/a/a.txt"));
            }
        } catch (Exception e) {
            // Print the full stack trace instead of just the exception's
            // toString(), so connection/failover problems are diagnosable.
            e.printStackTrace();
        }
    }
}
(2)运行结果
log4j:WARN No appenders could be found for logger (org.apache.hadoop.metrics2.lib.MutableMetricsFactory).
log4j:WARN Please initialize the log4j system properly.
log4j:WARN See http://logging.apache.org/log4j/1.2/faq.html#noconfig for more info.
true
(3)HDFS命令验证
[root@node1 ~]# hdfs dfs -ls /a
Found 2 items
-rw-r--r-- 3 root supergroup 13 2018-08-08 09:11 /a/a.txt
[root@node1 ~]# hdfs dfs -cat /a/a.txt
Hello,Hadoop!
[root@node1 ~]#