Common HDFS APIs (1)
I. Maven configuration for the JARs required by the HDFS cluster API
<dependency>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-common</artifactId>
    <version>2.8.4</version>
</dependency>
<dependency>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-hdfs</artifactId>
    <version>2.8.4</version>
</dependency>
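(Alternatively, the single hadoop-client artifact at the same version can be declared instead; it pulls in hadoop-common and the HDFS client libraries transitively.)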
II. Downloading a file from HDFS / uploading a file to HDFS
Approach:
1. Obtain a configuration object
2. Set configuration values (block size, replica count)
3. Construct the client
4. Download / upload the data file
5. Close resources
(1) Downloading a file
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;

/**
 * @author: princesshug
 * @date: 2019/3/18, 16:10
 * @blog: https://www.cnblogs.com/hellobigtable/
 */
public class HdfsClientDemo02 {
    public static void main(String[] args) throws URISyntaxException, IOException, InterruptedException {
        // Configuration object
        Configuration conf = new Configuration();
        // Set specific configuration values
        conf.set("dfs.replication", "2");
        // Construct the client
        FileSystem fs = FileSystem.get(new URI("hdfs://192.168.126.128:9000/"), conf, "root");
        // Download the file to the local file system
        fs.copyToLocalFile(new Path("/words1.txt"), new Path("f://words1.txt"));
        // Close resources
        fs.close();
        System.out.println("Download complete");
    }
}
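Note that on a Windows client without the native Hadoop binaries, the two-argument copyToLocalFile may fail or leave a .crc checksum side file next to the download. FileSystem also offers a four-argument overload that writes through the raw local file system instead. A minimal sketch (the class name HdfsDownloadRawDemo is made up here; the URI and paths are reused from the example above):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.net.URI;

public class HdfsDownloadRawDemo {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(new URI("hdfs://192.168.126.128:9000/"), conf, "root");
        // delSrc = false keeps the source file on HDFS;
        // useRawLocalFileSystem = true bypasses the checksummed local FS,
        // so no .crc side file is written locally
        fs.copyToLocalFile(false, new Path("/words1.txt"), new Path("f://words1.txt"), true);
        fs.close();
        System.out.println("Download complete");
    }
}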
(2) Uploading a file
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;

/**
 * @author: princesshug
 * @date: 2019/3/18, 11:53
 * @blog: https://www.cnblogs.com/hellobigtable/
 */
public class HdfsClientDemo01 {
    public static void main(String[] args) throws URISyntaxException, IOException, InterruptedException {
        // Configuration object
        Configuration conf = new Configuration();
        // Configure block size and replica count
        conf.set("dfs.blocksize", "64m");
        conf.set("dfs.replication", "2");
        // Construct the client
        FileSystem fs = FileSystem.get(new URI("hdfs://192.168.126.128:9000/"), conf, "root");
        // Upload a local file to HDFS
        fs.copyFromLocalFile(new Path("/root/love.tsv"), new Path("/love1.tsv"));
        // Close resources
        fs.close();
        System.out.println("Upload complete!");
    }
}
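For finer control over the write, the upload can also be done through streams: fs.create opens an FSDataOutputStream on HDFS, and Hadoop's IOUtils.copyBytes pumps the local file into it. A minimal sketch (the class name HdfsStreamUploadDemo and the target path /love2.tsv are made up here; the cluster URI is reused from above):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

import java.io.FileInputStream;
import java.net.URI;

public class HdfsStreamUploadDemo {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(new URI("hdfs://192.168.126.128:9000/"), conf, "root");
        // Open a local input stream and an HDFS output stream
        FileInputStream in = new FileInputStream("/root/love.tsv");
        FSDataOutputStream out = fs.create(new Path("/love2.tsv"));
        // Copy with a 4 KB buffer; the final 'true' closes both streams
        IOUtils.copyBytes(in, out, 4096, true);
        fs.close();
        System.out.println("Upload complete!");
    }
}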
III. APIs for operating on the HDFS file system
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Arrays;

/**
 * @author: princesshug
 * @date: 2019/3/18, 16:16
 * @blog: https://www.cnblogs.com/hellobigtable/
 */
public class HdfsClientDemo {
    private static FileSystem fs = null;

    static {
        Configuration conf = new Configuration();
        conf.set("dfs.blocksize", "64m");
        conf.set("dfs.replication", "3");
        try {
            fs = FileSystem.get(new URI("hdfs://192.168.126.128:9000/"), conf, "root");
        } catch (IOException | InterruptedException | URISyntaxException e) {
            e.printStackTrace();
        }
    }

    /**
     * Create a directory
     * @param path directory to create
     * @throws IOException
     */
    public void mkdir(String path) throws IOException {
        fs.mkdirs(new Path(path));
    }

    /**
     * Rename or move a file
     * @param path1 source path
     * @param path2 destination path
     * @throws IOException
     */
    public void hdfsRename(String path1, String path2) throws IOException {
        fs.rename(new Path(path1), new Path(path2));
    }

    /**
     * Delete a file or directory (recursively)
     * @param path path to delete
     * @throws IOException
     */
    public void delete(String path) throws IOException {
        fs.delete(new Path(path), true);
    }

    /**
     * List the files under the given HDFS directory (recursively)
     * @param path directory to list
     * @throws IOException
     */
    public void list(String path) throws IOException {
        RemoteIterator<LocatedFileStatus> iterator = fs.listFiles(new Path(path), true);
        while (iterator.hasNext()) {
            // Fetch the next file's status
            LocatedFileStatus status = iterator.next();
            System.out.println("Path: " + status.getPath());
            System.out.println("Block size: " + status.getBlockSize());
            System.out.println("Block locations: " + Arrays.toString(status.getBlockLocations()));
            System.out.println("Length: " + status.getLen());
            System.out.println("Replication: " + status.getReplication());
            System.out.println("=====================================================");
        }
    }

    /**
     * Report whether each entry under a path is a file or a directory
     * @param path path to inspect
     * @throws IOException
     */
    public void judgeFileOrDir(String path) throws IOException {
        // Get the status of every entry directly under the path
        FileStatus[] fileStatuses = fs.listStatus(new Path(path));
        // Iterate over all entries
        for (FileStatus status : fileStatuses) {
            if (status.isFile()) {
                System.out.println("File-----f------" + status.getPath().getName());
            } else {
                System.out.println("Dir -----d------" + status.getPath().getName());
            }
        }
    }

    /**
     * Close the shared FileSystem once all operations are finished;
     * closing it inside each method would break any later call that
     * reuses the shared instance.
     * @throws IOException
     */
    public void close() throws IOException {
        fs.close();
    }
}
import java.io.IOException;

public class HdfsDriver {
    public static void main(String[] args) {
        HdfsClientDemo hcd = new HdfsClientDemo();
        try {
            //hcd.mkdir("/wyh");
            hcd.judgeFileOrDir("/");
            hcd.list("/");
            hcd.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
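Since FileSystem implements java.io.Closeable, another way to avoid the shared static instance being closed too early is to scope the client with try-with-resources, so it is closed automatically when the block exits. A minimal sketch (the class name HdfsTryWithResourcesDemo is made up here):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.net.URI;

public class HdfsTryWithResourcesDemo {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // The FileSystem is closed automatically at the end of the block
        try (FileSystem fs = FileSystem.get(new URI("hdfs://192.168.126.128:9000/"), conf, "root")) {
            System.out.println(fs.exists(new Path("/")));
        }
    }
}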