
Flink Custom Sink: Writing to HDFS

1. Add the dependencies to pom.xml.

<dependency>
    <groupId>org.apache.flink</groupId>
    <artifactId>flink-connector-filesystem_${scala.binary.version}</artifactId>
    <version>${flink.version}</version>
</dependency>
<!-- HDFS client -->
<dependency>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-common</artifactId>
    <version>${hadoop.version}</version>
    <scope>provided</scope>
</dependency>
<dependency>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-hdfs</artifactId>
    <version>${hadoop.version}</version>
    <scope>provided</scope>
    <exclusions>
        <exclusion>
            <groupId>xml-apis</groupId>
            <artifactId>xml-apis</artifactId>
        </exclusion>
    </exclusions>
</dependency>
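
These snippets reference Maven properties. A minimal sketch of the matching <properties> block follows; the version numbers are assumptions, not from the original project, so adjust them to your own Flink/Hadoop/Scala setup (note that flink-connector-filesystem only exists up to Flink 1.11):

<properties>
    <!-- Assumed versions; adjust to match your cluster -->
    <scala.binary.version>2.11</scala.binary.version>
    <flink.version>1.7.2</flink.version>
    <hadoop.version>2.7.3</hadoop.version>
</properties>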

2. The HDFS helper class.

import java.io.{BufferedReader, InputStreamReader}
import java.util.{ArrayList, List}

import org.apache.commons.lang3.StringUtils // assumed; swap for commons-lang if that is what is on your classpath
import org.apache.hadoop.fs.{FileSystem, Path}
import org.slf4j.{Logger, LoggerFactory}

object HdfsUtil {
  private val LOGGER: Logger = LoggerFactory.getLogger(HdfsUtil.getClass)

  /**
    * Check whether the path is a directory.
    */
  def isDir(hdfs: FileSystem, path: String): Boolean = {
    if(StringUtils.isEmpty(path)) {
      return false
    }

    hdfs.isDirectory(new Path(path))
  }

  /**
    * Check whether the path is a directory.
    */
  def isDir(hdfs: FileSystem, path: Path): Boolean = {
    hdfs.isDirectory(path)
  }

  /**
    * Check whether the path is a file.
    */
  def isFile(hdfs: FileSystem, path: String): Boolean = {
    if(StringUtils.isEmpty(path)) {
      return false
    }

    hdfs.isFile(new Path(path))
  }

  /**
    * Check whether the path is a file.
    */
  def isFile(hdfs: FileSystem, path: Path): Boolean = {
    hdfs.isFile(path)
  }

  /**
    * Create a new file.
    */
  def createFile(hdfs: FileSystem, path: String): Boolean = {
    if(StringUtils.isEmpty(path)) {
      return false
    }

    hdfs.createNewFile(new Path(path))
  }

  /**
    * Create a new file.
    */
  def createFile(hdfs: FileSystem, path: Path): Boolean = {
    hdfs.createNewFile(path)
  }

  /**
    * Create a directory.
    */
  def createDir(hdfs: FileSystem, path: String): Boolean = {
    if(StringUtils.isEmpty(path)) {
      return false
    }

    hdfs.mkdirs(new Path(path))
  }

  /**
    * Create a directory.
    */
  def createDir(hdfs: FileSystem, path: Path): Boolean = {
    hdfs.mkdirs(path)
  }

  /**
    * Check whether the path exists.
    */
  def exists(hdfs: FileSystem, path: String): Boolean = {
    if(StringUtils.isEmpty(path)) {
      return false
    }

    hdfs.exists(new Path(path))
  }

  /**
    * Check whether the path exists.
    */
  def exists(hdfs: FileSystem, path: Path): Boolean = {
    hdfs.exists(path)
  }

  /**
    * Delete a path recursively.
    */
  def delete(hdfs: FileSystem, path: String): Boolean = {
    if(StringUtils.isEmpty(path)) {
      return false
    }

    hdfs.delete(new Path(path), true)
  }

  /**
    * Delete a path recursively.
    */
  def delete(hdfs: FileSystem, path: Path): Boolean = {
    hdfs.delete(path, true)
  }

  /**
    * Append a line to a file, creating the file first if necessary.
    */
  def append(hdfs: FileSystem, path: String, content: String): Boolean = {
    if(StringUtils.isEmpty(path) || content == null) {
      return false
    }

    if(!exists(hdfs, path)) {
      createFile(hdfs, path)
    }
    // Appends also require dfs.support.append to be enabled on the cluster
    hdfs.getConf.setBoolean("dfs.support.append", true)

    try {
      val append = hdfs.append(new Path(path))
      append.write(content.getBytes("UTF-8"))
      append.write(10) // newline (LF)
      append.flush()
      append.close()
      true
    } catch {
      case e: Exception => {
        LOGGER.error(s"append file exception, path{$path}, content{$content}", e)
        false
      }
    }
  }

  /**
    * Read a file into a byte array.
    */
  def read(hdfs: FileSystem, file: String): Array[Byte] = {
    val result = new Array[Byte](0)
    if(StringUtils.isEmpty(file)) {
      return result
    }
    if(!exists(hdfs, file)) {
      return result
    }

    var isr: InputStreamReader = null
    var br: BufferedReader = null
    try {
      val path = new Path(file)
      val inputStream = hdfs.open(path)
      isr = new InputStreamReader(inputStream)
      br = new BufferedReader(isr)

      val content = new StringBuilder
      var line: String = br.readLine()
      while (line != null) {
        content ++= line
        line = br.readLine()
      }

      content.toString().getBytes("UTF-8")
    } catch {
      case e: Exception => {
        LOGGER.error(s"read file exception, file{$file}", e)
        result
      }
    } finally {
      try {
        if (br != null) br.close()
      } catch {
        case _: Exception =>
      }
      try {
        if (isr != null) isr.close()
      } catch {
        case _: Exception =>
      }
    }
  }

  /**
    * Upload a local file to HDFS.
    */
  def uploadLocalFile(hdfs: FileSystem, localPath: String, hdfsPath: String): Boolean = {
    if(StringUtils.isEmpty(localPath) || StringUtils.isEmpty(hdfsPath)) {
      return false
    }

    val src = new Path(localPath)
    val dst = new Path(hdfsPath)
    hdfs.copyFromLocalFile(src, dst)
    true
  }

  /**
    * List all entries under a directory.
    */
  def list(hdfs: FileSystem, path: String): List[String] = {
    val list: List[String] = new ArrayList[String]()
    if(StringUtils.isEmpty(path)) {
      return list
    }

    val stats = hdfs.listStatus(new Path(path))
    for (i <- stats.indices) {
      if(stats(i).isFile) {
        // path.getName: just the file name, without the path
        // path.getParent: just the parent directory name, not the full path
        // path.toString: the full path
        list.add(stats(i).getPath.toString)
      } else if(stats(i).isDirectory) {
        list.add(stats(i).getPath.toString)
      } else if(stats(i).isSymlink) {
        list.add(stats(i).getPath.toString)
      }
    }

    list
  }
}
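
A quick usage sketch of the helper class; the fs.defaultFS address and paths below are illustrative, not from the original project:

object HdfsUtilDemo {
  def main(args: Array[String]): Unit = {
    val conf = new org.apache.hadoop.conf.Configuration()
    conf.set("fs.defaultFS", "hdfs://namenode:9000") // illustrative namenode address
    val fs = org.apache.hadoop.fs.FileSystem.get(conf)

    HdfsUtil.createDir(fs, "/tmp/flink-demo")
    HdfsUtil.append(fs, "/tmp/flink-demo/demo.log", "hello hdfs")

    // list returns a java.util.List[String] of full paths
    val entries = HdfsUtil.list(fs, "/tmp/flink-demo")
    for (i <- 0 until entries.size()) println(entries.get(i))

    fs.close()
  }
}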

3. The custom sink.

import java.util.{Date, Properties}

import com.igg.flink.tool.common.Constant
import com.igg.flink.tool.member.bean.MemberLogInfo
import com.igg.flink.tool.utils.{DateUtil, FileUtil, HdfsUtil}
import org.apache.flink.configuration.Configuration
import org.apache.flink.streaming.api.functions.sink.{RichSinkFunction, SinkFunction}
import org.apache.hadoop.fs.FileSystem

@SerialVersionUID(10000L)
class HdfsSink extends RichSinkFunction[MemberLogInfo] {

  var default_fs: String = null
  var conf: org.apache.hadoop.conf.Configuration = null
  var fs: FileSystem = null
  var rabbitmqProps: Properties = null

  override def open(parameters: Configuration): Unit = {
    // Load the configuration file
    rabbitmqProps = FileUtil.loadProperties(Constant.CONFIG_NAME_RABBITMQ)
    default_fs = rabbitmqProps.getProperty("hdfs.fs.default.fs")

    conf = new org.apache.hadoop.conf.Configuration()
    // Use HDFS as the filesystem and point at the namenode.
    // Port 9000 serves filesystem metadata operations.
    conf.set("fs.defaultFS", default_fs)
    // Disable the HDFS FileSystem cache if needed
    // conf.setBoolean("fs.hdfs.impl.disable.cache", true)
    fs = FileSystem.get(conf)

    // Initialize the output directory
    HdfsUtil.createDir(fs, rabbitmqProps.getProperty("hdfs.save.path.login"))
  }

  override def invoke(value: MemberLogInfo, context: SinkFunction.Context[_]): Unit = {
    // File name, derived from the record's timestamp
    val fileName = DateUtil.format(new Date(value.getTimestamp), DateUtil.yyyyMMddSpt) + ".log"
    // Line to write
    val content = value.getIggid + "\t" + value.getType

    HdfsUtil.append(fs, rabbitmqProps.getProperty("hdfs.save.path.login") + fileName, content)
  }

  override def close(): Unit = {
    if (fs != null) {
      try {
        fs.close()
      } catch {
        case _: Exception => {}
      }
    }
  }
}
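
Wiring the sink into a job is then a one-liner. A minimal sketch follows, assuming a DataStream[MemberLogInfo] named loginStream already exists (for example from the RabbitMQ source this project reads); the job name is illustrative:

import org.apache.flink.streaming.api.scala._

val env = StreamExecutionEnvironment.getExecutionEnvironment
// loginStream: DataStream[MemberLogInfo], obtained elsewhere (assumed)
loginStream
  .addSink(new HdfsSink)
  .setParallelism(1) // the native-API append tolerates only a single writer, see summary below
env.execute("member-login-to-hdfs")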

4. Summary.

(1) To implement a custom sink, you only need to extend SinkFunction or RichSinkFunction. The difference is that RichSinkFunction provides richer lifecycle methods, such as open() and close().

(2) Writing to HDFS here goes through the native HDFS client API. That approach only works for a single writer; it is not safe for multi-threaded (parallel) writes.

(3) If you need parallel writes to HDFS, use StreamingFileSink or BucketingSink instead; see my earlier articles for details, and a minimal sketch follows below.
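
For reference, a minimal StreamingFileSink sketch (Flink 1.6+). loginStream and the output path are illustrative, and note that StreamingFileSink relies on checkpointing being enabled to roll in-progress files to a finished state:

import org.apache.flink.api.common.serialization.SimpleStringEncoder
import org.apache.flink.core.fs.Path
import org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink
import org.apache.flink.streaming.api.scala._

val rollingSink: StreamingFileSink[String] = StreamingFileSink
  .forRowFormat(new Path("hdfs://namenode:9000/flink/login/"), new SimpleStringEncoder[String]("UTF-8"))
  .build()

loginStream
  .map(m => s"${m.getIggid}\t${m.getType}") // one record per line, same layout as invoke() above
  .addSink(rollingSink) // safe at parallelism > 1, unlike the raw append approach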


If anything here is wrong, corrections are welcome. For questions, feel free to join QQ group 176098255.

Author: magic_kid_2010