Installing and Using Flume

Installation Steps

  1. Flume resources
1) Flume website
http://flume.apache.org/
2) Documentation
http://flume.apache.org/FlumeUserGuide.html
3) Downloads
http://archive.apache.org/dist/flume/
  2. Installation and deployment
1) Upload apache-flume-1.7.0-bin.tar.gz to the /opt/software directory on the Linux host
2) Extract apache-flume-1.7.0-bin.tar.gz into the /opt/module/ directory
	[root@hadoop102 software]$ tar -zxvf apache-flume-1.7.0-bin.tar.gz -C /opt/module/
3) Rename apache-flume-1.7.0-bin to flume
	[root@hadoop102 software]$ mv apache-flume-1.7.0-bin flume
4) Rename the flume-env.sh.template file under flume/conf to flume-env.sh, then configure flume-env.sh
	[root@hadoop102 conf]$ mv flume-env.sh.template flume-env.sh
	[root@hadoop102 conf]$ vim flume-env.sh
	export JAVA_HOME=/opt/module/jdk1.8.0_144
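To verify the installation, you can print the Flume version; if JAVA_HOME is set correctly, this should report release 1.7.0:
	[root@hadoop102 flume]$ bin/flume-ng version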

Hands-on Cases

  1. Reading files from a directory into HDFS in real time
    Requirement: monitor a directory in real time and upload newly arrived files to HDFS
Create a file
[root@hadoop102 job]$ touch flume-dir-hdfs.conf
Open the file
[root@hadoop102 job]$ vim flume-dir-hdfs.conf
Add the following content
a3.sources = r3
a3.sinks = k3
a3.channels = c3

# Describe/configure the source
a3.sources.r3.type = spooldir
a3.sources.r3.spoolDir = /opt/module/flume/upload
a3.sources.r3.fileSuffix = .COMPLETED
a3.sources.r3.fileHeader = true
# Ignore files ending in .tmp; do not upload them
a3.sources.r3.ignorePattern = ([^ ]*\.tmp)

# Describe the sink
a3.sinks.k3.type = hdfs
a3.sinks.k3.hdfs.path = hdfs://hadoop101:9000/flume/upload/%Y%m%d/%H
# Prefix for uploaded files
a3.sinks.k3.hdfs.filePrefix = upload-
# Whether to create time-based folders by rounding the timestamp
a3.sinks.k3.hdfs.round = true
# Number of time units per new folder
a3.sinks.k3.hdfs.roundValue = 1
# Time unit used for rounding
a3.sinks.k3.hdfs.roundUnit = hour
# Whether to use the local timestamp
a3.sinks.k3.hdfs.useLocalTimeStamp = true

# Number of events to accumulate before flushing to HDFS
a3.sinks.k3.hdfs.batchSize = 100
# File type; compression is supported
a3.sinks.k3.hdfs.fileType = DataStream
# Roll to a new file every 30 seconds
a3.sinks.k3.hdfs.rollInterval = 30
# Roll when a file reaches roughly 128 MB
a3.sinks.k3.hdfs.rollSize = 134217700
# Never roll based on the number of events
a3.sinks.k3.hdfs.rollCount = 0

# Use a channel which buffers events in memory
a3.channels.c3.type = memory
a3.channels.c3.capacity = 1000
a3.channels.c3.transactionCapacity = 100

# Bind the source and sink to the channel
a3.sources.r3.channels = c3
a3.sinks.k3.channel = c3
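Note: the spooldir source requires the monitored directory to exist before the agent starts, so create /opt/module/flume/upload first if it is not already there:
[root@hadoop102 flume]$ mkdir upload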

2) Run the monitoring configuration

[root@hadoop102 flume]$ bin/flume-ng agent --conf conf/ --name a3 --conf-file job/flume-dir-hdfs.conf

3) Start Hadoop, then add files to the monitored /opt/module/flume/upload directory

[root@hadoop102 hadoop-2.7.2]$ sbin/start-dfs.sh
[root@hadoop102 hadoop-2.7.2]$ sbin/start-yarn.sh
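For example (the NOTICE file shipped with Flume is used here purely as an illustration; any file works), copy a file into the directory; once Flume has consumed it, it should be renamed with the .COMPLETED suffix configured above:
[root@hadoop102 flume]$ cp NOTICE upload/
[root@hadoop102 flume]$ ls upload/
NOTICE.COMPLETED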
  2. Single data source, multiple outputs
1) Requirement: flume-1 monitors file changes and passes them to flume-2, which stores them in HDFS; flume-1 also passes the changes to flume-3, which writes them to the local filesystem.
2) Steps:
Create a group1 directory under the job directory
[root@hadoop102 job]$ mkdir group1
[root@hadoop102 job]$ cd group1
Create a flume3 directory under /opt/module/datas/
[root@hadoop102 datas]$ mkdir flume3

1. Create flume-file-flume.conf
	Configure one source that reads the log file, two channels, and two sinks that feed flume-flume-hdfs and flume-flume-dir respectively.
Create and open the configuration file
[root@hadoop102 group1]$ touch flume-file-flume.conf
[root@hadoop102 group1]$ vim flume-file-flume.conf
Add the following content
# Name the components on this agent
a1.sources = r1
a1.sinks = k1 k2
a1.channels = c1 c2
# Replicate the data flow to multiple channels
a1.sources.r1.selector.type = replicating

# Describe/configure the source
a1.sources.r1.type = exec
a1.sources.r1.command = tail -F /opt/module/hive/logs/hive.log
a1.sources.r1.shell = /bin/bash -c

# Describe the sink
a1.sinks.k1.type = avro
a1.sinks.k1.hostname = hadoop102
a1.sinks.k1.port = 4141

a1.sinks.k2.type = avro
a1.sinks.k2.hostname = hadoop102
a1.sinks.k2.port = 4142

# Describe the channel
a1.channels.c1.type = memory
a1.channels.c1.capacity = 1000
a1.channels.c1.transactionCapacity = 100

a1.channels.c2.type = memory
a1.channels.c2.capacity = 1000
a1.channels.c2.transactionCapacity = 100

# Bind the source and sink to the channel
a1.sources.r1.channels = c1 c2
a1.sinks.k1.channel = c1
a1.sinks.k2.channel = c2

Note: Avro is a language-independent data serialization and RPC framework created by Hadoop founder Doug Cutting.
Note: RPC (Remote Procedure Call) is a protocol for requesting a service from a program on a remote computer over a network, without needing to understand the underlying network technology.
2. Create flume-flume-hdfs.conf
Configure a source that receives the upstream flume's output and a sink that writes to HDFS.
Create and open the configuration file
[root@hadoop102 group1]$ touch flume-flume-hdfs.conf
[root@hadoop102 group1]$ vim flume-flume-hdfs.conf

Add the following content
# Name the components on this agent
a2.sources = r1
a2.sinks = k1
a2.channels = c1

# Describe/configure the source
a2.sources.r1.type = avro
a2.sources.r1.bind = hadoop102
a2.sources.r1.port = 4141

# Describe the sink
a2.sinks.k1.type = hdfs
a2.sinks.k1.hdfs.path = hdfs://hadoop101:9000/flume2/%Y%m%d/%H
# Prefix for uploaded files
a2.sinks.k1.hdfs.filePrefix = flume2-
# Whether to create time-based folders by rounding the timestamp
a2.sinks.k1.hdfs.round = true
# Number of time units per new folder
a2.sinks.k1.hdfs.roundValue = 1
# Time unit used for rounding
a2.sinks.k1.hdfs.roundUnit = hour
# Whether to use the local timestamp
a2.sinks.k1.hdfs.useLocalTimeStamp = true
# Number of events to accumulate before flushing to HDFS
a2.sinks.k1.hdfs.batchSize = 100
# File type; compression is supported
a2.sinks.k1.hdfs.fileType = DataStream
# Roll to a new file every 600 seconds
a2.sinks.k1.hdfs.rollInterval = 600
# Roll when a file reaches roughly 128 MB
a2.sinks.k1.hdfs.rollSize = 134217700
# Never roll based on the number of events
a2.sinks.k1.hdfs.rollCount = 0
# Minimum number of block replicas
a2.sinks.k1.hdfs.minBlockReplicas = 1

# Describe the channel
a2.channels.c1.type = memory
a2.channels.c1.capacity = 1000
a2.channels.c1.transactionCapacity = 100

# Bind the source and sink to the channel
a2.sources.r1.channels = c1
a2.sinks.k1.channel = c1


3. Create flume-flume-dir.conf
Configure a source that receives the upstream flume's output and a sink that writes to a local directory.
Create and open the configuration file
[root@hadoop102 group1]$ touch flume-flume-dir.conf
[root@hadoop102 group1]$ vim flume-flume-dir.conf
Add the following content
# Name the components on this agent
a3.sources = r1
a3.sinks = k1
a3.channels = c2

# Describe/configure the source
a3.sources.r1.type = avro
a3.sources.r1.bind = hadoop102
a3.sources.r1.port = 4142

# Describe the sink
a3.sinks.k1.type = file_roll
a3.sinks.k1.sink.directory = /opt/module/datas/flume3

# Describe the channel
a3.channels.c2.type = memory
a3.channels.c2.capacity = 1000
a3.channels.c2.transactionCapacity = 100

# Bind the source and sink to the channel
a3.sources.r1.channels = c2
a3.sinks.k1.channel = c2

Note: the local output directory must already exist; the file_roll sink will not create it for you.
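Also note that by default the file_roll sink rolls to a new output file every 30 seconds; to keep everything in a single file, its sink.rollInterval property can be set to 0 to disable rolling, e.g.:
a3.sinks.k1.sink.rollInterval = 0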
4. Run the configuration files
Start the agents with their respective configurations, downstream first so the Avro sources are listening before the upstream Avro sinks connect: flume-flume-dir, flume-flume-hdfs, then flume-file-flume.
[root@hadoop102 flume]$ bin/flume-ng agent --conf conf/ --name a3 --conf-file job/group1/flume-flume-dir.conf
[root@hadoop102 flume]$ bin/flume-ng agent --conf conf/ --name a2 --conf-file job/group1/flume-flume-hdfs.conf
[root@hadoop102 flume]$ bin/flume-ng agent --conf conf/ --name a1 --conf-file job/group1/flume-file-flume.conf
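Each agent runs in the foreground, so start each one in its own terminal. A quick way to confirm all three are running is jps; each Flume agent should show up as an Application process:
[root@hadoop102 flume]$ jps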

5. Start hadoop and hive
[root@hadoop102 hadoop-2.7.2]$ sbin/start-dfs.sh
[root@hadoop102 hadoop-2.7.2]$ sbin/start-yarn.sh
[root@hadoop102 hive]$ bin/hive
hive (default)>

6. Check the data on HDFS
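For example, list the sink's output recursively (the path matches the hdfs.path configured above; the date-based subdirectories will reflect the current time):
[root@hadoop102 hadoop-2.7.2]$ bin/hdfs dfs -ls -R /flume2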

7. Check the data in the /opt/module/datas/flume3 directory
[root@hadoop102 flume3]$ ll
total 8
-rw-rw-r--. 1 root root 5942 May 22 00:09 1526918887550-3
  3. Aggregating multiple data sources
    1) Requirement:
    flume-1 on hadoop102 monitors the file hive.log,
    flume-2 on hadoop103 monitors the data stream on a port,
    flume-1 and flume-2 send their data to flume-3 on hadoop102, and flume-3 prints the final data to the console
0. Preparation
Distribute flume to hadoop103
[root@hadoop102 module]$ scp -r flume root@hadoop103:/opt/module/
	Create a group2 directory under /opt/module/flume/job on both hadoop102 and hadoop103
[root@hadoop102 job]$ mkdir group2
[root@hadoop103 job]$ mkdir group2
1. Create flume1.conf
Configure a source that monitors the hive.log file and a sink that sends the data to the next-level flume.
Create and open the configuration file on hadoop102
[root@hadoop102 group2]$ touch flume1.conf
[root@hadoop102 group2]$ vim flume1.conf
Add the following content
# Name the components on this agent
a1.sources = r1
a1.sinks = k1
a1.channels = c1

# Describe/configure the source
a1.sources.r1.type = exec
a1.sources.r1.command = tail -F /opt/module/hive/logs/hive.log
a1.sources.r1.shell = /bin/bash -c

# Describe the sink
a1.sinks.k1.type = avro
a1.sinks.k1.hostname = hadoop102
a1.sinks.k1.port = 4141

# Describe the channel
a1.channels.c1.type = memory
a1.channels.c1.capacity = 1000
a1.channels.c1.transactionCapacity = 100

# Bind the source and sink to the channel
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1
2. Create flume2.conf
Configure a source that monitors the data stream on port 44444 and a sink that sends the data to the next-level flume:
Create and open the configuration file on hadoop103
[root@hadoop103 group2]$ touch flume2.conf
[root@hadoop103 group2]$ vim flume2.conf
Add the following content
# Name the components on this agent
a2.sources = r1
a2.sinks = k1
a2.channels = c1

# Describe/configure the source
a2.sources.r1.type = netcat
a2.sources.r1.bind = hadoop103
a2.sources.r1.port = 44444

# Describe the sink
a2.sinks.k1.type = avro
a2.sinks.k1.hostname = hadoop102
a2.sinks.k1.port = 4141

# Use a channel which buffers events in memory
a2.channels.c1.type = memory
a2.channels.c1.capacity = 1000
a2.channels.c1.transactionCapacity = 100

# Bind the source and sink to the channel
a2.sources.r1.channels = c1
a2.sinks.k1.channel = c1
3. Create flume3.conf
Configure a source that receives the data streams sent by flume1 and flume2, and a sink that prints the merged data to the console.
Create and open the configuration file on hadoop102
[root@hadoop102 group2]$ touch flume3.conf
[root@hadoop102 group2]$ vim flume3.conf
Add the following content
# Name the components on this agent
a3.sources = r1
a3.sinks = k1
a3.channels = c1

# Describe/configure the source
a3.sources.r1.type = avro
a3.sources.r1.bind = hadoop102
a3.sources.r1.port = 4141

# Describe the sink
a3.sinks.k1.type = logger

# Describe the channel
a3.channels.c1.type = memory
a3.channels.c1.capacity = 1000
a3.channels.c1.transactionCapacity = 100

# Bind the source and sink to the channel
a3.sources.r1.channels = c1
a3.sinks.k1.channel = c1
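One thing to keep in mind when reading the console output: by default the logger sink prints only the first 16 bytes of each event body. Its maxBytesToLog property can raise that limit, e.g.:
a3.sinks.k1.maxBytesToLog = 32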
4. Run the configuration files
Start the agents with their respective configurations, downstream first: flume3.conf, then flume2.conf and flume1.conf.
[root@hadoop102 flume]$ bin/flume-ng agent --conf conf/ --name a3 --conf-file job/group2/flume3.conf -Dflume.root.logger=INFO,console
[root@hadoop103 flume]$ bin/flume-ng agent --conf conf/ --name a2 --conf-file job/group2/flume2.conf
[root@hadoop102 flume]$ bin/flume-ng agent --conf conf/ --name a1 --conf-file job/group2/flume1.conf
5. On hadoop102, append content to the hive.log file monitored by flume1
[root@hadoop102 module]$ echo 'hello' >> hive/logs/hive.log
6. On hadoop103, send data to port 44444
[root@hadoop103 flume]$ telnet hadoop103 44444
7. Check the data printed on the flume3 console