Batch importing CSV file data into HBase (1)
import java.io.IOException;
import java.text.SimpleDateFormat;
import java.util.Date;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.mapreduce.TableOutputFormat;
import org.apache.hadoop.hbase.mapreduce.TableReducer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
public class UptoHBase {

    public static void main(String[] args) throws java.io.IOException, InterruptedException, ClassNotFoundException {
        final Configuration configuration = new Configuration();
        // ZooKeeper quorum used by the HBase client
        //configuration.set("hbase.zookeeper.quorum", "hadoop1");
        configuration.set("hbase.zookeeper.quorum", "localhost");
        // Name of the target HBase table
        // configuration.set(TableOutputFormat.OUTPUT_TABLE, "wlan_log");
        configuration.set(TableOutputFormat.OUTPUT_TABLE, "testhbase");
        // Raise this value so HBase does not time out and abort during the import
        configuration.set("dfs.socket.timeout", "180000");

        final Job job = new Job(configuration, "HBaseBatchImport");
        job.setMapperClass(BatchImportMapper.class);
        job.setReducerClass(BatchImportReducer.class);
        // Only the map output types are set; the reduce output is handled by TableOutputFormat
        job.setMapOutputKeyClass(LongWritable.class);
        job.setMapOutputValueClass(Text.class);
        job.setInputFormatClass(TextInputFormat.class);
        // No output path is set; instead the output format writes directly to HBase
        job.setOutputFormatClass(TableOutputFormat.class);
        // FileInputFormat.setInputPaths(job, "hdfs://hadoop1:9000/input");
        FileInputFormat.setInputPaths(job, "hdfs://localhost:9000/user/hadoop/testhbase");
        job.waitForCompletion(true);
    }
    static class BatchImportMapper extends Mapper<LongWritable, Text, LongWritable, Text> {
        SimpleDateFormat dateformat1 = new SimpleDateFormat("yyyyMMddHHmmss");
        Text v2 = new Text();

        protected void map(LongWritable key, Text value, Context context) throws java.io.IOException, InterruptedException {
            //final String[] splited = value.toString().split("\t");
            final String[] splited = value.toString().split(",");
            try {
                // Parse the first column as a timestamp; an unparseable value is counted as a format error below
                final Date date = new Date(Long.parseLong(splited[0].trim()));
                final String dateFormat = dateformat1.format(date);
                //String rowKey = splited[1] + ":" + dateFormat;
                String rowKey = splited[0];
                // Left-pad the fourth column with zeros to make it six digits
                /*
                String str = "000000";
                splited[3] = str.substring(0, 6 - splited[3].length()) + splited[3];
                String rowKey = splited[0] + splited[1] + splited[2] + splited[3];
                */
                //v2.set(rowKey + "\t" + value.toString());
                v2.set(rowKey + "," + value.toString());
                context.write(key, v2);
            } catch (NumberFormatException e) {
                final Counter counter = context.getCounter("BatchImport", "ErrorFormat");
                counter.increment(1L);
                System.out.println("Format error: " + splited[0] + " " + e.getMessage());
            }
        }
    }
    static class BatchImportReducer extends TableReducer<LongWritable, Text, NullWritable> {
        protected void reduce(LongWritable key, java.lang.Iterable<Text> values, Context context) throws java.io.IOException, InterruptedException {
            for (Text text : values) {
                // final String[] splited = text.toString().split("\t");
                final String[] splited = text.toString().split(",");
                // splited[0] is the row key prepended by the mapper; the original CSV columns follow it
                final Put put = new Put(Bytes.toBytes(splited[0]));
                // put.add(Bytes.toBytes("cf"), Bytes.toBytes("date"), Bytes.toBytes(splited[1]));
                put.add(Bytes.toBytes("data"), Bytes.toBytes("h"), Bytes.toBytes(splited[2]));
                put.add(Bytes.toBytes("data"), Bytes.toBytes("l"), Bytes.toBytes(splited[3]));
                // Other columns omitted; call put.add(...) for each in the same way
                context.write(NullWritable.get(), put);
            }
        }
    }
}
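The job writes into the table "testhbase" with a column family named "data", so that table has to exist before the job runs (for example via the HBase shell: create 'testhbase', 'data'). Below is a minimal sketch of doing the same from Java with the old HBaseAdmin API, the same API generation as the put.add(...) calls above; the table and family names are taken from the job, everything else (class name, quorum setting) is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.HBaseAdmin;

public class CreateTestTable {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Same ZooKeeper quorum as used by UptoHBase (assumption: a local pseudo-distributed setup)
        conf.set("hbase.zookeeper.quorum", "localhost");
        HBaseAdmin admin = new HBaseAdmin(conf);
        // "testhbase" and the "data" column family match the names hard-coded in the import job
        HTableDescriptor table = new HTableDescriptor("testhbase");
        table.addFamily(new HColumnDescriptor("data"));
        if (!admin.tableExists("testhbase")) {
            admin.createTable(table);
        }
        admin.close();
    }
}

As a hypothetical illustration of the expected input, a CSV line such as 20130530124500,a,12.3,45.6 would get the row key 20130530124500, with 12.3 stored in data:h and 45.6 in data:l.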
Reposted from: https://my.oschina.net/yonghengilulu/blog/655779