
Generating HFiles and Bulk Loading into HBase



I. Generating HFiles with MapReduce

package insert.tools.hfile;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.KeyValueSortReducer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class TestHFileToHBase {

	public static class TestHFileToHBaseMapper extends
			Mapper<LongWritable, Text, ImmutableBytesWritable, KeyValue> {

		@Override
		protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
			String[] values = value.toString().split("\t", 2);
			byte[] row = Bytes.toBytes(values[0]);
			ImmutableBytesWritable k = new ImmutableBytesWritable(row);
			KeyValue kvProtocol = new KeyValue(row, "PROTOCOLID".getBytes(), "PROTOCOLID".getBytes(), values[1]
					.getBytes());
			context.write(k, kvProtocol);

			// KeyValue kvSrcip = new KeyValue(row, "SRCIP".getBytes(),
			// "SRCIP".getBytes(), values[1].getBytes());
			// context.write(k, kvSrcip);
//			 HFileOutputFormat.getRecordWriter 
		}

	}

	public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {
		Configuration conf = HBaseConfiguration.create();
		Job job = new Job(conf, "TestHFileToHBase");
		job.setJarByClass(TestHFileToHBase.class);

		job.setOutputKeyClass(ImmutableBytesWritable.class);
		job.setOutputValueClass(KeyValue.class);

		job.setMapperClass(TestHFileToHBaseMapper.class);
		job.setReducerClass(KeyValueSortReducer.class);
//		job.setOutputFormatClass(org.apache.hadoop.hbase.mapreduce.HFileOutputFormat.class);
		job.setOutputFormatClass(HFileOutputFormat.class);
		// job.setNumReduceTasks(4);
		// job.setPartitionerClass(org.apache.hadoop.hbase.mapreduce.SimpleTotalOrderPartitioner.class);

		// HBaseAdmin admin = new HBaseAdmin(conf);
		HTable table = new HTable(conf, "hua");

		HFileOutputFormat.configureIncrementalLoad(job, table);

		FileInputFormat.addInputPath(job, new Path(args[0]));
		FileOutputFormat.setOutputPath(job, new Path(args[1]));

		System.exit(job.waitForCompletion(true) ? 0 : 1);
	}

}
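For reference, the mapper above expects each input line to contain a row key and a value separated by a tab. A hypothetical input file and job invocation might look like the following (the jar name, paths, and sample rows are placeholders, not taken from the original post):

row0001	12345
row0002	67890

hadoop jar insert-tools.jar insert.tools.hfile.TestHFileToHBase /user/test/input /user/test/hfile-output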

 

II. The Improved HFileOutputFormat

The HFileOutputFormat in the HBase source can only generate HFiles for one column family at a time; the improved HFileOutputFormat below can generate HFiles for multiple column families in the same job. Segments marked with an add tag are code added on top of the original source, and segments marked with a revise tag are code that replaces the original. Reference: https://review.cloudera.org/r/1272/diff/1/?file=17977#file17977line93

/**
 * Copyright 2009 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package insert.tools.hfile;

import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.TreeSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.hfile.Compression;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.mapreduce.KeyValueSortReducer;
import org.apache.hadoop.hbase.mapreduce.PutSortReducer;
import org.apache.hadoop.hbase.mapreduce.hadoopbackport.TotalOrderPartitioner;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

import com.google.common.base.Preconditions;

/**
 * Writes HFiles. Passed KeyValues must arrive in order. This revised version
 * can write files for multiple column families in the same job. Writes current
 * time as the sequence id for the file. Sets the major compacted attribute on
 * created hfiles.
 * 
 * @see KeyValueSortReducer
 */
public class HFileOutputFormat extends
		FileOutputFormat<ImmutableBytesWritable, KeyValue> {
	static Log LOG = LogFactory.getLog(HFileOutputFormat.class);

	public RecordWriter<ImmutableBytesWritable, KeyValue> getRecordWriter(
			final TaskAttemptContext context) throws IOException,
			InterruptedException {
		// Get the path of the temporary output file
		final Path outputPath = FileOutputFormat.getOutputPath(context);
		final Path outputdir = new FileOutputCommitter(outputPath, context)
				.getWorkPath();
		Configuration conf = context.getConfiguration();
		final FileSystem fs = outputdir.getFileSystem(conf);
		// These configs. are from hbase-*.xml
		// revise
		// final long maxsize = conf.getLong("hbase.hregion.max.filesize",
		// 268435456);
		// final int blocksize = conf.getInt("hfile.min.blocksize.size", 65536);
		final long maxsize = conf.getLong("hbase.hregion.max.filesize",
				HConstants.DEFAULT_MAX_FILE_SIZE);
		final int blocksize = conf.getInt("hfile.min.blocksize.size",
				HFile.DEFAULT_BLOCKSIZE);
		// -revise

		// Invented config. Add to hbase-*.xml if other than default
		// compression.
		final String compression = conf.get("hfile.compression",
				Compression.Algorithm.NONE.getName());

		return new RecordWriter<ImmutableBytesWritable, KeyValue>() {
			// Map of families to writers and how much has been output on the
			// writer.
			private final Map<byte[], WriterLength> writers = new TreeMap<byte[], WriterLength>(
					Bytes.BYTES_COMPARATOR);
			private byte[] previousRow = HConstants.EMPTY_BYTE_ARRAY;
			private final byte[] now = Bytes
					.toBytes(System.currentTimeMillis());
			// add
			private boolean rollRequested = false;

			// -add

			public void write(ImmutableBytesWritable row, KeyValue kv)
					throws IOException {
				// add
				// null input == user explicitly wants to flush
				if (row == null && kv == null) {
					rollWriters();
					return;
				}
				byte[] rowKey = kv.getRow();
				// -add

				long length = kv.getLength();
				byte[] family = kv.getFamily();
				WriterLength wl = this.writers.get(family);
				// revise
				// if (wl == null
				// || ((length + wl.written) >= maxsize)
				// && Bytes.compareTo(this.previousRow, 0,
				// this.previousRow.length, kv.getBuffer(), kv
				// .getRowOffset(), kv.getRowLength()) != 0) {
				// // Get a new writer.
				// Path basedir = new Path(outputdir, Bytes.toString(family));
				// if (wl == null) {
				// wl = new WriterLength();
				// this.writers.put(family, wl);
				// if (this.writers.size() > 1)
				// throw new IOException("One family only");
				// // If wl == null, first file in family. Ensure family
				// // dir exits.
				// if (!fs.exists(basedir))
				// fs.mkdirs(basedir);
				// }
				// wl.writer = getNewWriter(wl.writer, basedir);
				// LOG
				// .info("Writer="
				// + wl.writer.getPath()
				// + ((wl.written == 0) ? "" : ", wrote="
				// + wl.written));
				// wl.written = 0;
				// }

				// If this is a new column family, verify that the directory
				// exists
				if (wl == null) {
					fs.mkdirs(new Path(outputdir, Bytes.toString(family)));
				}
				// If any of the HFiles for the column families has reached
				// maxsize, we need to roll all the writers
				if (wl != null && wl.written + length >= maxsize) {
					this.rollRequested = true;
				}
				// This can only happen once a row is finished though
				if (rollRequested
						&& Bytes.compareTo(this.previousRow, rowKey) != 0) {
					rollWriters();
				}
				// create a new HFile writer, if necessary
				if (wl == null || wl.writer == null) {
					wl = getNewWriter(family);
				}
				// we now have the proper HFile writer. full steam ahead

				// -revise

				kv.updateLatestStamp(this.now);
				wl.writer.append(kv);
				wl.written += length;
				// Copy the row so we know when a row transition.
				// revise
				// this.previousRow = kv.getRow();
				this.previousRow = rowKey;
				// -revise
			}

			// revise
			// /*
			// * Create a new HFile.Writer. Close current if there is one.
			// *
			// * @param writer
			// *
			// * @param familydir
			// *
			// * @return A new HFile.Writer.
			// *
			// * @throws IOException
			// */
			// private HFile.Writer getNewWriter(final HFile.Writer writer,
			// final Path familydir) throws IOException {
			// close(writer);
			// return new HFile.Writer(fs, StoreFile.getUniqueFile(fs,
			// familydir), blocksize, compression,
			// KeyValue.KEY_COMPARATOR);
			// }
			private void rollWriters() throws IOException {
				for (WriterLength wl : this.writers.values()) {
					if (wl.writer != null) {
						LOG.info("Writer="
								+ wl.writer.getPath()
								+ ((wl.written == 0) ? "" : ", wrote="
										+ wl.written));
						close(wl.writer);
					}
					wl.writer = null;
					wl.written = 0;
				}
				this.rollRequested = false;
			}

			/*
			 * Create a new HFile.Writer.
			 * 
			 * @param family
			 * 
			 * @return A WriterLength, containing a new HFile.Writer.
			 * 
			 * @throws IOException
			 */
			private WriterLength getNewWriter(byte[] family) throws IOException {
				WriterLength wl = new WriterLength();
				Path familydir = new Path(outputdir, Bytes.toString(family));
				wl.writer = new HFile.Writer(fs, StoreFile.getUniqueFile(fs,
						familydir), blocksize, compression,
						KeyValue.KEY_COMPARATOR);
				this.writers.put(family, wl);
				return wl;
			}

			// -revise

			private void close(final HFile.Writer w) throws IOException {
				if (w != null) {
					w.appendFileInfo(StoreFile.BULKLOAD_TIME_KEY, Bytes
							.toBytes(System.currentTimeMillis()));
					w.appendFileInfo(StoreFile.BULKLOAD_TASK_KEY, Bytes
							.toBytes(context.getTaskAttemptID().toString()));
					w.appendFileInfo(StoreFile.MAJOR_COMPACTION_KEY, Bytes
							.toBytes(true));
					w.close();
				}
			}

			// revise
			// public void close(TaskAttemptContext c) throws IOException,
			// InterruptedException {
			// for (Map.Entry<byte[], WriterLength> e : this.writers
			// .entrySet()) {
			// close(e.getValue().writer);
			// }
			// }
			public void close(TaskAttemptContext c) throws IOException,
					InterruptedException {
				for (WriterLength wl : this.writers.values()) {
					close(wl.writer);
				}
			}
			// -revise
		};
	}

	/*
	 * Data structure to hold a Writer and amount of data written on it.
	 */
	static class WriterLength {
		long written = 0;
		HFile.Writer writer = null;
	}

	/**
	 * Return the start keys of all of the regions in this table, as a list of
	 * ImmutableBytesWritable.
	 */
	private static List<ImmutableBytesWritable> getRegionStartKeys(HTable table)
			throws IOException {
		byte[][] byteKeys = table.getStartKeys();
		ArrayList<ImmutableBytesWritable> ret = new ArrayList<ImmutableBytesWritable>(
				byteKeys.length);
		for (byte[] byteKey : byteKeys) {
			ret.add(new ImmutableBytesWritable(byteKey));
		}
		return ret;
	}

	/**
	 * Write out a SequenceFile that can be read by TotalOrderPartitioner that
	 * contains the split points in startKeys.
	 * 
	 * @param partitionsPath
	 *            output path for SequenceFile
	 * @param startKeys
	 *            the region start keys
	 */
	private static void writePartitions(Configuration conf,
			Path partitionsPath, List<ImmutableBytesWritable> startKeys)
			throws IOException {
		Preconditions.checkArgument(!startKeys.isEmpty(), "No regions passed");

		// We're generating a list of split points, and we don't ever
		// have keys < the first region (which has an empty start key)
		// so we need to remove it. Otherwise we would end up with an
		// empty reducer with index 0
		TreeSet<ImmutableBytesWritable> sorted = new TreeSet<ImmutableBytesWritable>(
				startKeys);

		ImmutableBytesWritable first = sorted.first();
		Preconditions
				.checkArgument(
						first.equals(HConstants.EMPTY_BYTE_ARRAY),
						"First region of table should have empty start key. Instead has: %s",
						Bytes.toStringBinary(first.get()));
		sorted.remove(first);

		// Write the actual file
		FileSystem fs = partitionsPath.getFileSystem(conf);
		SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf,
				partitionsPath, ImmutableBytesWritable.class,
				NullWritable.class);

		try {
			for (ImmutableBytesWritable startKey : sorted) {
				writer.append(startKey, NullWritable.get());
			}
		} finally {
			writer.close();
		}
	}

	/**
	 * Configure a MapReduce Job to perform an incremental load into the given
	 * table. This
	 * <ul>
	 * <li>Inspects the table to configure a total order partitioner</li>
	 * <li>Uploads the partitions file to the cluster and adds it to the
	 * DistributedCache</li>
	 * <li>Sets the number of reduce tasks to match the current number of
	 * regions</li>
	 * <li>Sets the output key/value class to match HFileOutputFormat's
	 * requirements</li>
	 * <li>Sets the reducer up to perform the appropriate sorting (either
	 * KeyValueSortReducer or PutSortReducer)</li>
	 * </ul>
	 * The user should be sure to set the map output value class to either
	 * KeyValue or Put before running this function.
	 */
	public static void configureIncrementalLoad(Job job, HTable table)
			throws IOException {
		Configuration conf = job.getConfiguration();
		job.setPartitionerClass(TotalOrderPartitioner.class);
		job.setOutputKeyClass(ImmutableBytesWritable.class);
		job.setOutputValueClass(KeyValue.class);
		job.setOutputFormatClass(HFileOutputFormat.class);

		// Based on the configured map output class, set the correct reducer to
		// properly
		// sort the incoming values.
		// TODO it would be nice to pick one or the other of these formats.
		if (KeyValue.class.equals(job.getMapOutputValueClass())) {
			job.setReducerClass(KeyValueSortReducer.class);
		} else if (Put.class.equals(job.getMapOutputValueClass())) {
			job.setReducerClass(PutSortReducer.class);
		} else {
			LOG.warn("Unknown map output value type:"
					+ job.getMapOutputValueClass());
		}

		LOG.info("Looking up current regions for table " + table);
		List<ImmutableBytesWritable> startKeys = getRegionStartKeys(table);
		LOG.info("Configuring " + startKeys.size() + " reduce partitions "
				+ "to match current region count");
		job.setNumReduceTasks(startKeys.size());

		Path partitionsPath = new Path(job.getWorkingDirectory(), "partitions_"
				+ System.currentTimeMillis());
		LOG.info("Writing partition information to " + partitionsPath);

		FileSystem fs = partitionsPath.getFileSystem(conf);
		writePartitions(conf, partitionsPath, startKeys);
		partitionsPath = partitionsPath.makeQualified(fs);
		URI cacheUri;
		try {
			cacheUri = new URI(partitionsPath.toString() + "#"
					+ TotalOrderPartitioner.DEFAULT_PATH);
		} catch (URISyntaxException e) {
			throw new IOException(e);
		}
		DistributedCache.addCacheFile(cacheUri, conf);
		DistributedCache.createSymlink(conf);

		LOG.info("Incremental table output configured.");
	}

}
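One detail worth noting in the revised write() above: a (null, null) pair is treated as an explicit request to close and roll all current writers. As a hedged illustration (not from the original post), a reducer writing to this output format could force such a flush like this:

// Hypothetical: ask the revised HFileOutputFormat to close and roll all
// open HFile writers; normal writes can continue afterwards.
context.write(null, null);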

 

III. Notes on Generating HFiles with MapReduce

1. Whichever of map or reduce produces the final output, the output key and value types must be <ImmutableBytesWritable, KeyValue> or <ImmutableBytesWritable, Put>.

2. A map or reduce output value type of KeyValue corresponds to KeyValueSortReducer, and Put corresponds to PutSortReducer (a Put-based mapper sketch follows this list).

3. In the MR example, job.setOutputFormatClass(HFileOutputFormat.class) uses the improved HFileOutputFormat, which can generate HFiles for multiple column families at the same time; the version in the HBase source only organizes a single column family into HFiles per job.

4. In the MR example, HFileOutputFormat.configureIncrementalLoad(job, table) configures the job automatically. The total-order partitioner (SimpleTotalOrderPartitioner, or the TotalOrderPartitioner used above) first sorts the keys globally and then assigns them to reducers, so that the minimum-to-maximum key ranges handled by different reducers never overlap.

This is required because, when the HFiles are loaded into HBase, the keys within each region must be in strictly sorted order.

5. In the MR example, the generated HFiles are stored on HDFS, and the subdirectories under the output path are the column families. When the HFiles are bulk loaded into HBase, they are effectively moved into the HBase regions, and the column-family subdirectories under the output path are left empty.
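To illustrate notes 1 and 2, below is a minimal sketch of a mapper that emits <ImmutableBytesWritable, Put> rather than <ImmutableBytesWritable, KeyValue>, so that configureIncrementalLoad would select PutSortReducer. This is an assumption-laden example, not code from the original post; the family and qualifier names are placeholders.

package insert.tools.hfile;

import java.io.IOException;

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

// Sketch: emits <ImmutableBytesWritable, Put>; the driver would call
// job.setMapOutputValueClass(Put.class) before configureIncrementalLoad,
// which then picks PutSortReducer instead of KeyValueSortReducer.
public class TestHFileToHBasePutMapper extends
		Mapper<LongWritable, Text, ImmutableBytesWritable, Put> {

	@Override
	protected void map(LongWritable key, Text value, Context context)
			throws IOException, InterruptedException {
		// Input line layout is the same as in the KeyValue mapper above:
		// rowkey <TAB> value
		String[] values = value.toString().split("\t", 2);
		byte[] row = Bytes.toBytes(values[0]);
		Put put = new Put(row);
		// Placeholder family/qualifier; adjust to the target table schema.
		put.add(Bytes.toBytes("PROTOCOLID"), Bytes.toBytes("PROTOCOLID"),
				Bytes.toBytes(values[1]));
		context.write(new ImmutableBytesWritable(row), put);
	}

}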

IV. Bulk Loading HFiles into HBase

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
import org.apache.hadoop.hbase.util.Bytes;

public class TestLoadIncrementalHFileToHBase {

	// private static final byte[] TABLE = Bytes.toBytes("hua");
	// private static final byte[] QUALIFIER = Bytes.toBytes("PROTOCOLID");
	// private static final byte[] FAMILY = Bytes.toBytes("PROTOCOLID");

	public static void main(String[] args) throws IOException {
		Configuration conf = HBaseConfiguration.create();
//		byte[] TABLE = Bytes.toBytes("hua");
		byte[] TABLE = Bytes.toBytes(args[0]);
		HTable table = new HTable(conf, TABLE);
		LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
		loader.doBulkLoad(new Path(args[1]), table);
//		loader.doBulkLoad(new Path("/hua/testHFileResult/"), table);
	}

}
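A hypothetical invocation of this loader class (jar name and paths are placeholders, not from the original post) would be:

hadoop jar insert-tools.jar TestLoadIncrementalHFileToHBase mytable /user/test/hfile-output

The second argument is the MR output path from section I, which contains one subdirectory per column family.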

 

V. Notes on Bulk Loading HFiles into HBase

1. The generated HFiles are loaded through the doBulkLoad method of HBase's LoadIncrementalHFiles. In the example class, the first command-line argument is the table name and the second is the HFile path (the MR output path from above); the column-family subdirectories can also be loaded one by one into the corresponding column families of the table.

2. Related links on bulk loading:

http://hbase.apache.org/docs/r0.89.20100726/bulk-loads.html

http://hbase.apache.org/docs/r0.20.6/api/org/apache/hadoop/hbase/mapreduce/package-summary.html#bulk

http://genius-bai.javaeye.com/blog/641927

3. Bulk loading can be done either from code or via a script. There are two code-based approaches: one is

hadoop jar hbase-VERSION.jar completebulkload /myoutput mytable

and the other is the TestLoadIncrementalHFileToHBase class shown above.

The script-based approach is: jruby $HBASE_HOME/bin/loadtable.rb hbase-mytable hadoop-hbase-hfile-outputdir

From: https://blog.51cto.com/u_16255870/7548627
