package cn.itcast.bigdata.hbase;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.ArrayList;
import java.util.Iterator;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.BinaryPrefixComparator;
import org.apache.hadoop.hbase.filter.ByteArrayComparable;
import org.apache.hadoop.hbase.filter.CompareFilter;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.FilterBase;
import org.apache.hadoop.hbase.filter.PrefixFilter;
import org.apache.hadoop.hbase.filter.RegexStringComparator;
import org.apache.hadoop.hbase.filter.RowFilter;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.filter.SubstringComparator;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.Before;
import org.junit.Test;
/**
 * HBase client operation examples
*
* @author songjq
*
*/
public class HbaseAPIDemo {
// Admin connection object (DDL operations)
private HBaseAdmin hBaseAdmin = null;
// Table connection object
private HTable hTable = null;
/**
 * Obtain the HBase connection objects
*
* @throws MasterNotRunningException
* @throws ZooKeeperConnectionException
* @throws IOException
*/
@Before
public void getHbaseConn() throws MasterNotRunningException, ZooKeeperConnectionException, IOException {
/**
 * Creating a configuration object with the generic Configuration constructor would
 * automatically load the Hadoop configuration files on the classpath
 * (core-default.xml, core-site.xml, hdfs-site.xml, ...).
 */
// Configuration hdfsconf = new Configuration();
/**
 * HBaseConfiguration.create() automatically loads the Hadoop configuration files on
 * the classpath as well as hbase-site.xml.
 */
Configuration conf = HBaseConfiguration.create();
/**
 * Connect to the HBase cluster through the ZooKeeper quorum: just copy the ZooKeeper
 * connection string from hbase-site.xml
 * (/usr/local/apps/hbase-0.96.2-hadoop2/conf/hbase-site.xml).
 */
conf.set("hbase.zookeeper.quorum", "hadoop-server01:2181,hadoop-server02:2181,hadoop-server03:2181");
// Build the hBaseAdmin client object for DDL operations
hBaseAdmin = new HBaseAdmin(conf);
// Instantiate the table connection object
hTable = new HTable(conf, "oadb:t_user_info");
}
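/**
 * A minimal DDL sketch using the hBaseAdmin client built in getHbaseConn(). The table
 * and column family names used here are illustrative assumptions, not part of the
 * original example; adjust them to your cluster.
 */
@Test
public void createTableSketch() throws IOException {
	// Describe a table in the oadb namespace with a single column family (assumed names)
	HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("oadb:t_user_info"));
	desc.addFamily(new HColumnDescriptor("base_info"));
	// Create the table only if it does not already exist
	if (!hBaseAdmin.tableExists("oadb:t_user_info")) {
		hBaseAdmin.createTable(desc);
	}
}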
/**
 * Data update
*
* @throws RetriesExhaustedWithDetailsException
* @throws InterruptedIOException
*/
@Test
public void updateTest() throws RetriesExhaustedWithDetailsException, InterruptedIOException {
/*
 * Create a Put object for the target row key
*/
Put put = new Put("user0000010".getBytes());
/*
 * Update the address column in the base_info column family, setting its value to Kunming
*/
put.add(Bytes.toBytes("base_info"), Bytes.toBytes("address"), Bytes.toBytes("Kunming"));
/*
 * In HBase, updates and inserts follow the same principle: both are put operations.
 * The hTable.put(puts) overload performs batch updates; see the sketch after this method.
*/
hTable.put(put);
}
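/**
 * A minimal batch-put sketch, as referenced in updateTest() above. The row keys and
 * values are illustrative assumptions; the hTable.put(List&lt;Put&gt;) overload sends all
 * mutations to the server in one client call.
 */
@Test
public void batchPutSketch() throws RetriesExhaustedWithDetailsException, InterruptedIOException {
	ArrayList<Put> puts = new ArrayList<Put>();
	for (int i = 1; i <= 3; i++) {
		// Assumed row keys user0000011 .. user0000013
		Put put = new Put(Bytes.toBytes("user000001" + i));
		put.add(Bytes.toBytes("base_info"), Bytes.toBytes("address"), Bytes.toBytes("Kunming"));
		puts.add(put);
	}
	// A single call performs the whole batch of inserts/updates
	hTable.put(puts);
}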
}