
HBase: Table Operations via the API


(1) Environment preparation

  Hadoop, HBase, and ZooKeeper must be installed and running.

(2) Configure the pom.xml file

<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
https://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <groupId>hadoop</groupId>
    <artifactId>hdfstest</artifactId>
    <version>0.0.1-SNAPSHOT</version>
    <dependencies>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>3.2.1</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-hdfs</artifactId>
            <version>3.2.1</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>3.2.1</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-mapreduce-client-core</artifactId>
            <version>3.2.1</version>
        </dependency>
        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
            <version>4.12</version>
        </dependency>

        <dependency>
            <groupId>org.apache.zookeeper</groupId>
            <artifactId>zookeeper</artifactId>
            <version>3.5.6</version>
        </dependency>


        <dependency>
            <groupId>org.apache.hbase</groupId>
            <artifactId>hbase-shaded-client</artifactId>
            <version>2.2.3</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hbase</groupId>
            <artifactId>hbase-client</artifactId>
            <version>2.2.3</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hbase</groupId>
            <artifactId>hbase-server</artifactId>
            <version>2.2.3</version>
        </dependency>
    </dependencies>
</project>

(3) Write the HBaseTest class, which creates a table in HBase and inserts, queries, and deletes data, then run the program

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.client.*;
import java.io.IOException;
public class HBaseTest {
    public static void main(String[] args) {
        HBaseTest hbase = new HBaseTest();
        hbase.connHbase();                // establish the connection
        hbase.createTable("user", "cf");  // create the table
        hbase.insert();                   // insert data
        hbase.query();                    // query data
//        hbase.delete();                 // delete data
    }
    Connection conn=null;
    public void connHbase(){
        Configuration config = HBaseConfiguration.create();
        config.set("hbase.zookeeper.quorum",
                "192.168.171.56,192.168.171.57,192.168.171.63");
        config.set("zookeeper.znode.parent","/hbase/master");
        config.set("hbase.zookeeper.property.clientPort","2181");
        try {
            conn = ConnectionFactory.createConnection(config);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
    /**
     * Equivalent to the shell command: create 'tableName','columnFamily'.
     * All data in HBase is stored as byte arrays, so getBytes() is used throughout.
     */
    public void createTable(String tableName,String columnFamily){
        Admin admin;
        try {
            admin = conn.getAdmin();
            TableName table = TableName.valueOf(tableName.getBytes());
            if(admin.tableExists(table)){
                // a table must be disabled before it can be deleted
                admin.disableTable(table);
                admin.deleteTable(table);
            }
            HTableDescriptor tabledesc = new HTableDescriptor(table);
            HColumnDescriptor columndesc = new
                    HColumnDescriptor(columnFamily.getBytes());
            columndesc.setBlockCacheEnabled(true);
            columndesc.setBlocksize(64000);
            tabledesc.addFamily(columndesc);
            admin.createTable(tabledesc);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
    public void insert(){
        TableName tbname = TableName.valueOf("user".getBytes());
        try {
            Table tb = conn.getTable(tbname);
            Put put = new Put("123".getBytes());//rowkey
            put.addColumn("cf".getBytes(), "name".getBytes(),
                    "Dongming".getBytes());
            put.addColumn("cf".getBytes(), "age".getBytes(),
                    "10".getBytes());
            tb.put(put);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
    public void query(){
        TableName tbname = TableName.valueOf("user".getBytes());
        try{
            Table tb = conn.getTable(tbname);
            Get get = new Get("123".getBytes());
            get.addColumn("cf".getBytes(),"name".getBytes());
            Result result = tb.get(get);
            Cell cell = result.getColumnLatestCell("cf".getBytes(),
                    "name".getBytes());
            // getValueArray() returns the whole backing array; copy out only the cell value
            System.out.println(new String(CellUtil.cloneValue(cell)));
        }catch(Exception e){
            e.printStackTrace();
        }
    }
    public void delete() {
        TableName tbname = TableName.valueOf("user".getBytes());
        try{
            Table tb = conn.getTable(tbname);
            Delete del = new Delete("123".getBytes());
            tb.delete(del);
            System.out.println("delete done!");
        }catch(Exception e){
            e.printStackTrace();
        }
    }
}
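A side note: HTableDescriptor and HColumnDescriptor used in createTable() are deprecated in HBase 2.x. As a rough sketch (not part of the original program; the class name CreateTableDemo is made up for illustration), the same table creation can be written with the builder API:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;
import java.io.IOException;

public class CreateTableDemo {
    // Hypothetical helper: same behaviour as createTable() above, but built
    // with the non-deprecated HBase 2.x descriptor builders.
    public static void createTable(Connection conn, String tableName,
                                   String columnFamily) throws IOException {
        try (Admin admin = conn.getAdmin()) {
            TableName table = TableName.valueOf(tableName);
            if (admin.tableExists(table)) {
                admin.disableTable(table);  // disable before delete
                admin.deleteTable(table);
            }
            admin.createTable(TableDescriptorBuilder.newBuilder(table)
                    .setColumnFamily(ColumnFamilyDescriptorBuilder
                            .newBuilder(Bytes.toBytes(columnFamily))
                            .setBlockCacheEnabled(true)
                            .setBlocksize(64000)
                            .build())
                    .build());
        }
    }
}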

(4) After running the program, the table and data written by the HBaseTest class appear in HBase.
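To verify this from code rather than the HBase shell, a full-table scan works as well. The following is a minimal sketch assuming the same ZooKeeper quorum and client port as connHbase() (add the same zookeeper.znode.parent setting if your cluster needs it); the class name ScanUserDemo is made up for illustration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanUserDemo {
    public static void main(String[] args) throws Exception {
        Configuration config = HBaseConfiguration.create();
        config.set("hbase.zookeeper.quorum",
                "192.168.171.56,192.168.171.57,192.168.171.63");
        config.set("hbase.zookeeper.property.clientPort", "2181");
        // Scan every row of 'user' and print rowkey, qualifier and value
        try (Connection conn = ConnectionFactory.createConnection(config);
             Table tb = conn.getTable(TableName.valueOf("user"));
             ResultScanner scanner = tb.getScanner(new Scan())) {
            for (Result row : scanner) {
                for (Cell cell : row.listCells()) {
                    System.out.println(Bytes.toString(CellUtil.cloneRow(cell)) + " "
                            + Bytes.toString(CellUtil.cloneQualifier(cell)) + "="
                            + Bytes.toString(CellUtil.cloneValue(cell)));
                }
            }
        }
    }
}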

Source: https://www.cnblogs.com/lixianhui/p/16309698.html