Spark and HBase
Reading an HBase table from Spark with newAPIHadoopRDD and TableInputFormat:

package hgs.spark.hbase

import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.mapreduce.TableInputFormat
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext

object HbaseTest {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf
    conf.setMaster("local").setAppName("local")
    val context = new SparkContext(conf)

    // HBase client configuration: ZooKeeper quorum plus the table and scan range to read
    val hadoopconf = HBaseConfiguration.create()
    hadoopconf.set("hbase.zookeeper.quorum", "bigdata01:2181,bigdata02:2181,bigdata03:2181")
    hadoopconf.set("hbase.zookeeper.property.clientPort", "2181")
    val tableName = "test1"
    hadoopconf.set(TableInputFormat.INPUT_TABLE, tableName)
    hadoopconf.set(TableInputFormat.SCAN_ROW_START, "h")
    hadoopconf.set(TableInputFormat.SCAN_ROW_STOP, "x")
    hadoopconf.set(TableInputFormat.SCAN_COLUMN_FAMILY, "cf1")
    hadoopconf.set(TableInputFormat.SCAN_COLUMNS, "cf1:col1,cf1:col2")

    /* Equivalent configuration through a serialized Scan object:
    val startrow = "h"
    val stoprow = "w"
    val scan = new Scan
    scan.setStartRow(startrow.getBytes)
    scan.setStopRow(stoprow.getBytes)
    val proto = ProtobufUtil.toScan(scan)
    val scanToString = Base64.encodeBytes(proto.toByteArray())
    println(scanToString)
    hadoopconf.set(TableInputFormat.SCAN, scanToString)
    */

    // Each record is a (rowkey, Result) pair
    val hbaseRdd = context.newAPIHadoopRDD(hadoopconf, classOf[TableInputFormat],
      classOf[org.apache.hadoop.hbase.io.ImmutableBytesWritable],
      classOf[org.apache.hadoop.hbase.client.Result])

    // Print the two columns of every row (in local mode the output shows up on the driver console)
    hbaseRdd.foreach(x => {
      val val1 = x._2.getValue("cf1".getBytes, "col1".getBytes)
      val val2 = x._2.getValue("cf1".getBytes, "col2".getBytes)
      println(new String(val1), new String(val2))
    })

    context.stop()
  }
}
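The commented-out block above hints at an alternative: build a Scan object and serialize it into the TableInputFormat.SCAN property instead of setting the individual SCAN_* keys. A minimal sketch of that approach is below; it reuses the hadoopconf from the example above and assumes an HBase 1.x client, where ProtobufUtil lives in org.apache.hadoop.hbase.protobuf and Base64 in org.apache.hadoop.hbase.util (the original comment calls setStartRow twice; the second call should be setStopRow).

import org.apache.hadoop.hbase.client.Scan
import org.apache.hadoop.hbase.mapreduce.TableInputFormat
import org.apache.hadoop.hbase.protobuf.ProtobufUtil
import org.apache.hadoop.hbase.util.Base64

// Describe the read as a Scan object instead of individual SCAN_* keys
val scan = new Scan()
scan.setStartRow("h".getBytes)
scan.setStopRow("x".getBytes)              // setStopRow, not a second setStartRow
scan.addColumn("cf1".getBytes, "col1".getBytes)
scan.addColumn("cf1".getBytes, "col2".getBytes)

// Serialize the Scan and hand it to TableInputFormat via the SCAN property
val proto = ProtobufUtil.toScan(scan)
hadoopconf.set(TableInputFormat.SCAN, Base64.encodeBytes(proto.toByteArray()))

This snippet is a drop-in replacement for the SCAN_ROW_START / SCAN_ROW_STOP / SCAN_COLUMN_FAMILY / SCAN_COLUMNS lines in HbaseTest; the newAPIHadoopRDD call stays the same.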
Writing an RDD into HBase with the old mapred TableOutputFormat and saveAsHadoopDataset:

package hgs.spark.hbase

import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapred.TableOutputFormat
import org.apache.hadoop.mapred.JobConf
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext

object SparkToHbase {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf
    conf.setMaster("local").setAppName("local")
    val context = new SparkContext(conf)

    // Sample data: (rowkey, value) pairs
    val rdd = context.parallelize(List(("aaaaaaa", "aaaaaaa"), ("bbbbb", "bbbbb")), 2)

    val hadoopconf = HBaseConfiguration.create()
    hadoopconf.set("hbase.zookeeper.quorum", "bigdata01:2181,bigdata02:2181,bigdata03:2181")
    hadoopconf.set("hbase.zookeeper.property.clientPort", "2181")

    // The JobConf tells the old mapred API which table to write and which output format to use
    val jobconf = new JobConf(hadoopconf, this.getClass)
    jobconf.set(TableOutputFormat.OUTPUT_TABLE, "test1")
    jobconf.setOutputFormat(classOf[TableOutputFormat])

    // Turn each pair into (ImmutableBytesWritable, Put); TableOutputFormat only looks at the Put
    val exterrdd = rdd.map(x => {
      val put = new Put(x._1.getBytes)
      put.add("cf1".getBytes, "col1".getBytes, x._2.getBytes)   // addColumn in newer HBase clients
      (new ImmutableBytesWritable, put)
    })

    exterrdd.saveAsHadoopDataset(jobconf)
    context.stop()
  }
}
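The example above uses the old mapred API. The same write can go through the new mapreduce API (org.apache.hadoop.hbase.mapreduce.TableOutputFormat together with saveAsNewAPIHadoopDataset). Below is a minimal sketch of that variant, not part of the original post; it assumes an HBase 1.x client where Put.addColumn is available, and reuses the test1 table and cf1 column family from above with a couple of made-up sample rows.

package hgs.spark.hbase

import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.TableOutputFormat
import org.apache.hadoop.mapreduce.Job
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext

object SparkToHbaseNewAPI {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf
    conf.setMaster("local").setAppName("local")
    val context = new SparkContext(conf)

    val hbaseConf = HBaseConfiguration.create()
    hbaseConf.set("hbase.zookeeper.quorum", "bigdata01:2181,bigdata02:2181,bigdata03:2181")
    hbaseConf.set(TableOutputFormat.OUTPUT_TABLE, "test1")

    // With the new API, a Job object carries the output format configuration
    val job = Job.getInstance(hbaseConf)
    job.setOutputFormatClass(classOf[TableOutputFormat[ImmutableBytesWritable]])
    job.setOutputKeyClass(classOf[ImmutableBytesWritable])
    job.setOutputValueClass(classOf[Put])

    // Sample (rowkey, value) pairs to write
    val rdd = context.parallelize(List(("ccccc", "ccccc"), ("ddddd", "ddddd")), 2)
    val putRdd = rdd.map { case (rowKey, value) =>
      val put = new Put(rowKey.getBytes)
      put.addColumn("cf1".getBytes, "col1".getBytes, value.getBytes)
      (new ImmutableBytesWritable(rowKey.getBytes), put)
    }

    putRdd.saveAsNewAPIHadoopDataset(job.getConfiguration)
    context.stop()
  }
}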
Source: ITPUB blog, http://blog.itpub.net/31506529/viewspace-2220682/. Please credit the source when reposting.