问题描述
spark 版本: 1.2.1
hbase 版本: 0.98

```scala
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.mapreduce.TableInputFormat
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext

object HBaseTest {
  def main(args: Array[String]) {
    val sparkConf = new SparkConf().setAppName("HBaseTest")
    val sc = new SparkContext(sparkConf)
    val conf = HBaseConfiguration.create()
    // hbase-site.xml 已加入到工程，并且打包到了 jar 包中；同样的配置，本地写了个程序能成功读取 HBase，用 Spark 就不好使了
    conf.set("hbase.zookeeper.quorum", "node1,node2,node3")
    conf.set("hbase.zookeeper.property.clientPort", "2181")
    conf.set("hbase.master", "node1:60000")
    conf.set(TableInputFormat.INPUT_TABLE, "student") // hbase 中已创建 student 表
    val hBaseRDD = sc.newAPIHadoopRDD(conf, classOf[TableInputFormat],
      classOf[org.apache.hadoop.hbase.io.ImmutableBytesWritable],
      classOf[org.apache.hadoop.hbase.client.Result])
    val count = hBaseRDD.count()
    println("UsersRDDCoun:" + count)
    sc.stop()
  }
}
```

提交方式如下:

```
./bin/spark-submit --class HBaseTest --master local[2] --num-executors 3 --driver-memory 512m --executor-memory 512m --executor-cores 1 /usr/local/share/testhbase.jar
```

出现以下异常，貌似没连上 HBase:

```
15/07/20 20:59:45 INFO RecoverableZooKeeper: Process identifier=hconnection-0x633e79e7 connecting to ZooKeeper ensemble=node2:2181,node1:2181,node3:2181
15/07/20 20:59:45 INFO ClientCnxn: Opening socket connection to server node2.fd.h3c.com/192.38.18.102:2181. Will not attempt to authenticate using SASL (unknown error)
15/07/20 20:59:45 INFO ClientCnxn: Socket connection established to node2.fd.h3c.com/192.38.18.102:2181, initiating session
15/07/20 20:59:45 INFO ClientCnxn: Session establishment complete on server node2.fd.h3c.com/192.38.18.102:2181, sessionid=0x24e9fc3f322001a, negotiated timeout=40000
15/07/20 20:59:45 INFO ZooKeeperRegistry: ClusterId read in ZooKeeper is null
15/07/20 20:59:45 INFO RegionSizeCalculator: Calculating region sizes for table "student".
Error: application failed with exception
java.lang.RuntimeException: java.lang.NullPointerException
    at org.apache.hadoop.hbase.client.RpcRetryingCaller.callWithoutRetries(RpcRetryingCaller.java:209)
    at org.apache.hadoop.hbase.client.ClientScanner.call(ClientScanner.java:288)
    at org.apache.hadoop.hbase.client.ClientScanner.nextScanner(ClientScanner.java:268)
    at org.apache.hadoop.hbase.client.ClientScanner.initializeScannerInConstruction(ClientScanner.java:140)
    at org.apache.hadoop.hbase.client.ClientScanner.<init>(ClientScanner.java:135)
    at org.apache.hadoop.hbase.client.HTable.getScanner(HTable.java:802)
    at org.apache.hadoop.hbase.client.MetaScanner.metaScan(MetaScanner.java:200)
    at org.apache.hadoop.hbase.client.MetaScanner.metaScan(MetaScanner.java:85)
    at org.apache.hadoop.hbase.client.MetaScanner.allTableRegions(MetaScanner.java:310)
    at org.apache.hadoop.hbase.client.HTable.getRegionLocations(HTable.java:666)
    at org.apache.hadoop.hbase.util.RegionSizeCalculator.<init>(RegionSizeCalculator.java:79)
    at org.apache.hadoop.hbase.util.RegionSizeCalculator.<init>(RegionSizeCalculator.java:64)
    at org.apache.hadoop.hbase.mapreduce.TableInputFormatBase.getSplits(TableInputFormatBase.java:160)
    at org.apache.spark.rdd.NewHadoopRDD.getPartitions(NewHadoopRDD.scala:98)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:220)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:218)
    at scala.Option.getOrElse(Option.scala:120)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:218)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:1335)
    at org.apache.spark.rdd.RDD.count(RDD.scala:925)
    at HBaseTest$.main(HBaseTest.scala:27)
    at HBaseTest.main(HBaseTest.scala)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:606)
    at org.apache.spark.deploy.SparkSubmit$.launch(SparkSubmit.scala:367)
    at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:77)
    at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
Caused by: java.lang.NullPointerException
    at org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.getMetaReplicaNodes(ZooKeeperWatcher.java:269)
    at org.apache.hadoop.hbase.zookeeper.MetaRegionTracker.blockUntilAvailable(MetaRegionTracker.java:241)
    at org.apache.hadoop.hbase.client.ZooKeeperRegistry.getMetaRegionLocation(ZooKeeperRegistry.java:62)
    at org.apache.hadoop.hbase.client.ConnectionManager$HConnectionImplementation.locateMeta(ConnectionManager.java:1213)
    at org.apache.hadoop.hbase.client.ConnectionManager$HConnectionImplementation.locateRegion(ConnectionManager.java:1174)
    at org.apache.hadoop.hbase.client.RpcRetryingCallerWithReadReplicas.getRegionLocations(RpcRetryingCallerWithReadReplicas.java:294)
    at org.apache.hadoop.hbase.client.ScannerCallableWithReplicas.call(ScannerCallableWithReplicas.java:130)
    at org.apache.hadoop.hbase.client.ScannerCallableWithReplicas.call(ScannerCallableWithReplicas.java:55)
    at org.apache.hadoop.hbase.client.RpcRetryingCaller.callWithoutRetries(RpcRetryingCaller.java:201)
    ... 28 more
```
解决方案
解决方案二:
楼主问题解决了吗,我也遇到类似的问题,找不到原因。
解决方案三:
楼主问题解决了吗,我也遇到类似的问题,找不到原因。