pyspark: "Can't get the location for replica 0" error when reading records from an HBase table

Date: 2019-01-25 09:20:35

Tags: pyspark hbase

I am trying to read an HBase table through pyspark and I am getting org.apache.hadoop.hbase.client.RetriesExhaustedException: Can't get the location for replica 0. I am using shc-core to connect Spark and HBase. The HBase, Spark, and Python versions are: HBase-1.1.2.2.6.3.0-235, Spark-2.6.3.0-235, Python-2.7.5.

Code:

from pyspark.sql import Row
from pyspark.sql import SQLContext
from pyspark import SparkContext
import json

sc = SparkContext()
sqlc = SQLContext(sc)

# Build a small test DataFrame from a local range (this df is re-assigned by the HBase read below)
data = range(0, 255)
rdd = sc.parallelize(data).map(lambda i: Row(name=i, age=i))
df = rdd.toDF(schema=['age'])
hasattr(df, "toDF")

# shc-core catalog mapping the HBase table "dk" to the DataFrame columns
cat = json.dumps({"table":{"namespace":"default", "name":"dk", "tableCoder":"PrimitiveType"},"rowkey":"key","columns":{"name":{"cf":"rowkey", "col":"key", "type":"string"},"age":{"cf":"cf1", "col":"age", "type":"string"}}})
print(cat)
df = sqlc.read.option("catalog", cat) \
    .format("org.apache.spark.sql.execution.datasources.hbase") \
    .option("hbase.security.authentication", "kerberos") \
    .option("hbase.master.kerberos.principal", "xx") \
    .option("hbase.master.keytab.file", "xx") \
    .option("hbase.regionserver.kerberos.principal", "xx") \
    .option("hbase.regionserver.keytab.file", "xx") \
    .option("hbase.rpc.protection", "xx") \
    .option("hbase.zookeeper.quorum", "xx") \
    .option("zookeeper.znode.parent", "/hbase-secure") \
    .option("hbase.rootdir", "xx") \
    .option("hbase.security.authentication.spnego.kerberos.keytab", "xx") \
    .option("hbase.security.authentication.spnego.kerberos.principal", "xx") \
    .option("hbase.zookeeper.property.clientPort", "2181") \
    .option("hbase.security.authorization", "true") \
    .option("hbase.superuser", "hbase") \
    .option("hbase.zookeeper.useMulti", "true") \
    .option("user.name", "xx") \
    .load()
print("values:")
df.show()
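
For reference, the catalog string built with json.dumps above corresponds to the following Python dict (same content, just laid out for readability). Per the shc-core catalog format, the DataFrame column name is mapped to the row key and age to the column age in column family cf1 of the HBase table dk:

# The same shc-core catalog as a Python dict, shown only for readability;
# "name" is mapped to the row key and "age" to the column cf1:age of table "dk".
catalog = {
    "table": {"namespace": "default", "name": "dk", "tableCoder": "PrimitiveType"},
    "rowkey": "key",
    "columns": {
        "name": {"cf": "rowkey", "col": "key", "type": "string"},
        "age": {"cf": "cf1", "col": "age", "type": "string"}
    }
}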

spark-submit:

 spark-submit --packages com.hortonworks:shc-core:1.1.1-1.6-s_2.10 --repositories https://repo.hortonworks.com/content/groups/public/ --jars /usr/hdp/current/hbase-client/lib/hbase-server.jar --files /etc/hbase/conf/hbase-site.xml /home/hbaseuser/sasi/check/pysprk/test2.py

Error:

 INFO client.ZooKeeperRegistry: ClusterId read in ZooKeeper is null
Traceback (most recent call last):
  File "/home/hbaseuser/sasi/check/pysprk/test2.py", line 44, in <module>
    df.show()
  File "/usr/hdp/current/spark-client/python/lib/pyspark.zip/pyspark/sql/dataframe.py", line 257, in show
  File "/usr/hdp/current/spark-client/python/lib/py4j-0.9-src.zip/py4j/java_gateway.py", line 813, in __call__
  File "/usr/hdp/current/spark-client/python/lib/pyspark.zip/pyspark/sql/utils.py", line 45, in deco
  File "/usr/hdp/current/spark-client/python/lib/py4j-0.9-src.zip/py4j/protocol.py", line 308, in get_return_value
py4j.protocol.Py4JJavaError: An error occurred while calling o82.showString.
: org.apache.hadoop.hbase.client.RetriesExhaustedException: Can't get the location for replica 0
        at org.apache.hadoop.hbase.client.RpcRetryingCallerWithReadReplicas.getRegionLocations(RpcRetryingCallerWithReadReplicas.java:354)
        at org.apache.hadoop.hbase.client.ScannerCallableWithReplicas.call(ScannerCallableWithReplicas.java:159)
        at org.apache.hadoop.hbase.client.ScannerCallableWithReplicas.call(ScannerCallableWithReplicas.java:61)
        at org.apache.hadoop.hbase.client.RpcRetryingCaller.callWithoutRetries(RpcRetryingCaller.java:211)
        at org.apache.hadoop.hbase.client.ClientScanner.call(ClientScanner.java:327)
        at org.apache.hadoop.hbase.client.ClientScanner.nextScanner(ClientScanner.java:302)
        at org.apache.hadoop.hbase.client.ClientScanner.initializeScannerInConstruction(ClientScanner.java:167)
        at org.apache.hadoop.hbase.client.ClientScanner.<init>(ClientScanner.java:162)
        at org.apache.hadoop.hbase.client.HTable.getScanner(HTable.java:799)
        at org.apache.hadoop.hbase.client.MetaScanner.metaScan(MetaScanner.java:193)
        at org.apache.hadoop.hbase.client.MetaScanner.metaScan(MetaScanner.java:89)
        at org.apache.hadoop.hbase.client.MetaScanner.listTableRegionLocations(MetaScanner.java:343)
        at org.apache.hadoop.hbase.client.HRegionLocator.listRegionLocations(HRegionLocator.java:146)
        at org.apache.hadoop.hbase.client.HRegionLocator.getStartEndKeys(HRegionLocator.java:122)
        at org.apache.spark.sql.execution.datasources.hbase.RegionResource$$anonfun$1.apply(HBaseResources.scala:109)
        at org.apache.spark.sql.execution.datasources.hbase.RegionResource$$anonfun$1.apply(HBaseResources.scala:108)
        at org.apache.spark.sql.execution.datasources.hbase.ReferencedResource$class.releaseOnException(HBaseResources.scala:77)
        at org.apache.spark.sql.execution.datasources.hbase.RegionResource.releaseOnException(HBaseResources.scala:88)
        at org.apache.spark.sql.execution.datasources.hbase.RegionResource.<init>(HBaseResources.scala:108)
        at org.apache.spark.sql.execution.datasources.hbase.HBaseTableScanRDD.getPartitions(HBaseTableScan.scala:61)
        at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:242)
        at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:240)
        at scala.Option.getOrElse(Option.scala:120)
        at org.apache.spark.rdd.RDD.partitions(RDD.scala:240)
        at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
        at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:242)
        at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:240)
        at scala.Option.getOrElse(Option.scala:120)
        at org.apache.spark.rdd.RDD.partitions(RDD.scala:240)
        at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
        at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:242)
        at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:240)
        at scala.Option.getOrElse(Option.scala:120)
        at org.apache.spark.rdd.RDD.partitions(RDD.scala:240)
        at org.apache.spark.sql.execution.SparkPlan.executeTake(SparkPlan.scala:190)
        at org.apache.spark.sql.execution.Limit.executeCollect(basicOperators.scala:165)
        at org.apache.spark.sql.execution.SparkPlan.executeCollectPublic(SparkPlan.scala:174)
        at org.apache.spark.sql.DataFrame$$anonfun$org$apache$spark$sql$DataFrame$$execute$1$1.apply(DataFrame.scala:1500)
        at org.apache.spark.sql.DataFrame$$anonfun$org$apache$spark$sql$DataFrame$$execute$1$1.apply(DataFrame.scala:1500)
        at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:56)
        at org.apache.spark.sql.DataFrame.withNewExecutionId(DataFrame.scala:2087)
        at org.apache.spark.sql.DataFrame.org$apache$spark$sql$DataFrame$$execute$1(DataFrame.scala:1499)
        at org.apache.spark.sql.DataFrame.org$apache$spark$sql$DataFrame$$collect(DataFrame.scala:1506)
        at org.apache.spark.sql.DataFrame$$anonfun$head$1.apply(DataFrame.scala:1376)
        at org.apache.spark.sql.DataFrame$$anonfun$head$1.apply(DataFrame.scala:1375)
        at org.apache.spark.sql.DataFrame.withCallback(DataFrame.scala:2100)
        at org.apache.spark.sql.DataFrame.head(DataFrame.scala:1375)
        at org.apache.spark.sql.DataFrame.take(DataFrame.scala:1457)
        at org.apache.spark.sql.DataFrame.showString(DataFrame.scala:170)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:498)
        at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:231)
        at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:381)
        at py4j.Gateway.invoke(Gateway.java:259)
        at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:133)
        at py4j.commands.CallCommand.execute(CallCommand.java:79)
        at py4j.GatewayConnection.run(GatewayConnection.java:209)
        at java.lang.Thread.run(Thread.java:745)

0 Answers:

There are no answers yet.