I have a simple Scala/Java program to demonstrate the Cassandra Java API.
I have a simple UDT class, Address, which is used in the User class. For some reason userMapper.get(userId) fails with no clear error message. The code is part of a Scala project.
Runner code (Java):
package com.example.crunner;

import java.util.UUID;

import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.CodecRegistry;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Session;
import com.datastax.driver.mapping.Mapper;
import com.datastax.driver.mapping.MappingManager;
import com.datastax.driver.mapping.Result;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class JRunner {

    private final Logger log = LoggerFactory.getLogger(JRunner.class);

    void exp02() {
        log.debug("JAVA -- exp02");
        Cluster cluster = null;
        try {
            CodecRegistry codecRegistry = new CodecRegistry();
            cluster = Cluster.builder() // (1)
                    .withCodecRegistry(codecRegistry)
                    .addContactPoint("127.0.0.1")
                    .build();
            log.debug("connect...exp02");
            Session session = cluster.connect(); // (2)
            MappingManager manager = new MappingManager(session);
            Mapper<User> userMapper = manager.mapper(User.class);
            // For some reason this will break
            {
                log.debug("create user *********************** isClosed: " + cluster.isClosed());
                log.debug("get users");
                ResultSet results = session.execute("SELECT * FROM cTest.user;");
                Result<User> users = userMapper.map(results);
                for (User u : users) {
                    log.debug("User : " + u);
                }
                log.debug("Users printed");
                UUID userId = UUID.fromString("567378a9-8533-4d1c-80a8-71bf4b77189e");
                User u2 = userMapper.get(userId); // <<<--- This line throws the exception (JRunner.java:67)
                log.debug("Select user = " + u2);
            }
        } catch (RuntimeException e) {
            log.error("Exception: " + e);
            e.printStackTrace();
        } finally {
            log.debug("close...exp02");
            if (cluster != null) cluster.close(); // (5)
        }
    }
}
Main (Scala):
package com.example.crunner

import org.slf4j.{Logger, LoggerFactory}

object MainRunner {

  val log: Logger = LoggerFactory.getLogger(getClass())

  def main(args: Array[String]): Unit = {
    val jrunner = new JRunner()
    jrunner.exp02()
  }
}
User class (Java):
package com.example.crunner;

import java.util.UUID;

import com.datastax.driver.mapping.annotations.Column;
import com.datastax.driver.mapping.annotations.PartitionKey;
import com.datastax.driver.mapping.annotations.Table;

@Table(keyspace = "cTest", name = "user",
        readConsistency = "QUORUM",
        writeConsistency = "QUORUM"
        // caseSensitiveKeyspace = false,
        // caseSensitiveTable = false
)
public class User {

    @PartitionKey
    @Column(name = "user_id")
    private UUID userId;

    private String name;

    private Address address;

    public User(UUID userId, String name, Address address) {
        this.userId = userId;
        this.name = name;
        this.address = address;
    }

    public User() { address = new Address(); }

    public UUID getUserId() {
        return userId;
    }

    public void setUserId(UUID userId) {
        this.userId = userId;
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public Address getAddress() {
        return address;
    }

    public void setAddress(Address address) {
        this.address = address;
    }

    @Override
    public String toString() {
        return "User{" +
                "userId=" + userId +
                ", name='" + name + '\'' +
                ", address=" + address +
                '}';
    }
}
UDT Address class (Java):
package com.example.crunner;

import com.datastax.driver.mapping.annotations.Field;
import com.datastax.driver.mapping.annotations.UDT;

@UDT(keyspace = "cTest", name = "addressT") //, caseSensitiveType = true)
public class Address {

    private String street;

    private int zipCode;

    public Address(String street, int zipCode) {
        this.street = street;
        this.zipCode = zipCode;
    }

    public Address() {
    }

    public String getStreet() {
        return street;
    }

    public void setStreet(String street) {
        this.street = street;
    }

    public int getZipCode() {
        return zipCode;
    }

    public void setZipCode(int zipCode) {
        this.zipCode = zipCode;
    }

    @Override
    public String toString() {
        return "Address{" +
                "street='" + street + '\'' +
                ", zipCode=" + zipCode +
                '}';
    }
}
CQL (other tables are not included here):
CREATE TYPE ctest.addresst (
street text,
zipcode int
);
CREATE TABLE ctest.user (
user_id uuid PRIMARY KEY,
address addresst,
name text
) WITH bloom_filter_fp_chance = 0.01
AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'}
AND comment = ''
AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'}
AND compression = {'chunk_length_in_kb': '64', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'}
AND crc_check_chance = 1.0
AND dclocal_read_repair_chance = 0.1
AND default_time_to_live = 0
AND gc_grace_seconds = 864000
AND max_index_interval = 2048
AND memtable_flush_period_in_ms = 0
AND min_index_interval = 128
AND read_repair_chance = 0.0
AND speculative_retry = '99PERCENTILE';
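For reference, here is a minimal sketch (with illustrative values, reusing the userMapper built in exp02() above) of writing a row that matches this schema through the mapper; as noted further below, such saves do succeed against this table:

// Sketch only: write one row containing the Address UDT through the mapper.
// The UUID and field values are illustrative.
UUID newId = UUID.randomUUID();
userMapper.save(new User(newId, "John Doe", new Address("street", 512)));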
build.sbt
name := "CassJExp2"
version := "0.1-SNAPSHOT"
scalaVersion := "2.11.9"
resolvers += "Typesafe Repository" at "http://repo.typesafe.com/typesafe/releases/"
val cassandraVersion = "3.2.0"
val logbackVersion = "1.2.3"
libraryDependencies ++= Seq(
"ch.qos.logback" % "logback-classic" % logbackVersion withSources() withJavadoc(), //
"ch.qos.logback" % "logback-core" % logbackVersion withSources() withJavadoc(), //
"ch.qos.logback" % "logback-access" % logbackVersion withSources() withJavadoc(), //
"org.slf4j" % "slf4j-api" % "1.7.25" withSources() withJavadoc(), //
"joda-time" % "joda-time" % "2.9.9" withSources() withJavadoc(), //
"com.datastax.cassandra" % "cassandra-driver-core" % cassandraVersion withSources() withJavadoc(), //
"com.datastax.cassandra" % "cassandra-driver-mapping" % cassandraVersion withSources() withJavadoc(), //
"com.datastax.cassandra" % "cassandra-driver-extras" % cassandraVersion withSources() withJavadoc() //
)
scalacOptions += "-deprecation"
When I run this code in the sbt console, I get the following output:
18:08:41.447 [run-main-f] DEBUG com.example.crunner.JRunner - JAVA -- exp02
18:08:41.497 [run-main-f] INFO c.d.driver.core.GuavaCompatibility - Detected Guava >= 19 in the classpath, using modern compatibility layer
18:08:41.634 [run-main-f] INFO c.datastax.driver.core.ClockFactory - Using native clock to generate timestamps.
18:08:41.644 [run-main-f] DEBUG com.example.crunner.JRunner - connect...exp02
18:08:41.674 [run-main-f] INFO com.datastax.driver.core.NettyUtil - Did not find Netty's native epoll transport in the classpath, defaulting to NIO.
18:08:42.049 [run-main-f] INFO c.d.d.c.p.DCAwareRoundRobinPolicy - Using data-center name 'datacenter1' for DCAwareRoundRobinPolicy (if this is incorrect, please provide the correct datacenter name with DCAwareRoundRobinPolicy constructor)
18:08:42.051 [run-main-f] INFO com.datastax.driver.core.Cluster - New Cassandra host /127.0.0.1:9042 added
18:08:42.107 [run-main-f] DEBUG com.example.crunner.JRunner - create user *********************** isClosed: false
18:08:42.108 [run-main-f] DEBUG com.example.crunner.JRunner - get users
18:08:42.139 [run-main-f] DEBUG com.example.crunner.JRunner - User : User{userId=54cbad6e-3f27-4b7e-bce0-8a4a4fbffbdf, name='John Doe', address=Address{street='street', zipCode=512}}
18:08:42.139 [run-main-f] DEBUG com.example.crunner.JRunner - User : User{userId=6122b896-8b28-448d-ac5c-4bc9b5c7c7ab, name='John Doe', address=Address{street='street', zipCode=512}}
... output truncated here, table contains about 150 rows ...
18:08:42.175 [run-main-f] DEBUG com.example.crunner.JRunner - User : User{userId=44f69277-ff97-4ba2-9216-bdf65eccd7c3, name='John Doe', address=Address{street='street', zipCode=512}}
18:08:42.175 [run-main-f] DEBUG com.example.crunner.JRunner - Users printed
18:08:42.203 [run-main-f] ERROR com.example.crunner.JRunner - Exception: com.datastax.driver.core.exceptions.ReadFailureException: Cassandra failure during read query at consistency QUORUM (1 responses were required but only 0 replica responded, 1 failed)
com.datastax.driver.core.exceptions.ReadFailureException: Cassandra failure during read query at consistency QUORUM (1 responses were required but only 0 replica responded, 1 failed)
at com.datastax.driver.core.exceptions.ReadFailureException.copy(ReadFailureException.java:130)
at com.datastax.driver.core.exceptions.ReadFailureException.copy(ReadFailureException.java:30)
at com.datastax.driver.mapping.DriverThrowables.propagateCause(DriverThrowables.java:41)
at com.datastax.driver.mapping.Mapper.get(Mapper.java:435)
at com.example.crunner.JRunner.exp02(JRunner.java:67)
at com.example.crunner.MainRunner$.main(MainRunner.scala:18)
at com.example.crunner.MainRunner.main(MainRunner.scala)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at sbt.Run.invokeMain(Run.scala:67)
at sbt.Run.run0(Run.scala:61)
at sbt.Run.sbt$Run$$execute$1(Run.scala:51)
at sbt.Run$$anonfun$run$1.apply$mcV$sp(Run.scala:55)
at sbt.Run$$anonfun$run$1.apply(Run.scala:55)
at sbt.Run$$anonfun$run$1.apply(Run.scala:55)
at sbt.Logger$$anon$4.apply(Logger.scala:84)
at sbt.TrapExit$App.run(TrapExit.scala:248)
at java.lang.Thread.run(Thread.java:745)
Caused by: com.datastax.driver.core.exceptions.ReadFailureException: Cassandra failure during read query at consistency QUORUM (1 responses were required but only 0 replica responded, 1 failed)
at com.datastax.driver.core.exceptions.ReadFailureException.copy(ReadFailureException.java:142)
at com.datastax.driver.core.Responses$Error.asException(Responses.java:140)
at com.datastax.driver.core.DefaultResultSetFuture.onSet(DefaultResultSetFuture.java:179)
at com.datastax.driver.core.RequestHandler.setFinalResult(RequestHandler.java:179)
at com.datastax.driver.core.RequestHandler.access$2400(RequestHandler.java:49)
at com.datastax.driver.core.RequestHandler$SpeculativeExecution.setFinalResult(RequestHandler.java:799)
at com.datastax.driver.core.RequestHandler$SpeculativeExecution.onSet(RequestHandler.java:633)
at com.datastax.driver.core.Connection$Dispatcher.channelRead0(Connection.java:1075)
at com.datastax.driver.core.Connection$Dispatcher.channelRead0(Connection.java:998)
at io.netty.channel.SimpleChannelInboundHandler.channelRead(SimpleChannelInboundHandler.java:105)
at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:357)
at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:343)
at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:336)
at io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:287)
at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:357)
at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:343)
at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:336)
at io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)
at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:357)
at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:343)
at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:336)
at io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:293)
at io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:267)
at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:357)
at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:343)
at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:336)
at io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1294)
at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:357)
at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:343)
at io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:911)
at io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:131)
at io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:643)
at io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:566)
at io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:480)
at io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:442)
at io.netty.util.concurrent.SingleThreadEventExecutor$2.run(SingleThreadEventExecutor.java:131)
at io.netty.util.concurrent.DefaultThreadFactory$DefaultRunnableDecorator.run(DefaultThreadFactory.java:144)
... 1 more
Caused by: com.datastax.driver.core.exceptions.ReadFailureException: Cassandra failure during read query at consistency QUORUM (1 responses were required but only 0 replica responded, 1 failed)
at com.datastax.driver.core.Responses$Error$1.decode(Responses.java:88)
at com.datastax.driver.core.Responses$Error$1.decode(Responses.java:38)
at com.datastax.driver.core.Message$ProtocolDecoder.decode(Message.java:289)
at com.datastax.driver.core.Message$ProtocolDecoder.decode(Message.java:269)
at io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:88)
... 20 more
18:08:42.205 [run-main-f] DEBUG com.example.crunner.JRunner - close...exp02
[success] Total time: 4 s, completed Apr 18, 2017 6:08:45 PM
At the same time, the following error is logged in /var/log/cassandra/system.log:
WARN [ReadStage-2] 2017-04-18 18:08:42,202 AbstractLocalAwareExecutorService.java:169 - Uncaught exception on thread Thread[ReadStage-2,10,main]: {}
java.lang.AssertionError: null
at org.apache.cassandra.db.rows.BTreeRow.getCell(BTreeRow.java:212) ~[apache-cassandra-3.9.jar:3.9]
at org.apache.cassandra.db.SinglePartitionReadCommand.canRemoveRow(SinglePartitionReadCommand.java:895) ~[apache-cassandra-3.9.jar:3.9]
at org.apache.cassandra.db.SinglePartitionReadCommand.reduceFilter(SinglePartitionReadCommand.java:859) ~[apache-cassandra-3.9.jar:3.9]
at org.apache.cassandra.db.SinglePartitionReadCommand.queryMemtableAndSSTablesInTimestampOrder(SinglePartitionReadCommand.java:744) ~[apache-cassandra-3.9.jar:3.9]
at org.apache.cassandra.db.SinglePartitionReadCommand.queryMemtableAndDiskInternal(SinglePartitionReadCommand.java:515) ~[apache-cassandra-3.9.jar:3.9]
at org.apache.cassandra.db.SinglePartitionReadCommand.queryMemtableAndDisk(SinglePartitionReadCommand.java:492) ~[apache-cassandra-3.9.jar:3.9]
at org.apache.cassandra.db.SinglePartitionReadCommand.queryStorage(SinglePartitionReadCommand.java:358) ~[apache-cassandra-3.9.jar:3.9]
at org.apache.cassandra.db.ReadCommand.executeLocally(ReadCommand.java:397) ~[apache-cassandra-3.9.jar:3.9]
at org.apache.cassandra.service.StorageProxy$LocalReadRunnable.runMayThrow(StorageProxy.java:1801) ~[apache-cassandra-3.9.jar:3.9]
at org.apache.cassandra.service.StorageProxy$DroppableRunnable.run(StorageProxy.java:2486) ~[apache-cassandra-3.9.jar:3.9]
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) ~[na:1.8.0_121]
at org.apache.cassandra.concurrent.AbstractLocalAwareExecutorService$FutureTask.run(AbstractLocalAwareExecutorService.java:164) ~[apache-cassandra-3.9.jar:3.9]
at org.apache.cassandra.concurrent.AbstractLocalAwareExecutorService$LocalSessionFutureTask.run(AbstractLocalAwareExecutorService.java:136) [apache-cassandra-3.9.jar:3.9]
at org.apache.cassandra.concurrent.SEPWorker.run(SEPWorker.java:109) [apache-cassandra-3.9.jar:3.9]
at java.lang.Thread.run(Thread.java:745) [na:1.8.0_121]
The Cassandra version is [cqlsh 5.0.1 | Cassandra 3.9 | CQL spec 3.4.2 | Native protocol v4].
So the userMapper can map a ResultSet of users, but fetching a single user fails. The userId I am trying to fetch does exist in the user table. Saving new users to the database with the userMapper also works without failure.
I don't know whether this is related to the User class containing the Address UDT. Tables and mappers without a UDT class work fine.
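To rule out the mapping layer, one check that should help is issuing the same single-partition read as a plain prepared statement. Below is a minimal sketch, assuming the session built in exp02() above; selectUserDirectly is a hypothetical helper, not part of the project. If this fails with the same ReadFailureException, the problem is on the server side rather than in the mapper:

// Hypothetical helper: run the same single-partition read without the mapper.
// Needs com.datastax.driver.core (PreparedStatement, Row, Session, UDTValue) and java.util.UUID.
static void selectUserDirectly(Session session, UUID userId) {
    PreparedStatement ps = session.prepare(
            "SELECT user_id, name, address FROM cTest.user WHERE user_id = ?");
    Row row = session.execute(ps.bind(userId)).one();
    if (row == null) {
        System.out.println("No row found for " + userId);
        return;
    }
    // Without the mapper, the UDT column comes back as a generic UDTValue.
    UDTValue address = row.getUDTValue("address");
    System.out.println(row.getUUID("user_id") + " / " + row.getString("name")
            + " / " + (address == null ? "no address" : address.getString("street")));
}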
Edit:
As Marko Švaljek suggested, I tried the query on the command line:
cqlsh> SELECT * FROM cTest.user where user_id=567378a9-8533-4d1c-80a8-71bf4b77189e;
ReadFailure: Error from server: code=1300 [Replica(s) failed to execute read] message="Operation failed - received 0 responses and 1 failures" info={'failures': 1, 'received_responses': 0, 'required_responses': 1, 'consistency': 'ONE'}
This looks like the same error as from the Java client.
SELECT * FROM cTest.user works fine.
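The mapper call runs at QUORUM because of the readConsistency setting on @Table, while cqlsh defaults to ONE; since both fail, the consistency level itself does not look like the cause. For completeness, here is a sketch of overriding the consistency for a single mapper call (using the driver's Mapper.Option, with the same userId as above):

// Sketch: override the annotation-driven QUORUM read consistency for this one get().
// Needs com.datastax.driver.core.ConsistencyLevel and com.datastax.driver.mapping.Mapper.
User u2 = userMapper.get(userId, Mapper.Option.consistencyLevel(ConsistencyLevel.ONE));
log.debug("Select user = " + u2);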
Edit 2:
This is a single-node environment.
nodetool status
Datacenter: datacenter1
=======================
Status=Up/Down
|/ State=Normal/Leaving/Joining/Moving
-- Address Load Tokens Owns Host ID Rack
UN 127.0.0.1 354.4 KiB 256 ? 33490146-da36-4359-bb24-42854bdb3c26 rack1
Note: Non-system keyspaces don't have the same replication settings, effective ownership information is meaningless
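Since every read here has to be answered by this single node, it can also be worth confirming the keyspace's replication settings. A sketch, assuming the session from exp02() above (Cassandra 3.x stores these settings in system_schema.keyspaces):

// Sketch: print the replication settings of the cTest keyspace.
// Needs com.datastax.driver.core.Row and java.util.Map.
Row ks = session.execute(
        "SELECT replication FROM system_schema.keyspaces WHERE keyspace_name = 'ctest'").one();
if (ks != null) {
    Map<String, String> replication = ks.getMap("replication", String.class, String.class);
    log.debug("cTest replication = " + replication);
}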
What is the cause of this error and how can I fix it? Thank you for your support.