I am setting up session replication between two Tomcats running on different machines, fronted by mod_jk. When one Tomcat is shut down and I refresh, the request fails over to the other Tomcat, but my session expires. Ideally the session should survive the failover. Any help would be highly appreciated. Here are my configuration files.
server.xml
<Engine name="Catalina" defaultHost="localhost" jvmRoute="worker1">

  <!--For clustering, please take a look at documentation at:
      /docs/cluster-howto.html  (simple how to)
      /docs/config/cluster.html (reference documentation) -->
  <!--
  <Cluster className="org.apache.catalina.ha.tcp.SimpleTcpCluster"/>
  -->

  <!-- Clustering configuration start -->
  <Cluster className="org.apache.catalina.ha.tcp.SimpleTcpCluster"
           channelSendOptions="8">

    <!--<Manager className="org.apache.catalina.ha.session.DeltaManager"
                expireSessionsOnShutdown="false"
                notifyListenersOnReplication="true"/>-->

    <Channel className="org.apache.catalina.tribes.group.GroupChannel">
      <Membership className="org.apache.catalina.tribes.membership.McastService"
                  address="228.0.0.4"
                  port="45564"
                  frequency="500"
                  dropTime="3000"/>
      <Sender className="org.apache.catalina.tribes.transport.ReplicationTransmitter">
        <Transport className="org.apache.catalina.tribes.transport.nio.PooledParallelSender"/>
      </Sender>
      <Receiver className="org.apache.catalina.tribes.transport.nio.NioReceiver"
                address="auto"
                port="4000"
                autoBind="100"
                selectorTimeout="5000"
                maxThreads="6"/>
      <Interceptor className="org.apache.catalina.tribes.group.interceptors.TcpFailureDetector"/>
      <Interceptor className="org.apache.catalina.tribes.group.interceptors.MessageDispatch15Interceptor"/>
      <Interceptor className="org.apache.catalina.tribes.group.interceptors.ThroughputInterceptor"/>
    </Channel>

    <Valve className="org.apache.catalina.ha.tcp.ReplicationValve"/>
    <ClusterListener className="org.apache.catalina.ha.session.ClusterSessionListener"/>
  </Cluster>
  <!-- Clustering configuration end -->
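For reference, the cluster how-to linked in the comments above also requires the web application itself to be marked distributable (and all session attributes to be Serializable), otherwise Tomcat will not replicate the session at all. A minimal web.xml sketch (the schema version is illustrative):

<?xml version="1.0" encoding="UTF-8"?>
<web-app xmlns="http://java.sun.com/xml/ns/javaee"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://java.sun.com/xml/ns/javaee
                             http://java.sun.com/xml/ns/javaee/web-app_3_0.xsd"
         version="3.0">
  <!-- Marks this webapp as safe to run in a distributed (clustered)
       environment; without it the cluster does not replicate sessions -->
  <distributable/>
</web-app>

On the second machine the same Cluster block is used, but jvmRoute in its server.xml must be "worker2" so that it matches the worker name in the workers.properties below.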
workers.properties
# First we define the list of virtual workers
worker.list=jkstatus,LoadBalancer
# Set the types of the virtual workers defined above
worker.jkstatus.type=status
worker.LoadBalancer.type=lb
# Add Tomcat instances as workers, two workers in our case
worker.worker1.type=ajp13
worker.worker1.host=localhost
worker.worker1.port=8009
worker.worker2.type=ajp13
worker.worker2.host=10.57.79.232
worker.worker2.port=8019
# Provide workers list to the load balancer
worker.LoadBalancer.balance_workers=worker1,worker2
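For completeness, this is how requests reach the load balancer on the Apache httpd side; a sketch with illustrative file paths, assuming mod_jk is already loaded:

# httpd.conf -- point mod_jk at the workers file defined above
JkWorkersFile /etc/httpd/conf/workers.properties
JkLogFile     /var/log/httpd/mod_jk.log
JkLogLevel    info
# Send all application requests through the load balancer worker
JkMount /* LoadBalancer
# Expose the status worker for monitoring
JkMount /jkstatus jkstatus

mod_jk's sticky_session option defaults to true, so when worker1 goes down the balancer should fail the request over to worker2 while keeping the same session cookie.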