我使用centos 6.5作为操作系统,我的postgres版本是postgreSql 9.3,我使用jar c3p0-0.9.1.1.jar和postgresql-9.4.1208.jre6.jar。
我的应用程序和数据库位于同一物理服务器中,功能非常强大且具有以下性能:24 cpu,256 GB内存
这是与数据库连接的配置:applicationContext-db.xml
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE beans PUBLIC "-//SPRING//DTD BEAN//EN"
"http://www.springframework.org/dtd/spring-beans.dtd">
<beans>
<bean id="dataSource" class = "com.mchange.v2.c3p0.ComboPooledDataSource" destroy-method="close">
<property name="driverClass" value="org.postgresql.Driver"/>
<property name="jdbcUrl" value="jdbc:postgresql://localhost:5432/test"/>
<property name="user" value="postgres"/>
<property name="password" value="postgres"/>
<!-- pool sizing -->
<property name="initialPoolSize" value="20" />
<property name="minPoolSize" value="20" />
<property name="maxPoolSize" value="200" />
<property name="acquireIncrement" value="3" />
<property name="maxStatements" value="0" />
<!-- retries -->
<property name="acquireRetryAttempts" value="30" />
<property name="acquireRetryDelay" value="1000" /> <!-- 1s -->
<property name="breakAfterAcquireFailure" value="false" />
<!-- refreshing connections -->
<property name="maxIdleTime" value="180" /> <!-- 3min -->
<property name="maxConnectionAge" value="10" /> <!-- 10s; NOTE: comment said 1h — use 3600 for one hour -->
<!-- timeouts and testing -->
<property name="checkoutTimeout" value="30000" /> <!-- 30s -->
<property name="idleConnectionTestPeriod" value="60" /> <!-- 60 -->
<property name="testConnectionOnCheckout" value="true" />
<property name="preferredTestQuery" value="SELECT 1" />
<property name="testConnectionOnCheckin" value="true" />
</bean>
</beans>
这是 postgresql.conf 的配置:
# HEADER: This file was autogenerated at 2015-04-15 19:27:27 +0300
# HEADER: by puppet. While it can still be managed manually, it
# HEADER: is definitely not recommended.
# -----------------------------
# PostgreSQL configuration file
# -----------------------------
max_connections = 1500 # (change requires restart)
# Note: Increasing max_connections costs ~400 bytes of shared memory per
# connection slot, plus lock space (see max_locks_per_transaction).
#superuser_reserved_connections = 3 # (change requires restart)
#unix_socket_directories = '/tmp' # comma-separated list of directories
# (change requires restart)
#unix_socket_group = '' # (change requires restart)
#unix_socket_permissions = 0777 # begin with 0 to use octal notation
# (change requires restart)
#bonjour = off # advertise server via Bonjour
# (change requires restart)
#bonjour_name = '' # defaults to the computer name
# (change requires restart)
# - Security and Authentication -
authentication_timeout = 5min # 1s-600s
#ssl = off # (change requires restart)
#ssl_ciphers = 'DEFAULT:!LOW:!EXP:!MD5:@STRENGTH' # allowed SSL ciphers
# (change requires restart)
#ssl_renegotiation_limit = 512MB # amount of data between renegotiations
#ssl_cert_file = 'server.crt' # (change requires restart)
#ssl_key_file = 'server.key' # (change requires restart)
#ssl_ca_file = '' # (change requires restart)
#ssl_crl_file = '' # (change requires restart)
#password_encryption = on
#db_user_namespace = off
# Kerberos and GSSAPI
#krb_server_keyfile = ''
#krb_srvname = 'postgres' # (Kerberos only)
#krb_caseins_users = off
# - TCP Keepalives -
# see "man 7 tcp" for details
#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds;
# 0 selects the system default
#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds;
# 0 selects the system default
#tcp_keepalives_count = 0 # TCP_KEEPCNT;
# 0 selects the system default
#------------------------------------------------------------------------------
# RESOURCE USAGE (except WAL)
#------------------------------------------------------------------------------
# - Memory -
shared_buffers = 8GB # min 128kB
# (change requires restart)
#temp_buffers = 8MB # min 800kB
#max_prepared_transactions = 0 # zero disables the feature
# (change requires restart)
# Note: Increasing max_prepared_transactions costs ~600 bytes of shared memory
# per transaction slot, plus lock space (see max_locks_per_transaction).
# It is not advisable to set max_prepared_transactions nonzero unless you
# actively intend to use prepared transactions.
#work_mem = 1MB # min 64kB
#maintenance_work_mem = 16MB # min 1MB
#max_stack_depth = 2MB # min 100kB
# - Disk -
#temp_file_limit = -1 # limits per-session temp file space
# in kB, or -1 for no limit
# - Kernel Resource Usage -
#max_files_per_process = 1000 # min 25
# (change requires restart)
#shared_preload_libraries = '' # (change requires restart)
# - Cost-Based Vacuum Delay -
#vacuum_cost_delay = 0 # 0-100 milliseconds
#vacuum_cost_page_hit = 1 # 0-10000 credits
#vacuum_cost_page_miss = 10 # 0-10000 credits
#vacuum_cost_page_dirty = 20 # 0-10000 credits
#vacuum_cost_limit = 200 # 1-10000 credits
# - Background Writer -
#bgwriter_delay = 200ms # 10-10000ms between rounds
#bgwriter_lru_maxpages = 100 # 0-1000 max buffers written/round
#bgwriter_lru_multiplier = 2.0 # 0-10.0 multipler on buffers scanned/round
# - Asynchronous Behavior -
#effective_io_concurrency = 1 # 1-1000; 0 disables prefetching
#------------------------------------------------------------------------------
# WRITE AHEAD LOG
#------------------------------------------------------------------------------
# - Settings -
#wal_level = minimal # minimal, archive, or hot_standby
# (change requires restart)
#fsync = on # turns forced synchronization on or off
#synchronous_commit = on # synchronization level;
# off, local, remote_write, or on
#wal_sync_method = fsync # the default is the first option
# supported by the operating system:
# open_datasync
# fdatasync (default on Linux)
# fsync
# fsync_writethrough
# open_sync
#full_page_writes = on # recover from partial page writes
#wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers
# (change requires restart)
#wal_writer_delay = 200ms # 1-10000 milliseconds
#commit_delay = 0 # range 0-100000, in microseconds
#commit_siblings = 5 # range 1-1000
# - Checkpoints -
#checkpoint_segments = 3 # in logfile segments, min 1, 16MB each
#checkpoint_timeout = 5min # range 30s-1h
#checkpoint_completion_target = 0.5 # checkpoint target duration, 0.0 - 1.0
#checkpoint_warning = 30s # 0 disables
# - Archiving -
#archive_mode = off # allows archiving to be done
# (change requires restart)
#archive_command = '' # command to use to archive a logfile segment
# placeholders: %p = path of file to archive
# %f = file name only
# e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f'
#archive_timeout = 0 # force a logfile segment switch after this
# number of seconds; 0 disables
#------------------------------------------------------------------------------
# REPLICATION
#------------------------------------------------------------------------------
# - Sending Server(s) -
# Set these on the master and on any standby that will send replication data.
#max_wal_senders = 0 # max number of walsender processes
# (change requires restart)
wal_keep_segments = 32
#wal_keep_segments = 0 # in logfile segments, 16MB each; 0 disables
#wal_sender_timeout = 60s # in milliseconds; 0 disables
# - Master Server -
# These settings are ignored on a standby server.
#synchronous_standby_names = '' # standby servers that provide sync rep
# comma-separated list of application_name
# from standby(s); '*' = all
#vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed
# - Standby Servers -
# These settings are ignored on a master server.
#hot_standby = off # "on" allows queries during recovery
# (change requires restart)
#max_standby_archive_delay = 30s # max delay before canceling queries
# when reading WAL from archive;
# -1 allows indefinite delay
#max_standby_streaming_delay = 30s # max delay before canceling queries
# when reading streaming WAL;
# -1 allows indefinite delay
#wal_receiver_status_interval = 10s # send replies at least this often
# 0 disables
#hot_standby_feedback = off # send info from standby to prevent
# query conflicts
#wal_receiver_timeout = 60s # time that receiver waits for
# communication from master
# in milliseconds; 0 disables
#------------------------------------------------------------------------------
# QUERY TUNING
#------------------------------------------------------------------------------
# - Planner Method Configuration -
#enable_bitmapscan = on
#enable_hashagg = on
#enable_hashjoin = on
#enable_indexscan = on
#enable_indexonlyscan = on
#enable_material = on
#enable_mergejoin = on
#enable_nestloop = on
#enable_seqscan = on
#enable_sort = on
#enable_tidscan = on
# - Planner Cost Constants -
#seq_page_cost = 1.0 # measured on an arbitrary scale
#random_page_cost = 4.0 # same scale as above
#cpu_tuple_cost = 0.01 # same scale as above
#cpu_index_tuple_cost = 0.005 # same scale as above
#cpu_operator_cost = 0.0025 # same scale as above
#effective_cache_size = 128MB
# - Genetic Query Optimizer -
#geqo = on
#geqo_threshold = 12
#geqo_effort = 5 # range 1-10
#geqo_pool_size = 0 # selects default based on effort
#geqo_generations = 0 # selects default based on effort
#geqo_selection_bias = 2.0 # range 1.5-2.0
#geqo_seed = 0.0 # range 0.0-1.0
# - Other Planner Options -
#default_statistics_target = 100 # range 1-10000
#constraint_exclusion = partition # on, off, or partition
#cursor_tuple_fraction = 0.1 # range 0.0-1.0
#from_collapse_limit = 8
#join_collapse_limit = 8 # 1 disables collapsing of explicit
# JOIN clauses
#------------------------------------------------------------------------------
# ERROR REPORTING AND LOGGING
#------------------------------------------------------------------------------
# - Where to Log -
log_destination = stderr # Valid values are combinations of
# stderr, csvlog, syslog, and eventlog,
# depending on platform. csvlog
# requires logging_collector to be on.
# This is used when logging to stderr:
logging_collector = on # Enable capturing of stderr and csvlog
# into log files. Required to be on for
# csvlogs.
# (change requires restart)
# These are only used if logging_collector is on:
log_directory = pg_log # directory where log files are written,
# can be absolute or relative to PGDATA
log_filename = 'postgresql-%a.log' # log file name pattern,
# can include strftime() escapes
#log_file_mode = 0600 # creation mode for log files,
# begin with 0 to use octal notation
log_truncate_on_rotation = on # If on, an existing log file with the
# same name as the new log file will be
# truncated rather than appended to.
# But such truncation only occurs on
# time-driven rotation, not on restarts
# or size-driven rotation. Default is
# off, meaning append to existing files
# in all cases.
log_rotation_age = 1d # Automatic rotation of logfiles will
# happen after that time. 0 disables.
log_rotation_size = 0 # Automatic rotation of logfiles will
# happen after that much log output.
# 0 disables.
# These are relevant when logging to syslog:
#syslog_facility = 'LOCAL0'
#syslog_ident = 'postgres'
# This is only relevant when logging to eventlog (win32):
#event_source = 'PostgreSQL'
# - When to Log -
#client_min_messages = notice # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# log
# notice
# warning
# error
#log_min_messages = warning # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic
#log_min_error_statement = error # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic (effectively off)
#log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements
# and their durations, > 0 logs only
# statements running at least this number
# of milliseconds
# - What to Log -
#debug_print_parse = off
#debug_print_rewritten = off
#debug_print_plan = off
#debug_pretty_print = on
#log_checkpoints = off
#log_connections = off
#log_disconnections = off
#log_duration = off
#log_error_verbosity = default # terse, default, or verbose messages
#log_hostname = off
log_line_prefix = '----------------- %m --------------------------------------------------------\n %a %u@%d %p \n %r \n %e' # special values:
# %a = application name
# %u = user name
# %d = database name
# %r = remote host and port
# %h = remote host
# %p = process ID
# %t = timestamp without milliseconds
# %m = timestamp with milliseconds
# %i = command tag
# %e = SQL state
# %c = session ID
# %l = session line number
# %s = session start timestamp
# %v = virtual transaction ID
# %x = transaction ID (0 if none)
# %q = stop here in non-session
# processes
# %% = '%'
# e.g. '<%u%%%d> '
#log_lock_waits = off # log lock waits >= deadlock_timeout
#log_statement = 'none' # none, ddl, mod, all
#log_temp_files = -1 # log temporary files equal or larger
# than the specified size in kilobytes;
# -1 disables, 0 logs all temp files
log_timezone = 'Asia/Aden'
#------------------------------------------------------------------------------
# RUNTIME STATISTICS
#------------------------------------------------------------------------------
# - Query/Index Statistics Collector -
#track_activities = on
#track_counts = on
#track_io_timing = off
#track_functions = none # none, pl, all
#track_activity_query_size = 1024 # (change requires restart)
#update_process_title = on
#stats_temp_directory = 'pg_stat_tmp'
# - Statistics Monitoring -
#log_parser_stats = off
#log_planner_stats = off
#log_executor_stats = off
#log_statement_stats = off
#------------------------------------------------------------------------------
# AUTOVACUUM PARAMETERS
#------------------------------------------------------------------------------
#autovacuum = on # Enable autovacuum subprocess? 'on'
# requires track_counts to also be on.
#log_autovacuum_min_duration = -1 # -1 disables, 0 logs all actions and
# their durations, > 0 logs only
# actions running at least this number
# of milliseconds.
#autovacuum_max_workers = 3 # max number of autovacuum subprocesses
# (change requires restart)
#autovacuum_naptime = 1min # time between autovacuum runs
#autovacuum_vacuum_threshold = 50 # min number of row updates before
# vacuum
#autovacuum_analyze_threshold = 50 # min number of row updates before
# analyze
#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum
#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze
#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum
# (change requires restart)
#autovacuum_multixact_freeze_max_age = 400000000 # maximum Multixact age
# before forced vacuum
# (change requires restart)
#autovacuum_vacuum_cost_delay = 20ms # default vacuum cost delay for
# autovacuum, in milliseconds;
# -1 means use vacuum_cost_delay
#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for
# autovacuum, -1 means use
# vacuum_cost_limit
#------------------------------------------------------------------------------
# CLIENT CONNECTION DEFAULTS
#------------------------------------------------------------------------------
# - Statement Behavior -
#search_path = '"$user",public' # schema names
#default_tablespace = '' # a tablespace name, '' uses the default
#temp_tablespaces = '' # a list of tablespace names, '' uses
# only default tablespace
#check_function_bodies = on
#default_transaction_isolation = 'read committed'
#default_transaction_read_only = off
#default_transaction_deferrable = off
#session_replication_role = 'origin'
#statement_timeout = 0 # in milliseconds, 0 is disabled
#lock_timeout = 0 # in milliseconds, 0 is disabled
#vacuum_freeze_min_age = 50000000
#vacuum_freeze_table_age = 150000000
#vacuum_multixact_freeze_min_age = 5000000
#vacuum_multixact_freeze_table_age = 150000000
#bytea_output = 'hex' # hex, escape
#xmlbinary = 'base64'
#xmloption = 'content'
# - Locale and Formatting -
datestyle = 'iso, mdy'
#intervalstyle = 'postgres'
timezone = 'Asia/Aden'
#timezone_abbreviations = 'Default' # Select the set of available time zone
# abbreviations. Currently, there are
# Default
# Australia (historical usage)
# India
# You can create your own file in
# share/timezonesets/.
#extra_float_digits = 0 # min -15, max 3
#client_encoding = sql_ascii # actually, defaults to database
# encoding
# These settings are initialized by initdb, but they can be changed.
lc_messages = C # locale for system error message
# strings
lc_monetary = C # locale for monetary formatting
lc_numeric = C # locale for number formatting
lc_time = C # locale for time formatting
# default configuration for text search
default_text_search_config = 'pg_catalog.english'
# - Other Defaults -
#dynamic_library_path = '$libdir'
#local_preload_libraries = ''
#------------------------------------------------------------------------------
# LOCK MANAGEMENT
#------------------------------------------------------------------------------
#deadlock_timeout = 1s
#max_locks_per_transaction = 64 # min 10
# (change requires restart)
# Note: Each lock table slot uses ~270 bytes of shared memory, and there are
# max_locks_per_transaction * (max_connections + max_prepared_transactions)
# lock table slots.
#max_pred_locks_per_transaction = 64 # min 10
# (change requires restart)
#------------------------------------------------------------------------------
# VERSION/PLATFORM COMPATIBILITY
#------------------------------------------------------------------------------
# - Previous PostgreSQL Versions -
#array_nulls = on
#backslash_quote = safe_encoding # on, off, or safe_encoding
#default_with_oids = off
#escape_string_warning = on
#lo_compat_privileges = off
#quote_all_identifiers = off
#sql_inheritance = on
#standard_conforming_strings = on
#synchronize_seqscans = on
# - Other Platforms and Clients -
#transform_null_equals = off
#------------------------------------------------------------------------------
# ERROR HANDLING
#------------------------------------------------------------------------------
#exit_on_error = off # terminate session on any error?
#restart_after_crash = on # reinitialize after backend crash?
#------------------------------------------------------------------------------
# CONFIG FILE INCLUDES
#------------------------------------------------------------------------------
# These options allow settings to be loaded from files other than the
# default postgresql.conf.
#include_dir = 'conf.d' # include files ending in '.conf' from
# directory 'conf.d'
#include_if_exists = 'exists.conf' # include file only if it exists
#include = 'special.conf' # include file
#------------------------------------------------------------------------------
# CUSTOMIZED OPTIONS
#------------------------------------------------------------------------------
# Add settings for extensions here
maintenance_work_mem = 1GB
temp_buffers = 32MB
work_mem = 64MB
checkpoint_segments = 32
max_wal_senders = 5
wal_keep_segments = 32
wal_level = hot_standby
data_directory = '/var/lib/pgsql/9.3/data'
listen_addresses = '*'
log_min_duration_statement = 2min
port = 5432
effective_cache_size = 16GB
当应用程序的负载变大、请求数量增加时,
我遇到了以下错误:
Caused by: java.sql.SQLException: An attempt by a client to checkout a Connection has timed out.
at com.mchange.v2.sql.SqlUtils.toSQLException(SqlUtils.java:106)
at com.mchange.v2.sql.SqlUtils.toSQLException(SqlUtils.java:65)
at com.mchange.v2.c3p0.impl.C3P0PooledConnectionPool.checkoutPooledConnection(C3P0PooledConnectionPool.java:527)
at com.mchange.v2.c3p0.impl.AbstractPoolBackedDataSource.getConnection(AbstractPoolBackedDataSource.java:128)
at org.springframework.orm.hibernate3.LocalDataSourceConnectionProvider.getConnection(LocalDataSourceConnectionProvider.java:81)
at org.hibernate.jdbc.ConnectionManager.openConnection(ConnectionManager.java:446)
... 202 more
Caused by: com.mchange.v2.resourcepool.TimeoutException: A client timed out while waiting to acquire a resource from com.mchange.v2.resourcepool.BasicResourcePool@50e84eee -- timeout at awaitAvailable()
at com.mchange.v2.resourcepool.BasicResourcePool.awaitAvailable(BasicResourcePool.java:1317)
at com.mchange.v2.resourcepool.BasicResourcePool.prelimCheckoutResource(BasicResourcePool.java:557)
at com.mchange.v2.resourcepool.BasicResourcePool.checkoutResource(BasicResourcePool.java:477)
at com.mchange.v2.c3p0.impl.C3P0PooledConnectionPool.checkoutPooledConnection(C3P0PooledConnectionPool.java:525)
... 205 more
注意:使用该应用程序的用户数量约为1500
我认为错误发生在postgres配置(postgresql.conf)中,特别是在这两个参数中:
shared_buffers
max_connections
我无法很好地计算 max_connections 的值(现在是 1500,在我看来太大了)。这个值通常应该与 applicationContext-db.xml 文件中的连接池参数保持一致,并且应该与服务器的性能相匹配。
同样在我的应用程序中我有这段代码,以便在我们丢失连接时重新连接
// Returns a Hibernate Session for the given configuration file.
// Resolution order: (1) an explicitly injected 'session' field, if open;
// (2) a thread-local session created from the injected sessionFactory,
//     probed with a "SELECT 1" round-trip and reopened if the probe fails;
// (3) a thread-local session from a factory looked up by config file name.
// NOTE(review): sessions created here are cached in a ThreadLocal and never
// closed by this method; unless every caller closes them, each thread pins a
// pooled connection — a likely contributor to c3p0 checkout timeouts.
protected Session getSession(String configFile) {
    if ((null != session) && session.isOpen()) {
        return session;
    } else if (null != sessionFactory) {
        Session s = currentSession.get();
        if ((null == s) || !s.isOpen()) {
            s = sessionFactory.openSession();
            currentSession.set(s);
        } else {
            try {
                // Liveness probe: run a trivial statement through the session's JDBC connection.
                HibernateWork hibernateWork = new HibernateWork("SELECT 1 ");
                s.doWork(hibernateWork);
            } catch (Exception e) {
                System.err.println("------ Connection is closed, reconnection start");
                // NOTE(review): the dead session is replaced but never closed,
                // so its pooled connection is not returned to c3p0.
                s = sessionFactory.openSession();
                currentSession.set(s);
            }
        }
        return s;
    } else {
        Session s = currentSession.get();
        if ((null == s) || !s.isOpen()) {
            s = getSessionFactory(configFile).openSession();
            currentSession.set(s);
        }
        return s;
    }
}
是否有人可以帮助我正确配置连接池设置
提前谢谢
已更新:
我使用的是最新版本的c3p0,即c3p0-0.9.5.2.jar,但仍存在相同的连接丢失问题:
2017-12-03 11:32:08,414 WARN [org.hibernate.util.JDBCExceptionReporter] (ajp-0.0.0.0-8010-68) SQL Error: 0, SQLState: null
2017-12-03 11:32:08,415 ERROR [org.hibernate.util.JDBCExceptionReporter] (ajp-0.0.0.0-8010-68) An attempt by a client to checkout a Connection has timed out.
..........
..........
2017-12-03 11:32:08,423 ERROR [STDERR] (ajp-0.0.0.0-8010-130) Caused by: com.mchange.v2.resourcepool.TimeoutException: A client timed out while waiting to acquire a resource from com.mchange.v2.resourcepool.BasicResourcePool@49d523bd -- timeout at awaitAvailable()
2017-12-03 11:32:08,423 ERROR [STDERR] (ajp-0.0.0.0-8010-130) at com.mchange.v2.resourcepool.BasicResourcePool.awaitAvailable(BasicResourcePool.java:1467)
我在搜索后发现有些人遇到了同样类型的问题,他们把这个配置放在applicationContext-db.xml中:
<property name="maxStatements" value="1100" />
<property name="checkoutTimeout" value="60000" />
我希望在投入生产之前了解您对此更改的看法。
答案 0(得分:0):
您必须开始关闭会话。这是典型的资源泄漏。
典型的CRUD操作应该如下所示
Session s=getSession();
try{
//do your work with session, save, load update whatever
// optionally catch some exceptions
}finally{
s.close(); // close session and return connection
}
s.close()
就是你在这里所缺少的。
close()
位于finally
块中,因此无论是正常执行还是发生异常,它都一定会被执行。
记住这一点:
SessionFactory
是线程安全和重量级组件,应该创建一次,并在整个应用程序中共享
Session
非线程安全并且是轻量级组件,因此它不应该在整个应用程序中共享,而是尽快创建和关闭。
答案 1(得分:0):
对于我的crud方法我有 _BaseRootDAO 类:
import java.io.Serializable;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import org.hibernate.Criteria;
import org.hibernate.HibernateException;
import org.hibernate.Query;
import org.hibernate.Session;
import org.hibernate.Transaction;
import org.hibernate.SessionFactory;
import org.hibernate.cfg.Configuration;
import org.hibernate.criterion.Expression;
import org.hibernate.criterion.Order;
import com.test.hibernate.util.HibernateWork;
public abstract class _BaseRootDAO {
/** Default constructor: session and factory are resolved lazily via getSession(). */
public _BaseRootDAO () {}
/** Creates a DAO bound to an externally managed Session. */
public _BaseRootDAO (Session session) {
    setSession(session);
}
// Registry of SessionFactory instances keyed by config file name ("" = default).
protected static Map<String, SessionFactory> sessionFactoryMap;
// Optional explicitly injected factory and session (see setSessionFactory / setSession).
protected SessionFactory sessionFactory;
protected Session session;
// Per-thread session cache used when no explicit session is injected.
// NOTE(review): entries are only removed by closeCurrentSession(); threads
// that never call it keep a pooled connection checked out indefinitely.
protected final static ThreadLocal<Session> currentSession = new ThreadLocal<Session>();
/** Convenience overload: resolves a Session using this DAO's default configuration file name. */
public Session getSession() {
    final String configFile = getConfigurationFileName();
    return getSession(configFile);
}
// Returns a Hibernate Session for the given configuration file.
// Resolution order: (1) an explicitly injected 'session' field, if open;
// (2) a thread-local session created from the injected sessionFactory,
//     probed with a "SELECT 1" round-trip and reopened if the probe fails;
// (3) a thread-local session from a factory looked up by config file name.
// NOTE(review): sessions created here are cached in a ThreadLocal and never
// closed by this method; unless every caller closes them, each thread pins a
// pooled connection — a likely contributor to c3p0 checkout timeouts.
protected Session getSession(String configFile) {
    if ((null != session) && session.isOpen()) {
        return session;
    } else if (null != sessionFactory) {
        Session s = currentSession.get();
        if ((null == s) || !s.isOpen()) {
            s = sessionFactory.openSession();
            currentSession.set(s);
        } else {
            try {
                // Liveness probe: run a trivial statement through the session's JDBC connection.
                HibernateWork hibernateWork = new HibernateWork("SELECT 1 ");
                s.doWork(hibernateWork);
            } catch (Exception e) {
                System.err.println("------ Connection is closed, reconnection start");
                // NOTE(review): the dead session is replaced but never closed,
                // so its pooled connection is not returned to c3p0.
                s = sessionFactory.openSession();
                currentSession.set(s);
            }
        }
        return s;
    } else {
        Session s = currentSession.get();
        if ((null == s) || !s.isOpen()) {
            s = getSessionFactory(configFile).openSession();
            currentSession.set(s);
        }
        return s;
    }
}
/** Injects an externally managed Session; while it stays open, getSession() returns it verbatim. */
public void setSession(Session session) {
    this.session = session;
}
/**
* Configure the session factory by reading hibernate config file
*/
/**
 * Builds the default SessionFactory by reading the default hibernate
 * configuration; delegates to the generated _RootDAO entry point with a
 * null config file name.
 */
public static void initialize () {
    com.dq.admincom.hibernate.model.dao._RootDAO.initialize(
        (String) null);
}
/**
 * Builds and registers a SessionFactory for the given hibernate config file.
 * NOTE(review): getNewConfiguration is called with null rather than
 * configFileName; harmless today because getNewConfiguration ignores its
 * argument, but fragile if that ever changes.
 */
public static void initialize (String configFileName) {
    com.dq.admincom.hibernate.model.dao._RootDAO.initialize(
        configFileName,
        com.dq.admincom.hibernate.model.dao._RootDAO.getNewConfiguration(
            null));
}
/**
 * Configures the given Configuration (from the default hibernate.cfg.xml
 * when configFileName is null, otherwise from the named file), builds a
 * SessionFactory and registers it under configFileName. A second call for
 * an already-registered name is a no-op.
 * NOTE(review): the check-then-build sequence is not synchronized; two
 * threads initializing the same name concurrently can each build a
 * SessionFactory — confirm initialization happens once at startup.
 */
public static void initialize (String configFileName, Configuration configuration) {
    if (null != sessionFactoryMap && null != sessionFactoryMap.get(configFileName)) return;
    else {
        if (null == configFileName) {
            configuration.configure();
            com.dq.admincom.hibernate.model.dao._RootDAO.setSessionFactory(
                null,
                configuration.buildSessionFactory());
        }
        else {
            configuration.configure(
                configFileName);
            com.dq.admincom.hibernate.model.dao._RootDAO.setSessionFactory(
                configFileName,
                configuration.buildSessionFactory());
        }
    }
}
/**
* Set the session factory
*/
/** Injects a SessionFactory to be used instead of the registry lookup. */
public void setSessionFactory(SessionFactory sessionFactory) {
    this.sessionFactory = sessionFactory;
}
/**
* Set the session factory
*/
/**
 * Registers a SessionFactory under the given config file name (null is
 * normalized to ""), lazily creating the registry map on first use.
 * NOTE(review): a plain HashMap mutated from static context — not safe under
 * concurrent initialization; see the note on initialize().
 */
protected static void setSessionFactory (String configFileName, SessionFactory sf) {
    if (null == configFileName) configFileName = "";
    if (null == sessionFactoryMap) sessionFactoryMap = new HashMap<String, SessionFactory>();
    sessionFactoryMap.put(
        configFileName,
        sf);
}
/** Returns the injected SessionFactory, or resolves one by the default config file name. */
public SessionFactory getSessionFactory() {
    if (sessionFactory != null) {
        return sessionFactory;
    }
    return getSessionFactory(getConfigurationFileName());
}
/**
 * Returns the SessionFactory registered for the given config file name
 * (null is normalized to ""), lazily initializing it on first use.
 *
 * Fixes: (1) lazy initialization previously ran only when the whole registry
 * map was null — a request for a not-yet-registered name on an existing map
 * threw instead of initializing; it now also runs when the name is missing.
 * (2) "occured" typo in the failure message.
 *
 * @throws RuntimeException if the factory could not be initialized
 */
public SessionFactory getSessionFactory(String configFileName) {
    if (null == configFileName) configFileName = "";
    // Initialize when the registry is absent or does not yet contain this name.
    if (null == sessionFactoryMap || null == sessionFactoryMap.get(configFileName))
        initialize(configFileName);
    SessionFactory sf = (SessionFactory) sessionFactoryMap.get(configFileName);
    if (null == sf)
        throw new RuntimeException("The session factory for '" + configFileName + "' has not been initialized (or an error occurred during initialization)");
    else
        return sf;
}
/**
* Close all sessions for the current thread
*/
/**
 * Closes the calling thread's cached session (if any) and clears the
 * thread-local slot, returning the underlying pooled connection.
 */
public static void closeCurrentSession() {
    final Session threadSession = currentSession.get();
    if (threadSession == null) {
        return;
    }
    if (threadSession.isOpen()) {
        threadSession.close();
    }
    currentSession.set(null);
}
/**
* Close the session
*/
/** Null-safe close of the given Session. */
public void closeSession(Session session) {
    if (session != null) {
        session.close();
    }
}
/**
* Begin the transaction related to the session
*/
/** Starts and returns a new Transaction on the given Session. */
public Transaction beginTransaction(Session s) {
    final Transaction tx = s.beginTransaction();
    return tx;
}
/**
* Commit the given transaction
*/
/** Commits the given Transaction. */
public void commitTransaction(Transaction t) {
    t.commit();
}
/**
 * Factory hook for the Configuration used at initialize time.
 * @param configFileName currently ignored; a fresh default Configuration is
 *        always returned (initialize() applies the config file afterwards)
 */
public static Configuration getNewConfiguration (String configFileName) {
    return new Configuration();
}
/**
* Return the name of the configuration file to be used with this DAO or null if default
*/
/** Name of the hibernate configuration file for this DAO; null selects the default. */
public String getConfigurationFileName() {
    return null;
}
/**
* Return the specific Object class that will be used for class-specific
* implementation of this DAO.
* @return the reference Class
*/
protected abstract Class getReferenceClass();
/**
* Used by the base DAO classes but here for your modification
* Get object matching the given key and return it.
*/
/**
 * Fetches the instance of refClass with the given key (null if absent).
 * Obtains a session via getSession() and releases it in finally.
 * NOTE(review): the session is closed before the entity is returned, so the
 * result is detached — lazy associations will not load outside this method.
 */
protected Object get(Class refClass, Serializable key) {
    Session s = null;
    try {
        s = getSession();
        return get(refClass, key, s);
    } finally {
        closeSession(s);
    }
}
/**
* Used by the base DAO classes but here for your modification
* Get object matching the given key and return it.
*/
/** Fetches the instance of refClass with the given key via Session.get on the supplied session. */
protected Object get(Class refClass, Serializable key, Session s) {
    return s.get(refClass, key);
}
/**
* Used by the base DAO classes but here for your modification
* Load object matching the given key and return it.
*/
/**
 * Loads the instance of refClass with the given key via Session.load.
 * NOTE(review): Session.load typically returns a lazy proxy; since the
 * session is closed in finally before the caller touches the result, first
 * access is likely to raise LazyInitializationException — confirm callers.
 */
protected Object load(Class refClass, Serializable key) {
    Session s = null;
    try {
        s = getSession();
        return load(refClass, key, s);
    } finally {
        closeSession(s);
    }
}
/**
* Used by the base DAO classes but here for your modification
* Load object matching the given key and return it.
*/
/** Loads the instance of refClass with the given key via Session.load on the supplied session. */
protected Object load(Class refClass, Serializable key, Session s) {
    return s.load(refClass, key);
}
/**
* Return all objects related to the implementation of this DAO with no filter.
*/
/**
 * Loads every instance of the reference class, using a session from
 * getSession() that is always released afterwards.
 */
public java.util.List findAll() {
    final Session workSession = getSession();
    try {
        return findAll(workSession);
    } finally {
        closeSession(workSession);
    }
}
/**
* Return all objects related to the implementation of this DAO with no filter.
* Use the session given.
* @param s the Session
*/
/** Loads every instance of the reference class on the given session, with the DAO's default ordering. */
public java.util.List findAll(Session s) {
    return findAll(s, getDefaultOrder());
}
/**
* Return all objects related to the implementation of this DAO with no filter.
*/
/**
 * Loads every instance of the reference class with the given ordering,
 * releasing the session when done.
 */
public java.util.List findAll(Order defaultOrder) {
    final Session workSession = getSession();
    try {
        return findAll(workSession, defaultOrder);
    } finally {
        closeSession(workSession);
    }
}
/**
* Return all objects related to the implementation of this DAO with no filter.
* Use the session given.
* @param s the Session
*/
/** Runs a Criteria query for all rows of the reference class, optionally ordered. */
public java.util.List findAll(Session s, Order defaultOrder) {
    final Criteria criteria = s.createCriteria(getReferenceClass());
    if (defaultOrder != null) {
        criteria.addOrder(defaultOrder);
    }
    return criteria.list();
}
/** Filters by a single property using the DAO's default ordering. */
protected Criteria findFiltered(String propName, Object filter) {
    final Order order = getDefaultOrder();
    return findFiltered(propName, filter, order);
}
/**
 * Builds a Criteria restricted to propName == filter with optional ordering.
 *
 * Bug fix: the Session was previously closed in a finally block before this
 * method returned, so the returned Criteria was bound to a closed Session
 * and could never be executed. The Session is now left open; callers must
 * release it (e.g. via closeCurrentSession()) after consuming the Criteria.
 */
protected Criteria findFiltered(String propName, Object filter, Order order) {
    Session s = getSession();
    return findFiltered(s, propName, filter, order);
}
/**
 * Builds a Criteria on the reference class restricted to propName == filter,
 * with an optional ORDER BY. The caller owns the Session and executes the
 * returned Criteria.
 */
protected Criteria findFiltered (Session s, String propName, Object filter, Order order) {
    Criteria crit = s.createCriteria(getReferenceClass());
    // NOTE(review): Expression is Hibernate's older criterion factory;
    // Restrictions.eq is the usual replacement in Hibernate 3+ — confirm
    // against the Hibernate version in use before changing.
    crit.add(Expression.eq(propName, filter));
    if (null != order) crit.addOrder(order);
    return crit;
}
/**
 * Looks up a named query.
 *
 * Bug fix: the Session used to be closed in a finally block before return,
 * leaving the returned Query bound to a closed Session (it failed as soon as
 * the caller executed it). The Session now stays open; release it with
 * closeCurrentSession() after running the query.
 */
protected Query getNamedQuery(String name) {
    Session s = getSession();
    return getNamedQuery(name, s);
}
/** Looks up the externally defined query with the given name on the supplied Session. */
protected Query getNamedQuery(String name, Session s) {
    return s.getNamedQuery(name);
}
/**
 * Looks up a named query and binds one positional parameter.
 *
 * Bug fix: the Session used to be closed in a finally block before return,
 * leaving the returned Query bound to a closed Session. The Session now
 * stays open; release it with closeCurrentSession() after running the query.
 */
protected Query getNamedQuery(String name, Serializable param) {
    Session s = getSession();
    return getNamedQuery(name, param, s);
}
/** Named query with a single positional parameter bound at index 0. */
protected Query getNamedQuery(String name, Serializable param, Session s) {
    final Query query = s.getNamedQuery(name);
    query.setParameter(0, param);
    return query;
}
/**
 * Looks up a named query and binds positional parameters in array order.
 *
 * Bug fix: the Session used to be closed in a finally block before return,
 * leaving the returned Query bound to a closed Session. The Session now
 * stays open; release it with closeCurrentSession() after running the query.
 */
protected Query getNamedQuery(String name, Serializable[] params) {
    Session s = getSession();
    return getNamedQuery(name, params, s);
}
/** Named query with positional parameters bound in array order; a null array binds nothing. */
protected Query getNamedQuery(String name, Serializable[] params, Session s) {
    final Query query = s.getNamedQuery(name);
    if (params != null) {
        for (int idx = 0; idx < params.length; idx++) {
            query.setParameter(idx, params[idx]);
        }
    }
    return query;
}
/**
 * Looks up a named query and binds named parameters from the given Map.
 *
 * Bug fix: the Session used to be closed in a finally block before return,
 * leaving the returned Query bound to a closed Session. The Session now
 * stays open; release it with closeCurrentSession() after running the query.
 */
protected Query getNamedQuery(String name, Map params) {
    Session s = getSession();
    return getNamedQuery(name, params, s);
}
/**
* Obtain an instance of Query for a named query string defined in the mapping file.
* Use the parameters given and the Session given.
* @param name the name of a query defined externally
* @param params the parameter Map
* @s the Session
* @return Query
*/
/**
 * Named query with named parameters taken from the given Map (String keys,
 * arbitrary values). A null map binds nothing.
 */
protected Query getNamedQuery(String name, Map params, Session s) {
    final Query query = s.getNamedQuery(name);
    if (params != null) {
        for (final Object element : params.entrySet()) {
            final Map.Entry entry = (Map.Entry) element;
            query.setParameter((String) entry.getKey(), entry.getValue());
        }
    }
    return query;
}
/**
* Execute a query.
* @param queryStr a query expressed in Hibernate's query language
* @return a distinct list of instances (or arrays of instances)
*/
/**
 * Creates a Query from the given HQL string.
 *
 * Bug fix: the Session was previously closed in a finally block before this
 * method returned, so the returned Query was bound to a closed Session and
 * failed as soon as the caller executed it. The Session is now left open;
 * release it with closeCurrentSession() after running the query.
 */
public Query getQuery(String queryStr) {
    Session s = getSession();
    return getQuery(queryStr, s);
}
/**
 * Create a Query from an HQL string using the session given instead of
 * creating a new one.
 * @param queryStr a query expressed in Hibernate's query language
 * @param s the Session to use
 * @return the Query instance (not yet executed)
 */
public Query getQuery(String queryStr, Session s) {
return s.createQuery(queryStr);
}
/**
 * Create a Query from an HQL string in a session obtained from this DAO and
 * bind one positional parameter. (Original javadoc listed the wrong
 * parameters; corrected here.)
 * @param queryStr a query expressed in Hibernate's query language
 * @param param the value bound to positional parameter 0
 * @return the Query instance
 */
protected Query getQuery(String queryStr, Serializable param) {
    Session session = null;
    try {
        session = getSession();
        return getQuery(queryStr, param, session);
    } finally {
        closeSession(session);
    }
}
/**
 * Create a Query from an HQL string using the session given, binding one
 * positional parameter.
 * @param queryStr a query expressed in Hibernate's query language
 * @param param the value bound to positional parameter 0
 * @param s the Session to use
 * @return the Query instance
 */
protected Query getQuery(String queryStr, Serializable param, Session s) {
    Query query = getQuery(queryStr, s);
    query.setParameter(0, param);
    return query;
}
/**
 * Create a Query from an HQL string in a session obtained from this DAO and
 * bind the given positional parameters.
 * @param queryStr a query expressed in Hibernate's query language
 * @param params positional parameter values; may be null
 * @return the Query instance
 */
protected Query getQuery(String queryStr, Serializable[] params) {
    Session session = null;
    try {
        session = getSession();
        return getQuery(queryStr, params, session);
    } finally {
        closeSession(session);
    }
}
/**
 * Create a Query from an HQL string using the session given, binding every
 * element of the array as a positional parameter (index 0..n-1).
 * @param queryStr a query expressed in Hibernate's query language
 * @param params positional parameter values; may be null (no binding done)
 * @param s the Session to use
 * @return the Query instance
 */
protected Query getQuery(String queryStr, Serializable[] params, Session s) {
    Query query = getQuery(queryStr, s);
    if (params != null) {
        int position = 0;
        while (position < params.length) {
            query.setParameter(position, params[position]);
            position++;
        }
    }
    return query;
}
/**
 * Create a Query from an HQL string in a session obtained from this DAO and
 * bind the given named parameters.
 * @param queryStr a query expressed in Hibernate's query language
 * @param params map of parameter name (String) to value; may be null
 * @return the Query instance
 */
protected Query getQuery(String queryStr, Map params) {
    Session session = null;
    try {
        session = getSession();
        return getQuery(queryStr, params, session);
    } finally {
        closeSession(session);
    }
}
/**
 * Create a Query from an HQL string using the session given, binding every
 * entry of the parameter Map as a named parameter.
 * @param queryStr a query expressed in Hibernate's query language
 * @param params map of parameter name (String) to value; may be null
 * @param s the Session to use
 * @return the Query instance
 */
protected Query getQuery(String queryStr, Map params, Session s) {
    Query query = getQuery(queryStr, s);
    if (params != null) {
        Iterator entries = params.entrySet().iterator();
        while (entries.hasNext()) {
            Map.Entry entry = (Map.Entry) entries.next();
            query.setParameter((String) entry.getKey(), entry.getValue());
        }
    }
    return query;
}
/**
 * Default ordering applied by the base DAO when listing entities.
 * This base implementation returns null, meaning "no default order";
 * subclasses may override.
 * @return the default Order, or null for none
 */
protected Order getDefaultOrder () {
return null;
}
/**
 * Used by the base DAO classes but here for your modification.
 * Persist the given transient instance inside a transaction, first assigning
 * a generated identifier (or using the current value of the identifier
 * property if the assigned generator is used).
 * @param obj the transient instance to persist
 * @return the generated identifier
 */
protected Serializable save(final Object obj) {
    TransactionRunnable work = new TransactionRunnable() {
        public Object run(Session s) {
            return save(obj, s);
        }
    };
    return (Serializable) run(work);
}
/**
 * Used by the base DAO classes but here for your modification
 * Persist the given transient instance, first assigning a generated identifier.
 * (Or using the current value of the identifier property if the assigned generator is used.)
 * @param obj the transient instance to persist
 * @param s the Session to use (caller manages transaction and close)
 * @return the generated identifier
 */
protected Serializable save(Object obj, Session s) {
return s.save(obj);
}
/**
 * Used by the base DAO classes but here for your modification.
 * Either save() or update() the given instance inside a transaction,
 * depending upon the value of its identifier property.
 * @param obj the instance to save or update
 */
protected void saveOrUpdate(final Object obj) {
    TransactionRunnable work = new TransactionRunnable() {
        public Object run(Session s) {
            saveOrUpdate(obj, s);
            return null;
        }
    };
    run(work);
}
/**
 * Used by the base DAO classes but here for your modification
 * Either save() or update() the given instance, depending upon the value of its
 * identifier property.
 * @param obj the instance to save or update
 * @param s the Session to use (caller manages transaction and close)
 */
protected void saveOrUpdate(Object obj, Session s) {
s.saveOrUpdate(obj);
}
/**
 * Used by the base DAO classes but here for your modification.
 * Update, inside a transaction, the persistent state associated with the
 * given identifier. An exception is thrown if there is a persistent instance
 * with the same identifier in the current session.
 * @param obj a transient instance containing updated state
 */
protected void update(final Object obj) {
    TransactionRunnable work = new TransactionRunnable() {
        public Object run(Session s) {
            update(obj, s);
            return null;
        }
    };
    run(work);
}
/**
 * Used by the base DAO classes but here for your modification
 * Update the persistent state associated with the given identifier. An exception is thrown if there is a persistent
 * instance with the same identifier in the current session.
 * @param obj a transient instance containing updated state
 * @param s the Session (caller manages transaction and close)
 */
protected void update(Object obj, Session s) {
s.update(obj);
}
/**
 * Delete all objects returned by the query, inside a single transaction.
 * @param query the query whose result objects are deleted
 * @return the number of objects deleted
 */
protected int delete (final Query query) {
    // Integer.valueOf(int) replaces the deprecated new Integer(int) allocation;
    // the redundant (Query) cast on an already-Query variable is dropped.
    Integer deleted = (Integer) run(
        new TransactionRunnable() {
            public Object run(Session s) {
                return Integer.valueOf(delete(query, s));
            }
        });
    return deleted.intValue();
}
/**
 * Delete all objects returned by the query, using the session given.
 * @param query the query whose result objects are deleted
 * @param s the Session to use (caller manages transaction and close)
 * @return the number of objects deleted (the size of the result list)
 */
protected int delete (Query query, Session s) {
    List results = query.list();
    Iterator it = results.iterator();
    while (it.hasNext()) {
        delete(it.next(), s);
    }
    return results.size();
}
/**
 * Used by the base DAO classes but here for your modification.
 * Remove, inside a transaction, a persistent instance from the datastore.
 * The argument may be an instance associated with the receiving Session or a
 * transient instance with an identifier associated with existing persistent
 * state.
 * @param obj the instance to delete
 */
protected void delete(final Object obj) {
    TransactionRunnable work = new TransactionRunnable() {
        public Object run(Session s) {
            delete(obj, s);
            return null;
        }
    };
    run(work);
}
/**
 * Used by the base DAO classes but here for your modification
 * Remove a persistent instance from the datastore. The argument may be an instance associated with the receiving
 * Session or a transient instance with an identifier associated with existing persistent state.
 * @param obj the instance to delete
 * @param s the Session to use (caller manages transaction and close)
 */
protected void delete(Object obj, Session s) {
s.delete(obj);
}
/**
 * Used by the base DAO classes but here for your modification
 * Re-read the state of the given instance from the underlying database. It is inadvisable to use this to implement
 * long-running sessions that span many business tasks. This method is, however, useful in certain special circumstances.
 * @param obj the persistent instance to refresh
 * @param s the Session to use
 */
protected void refresh(Object obj, Session s) {
s.refresh(obj);
}
/**
 * Rethrow the given Throwable: HibernateException and RuntimeException
 * propagate unchanged; anything else is wrapped in a HibernateException.
 * @param t the Throwable to propagate (never returns normally)
 */
protected void throwException (Throwable t) {
    if (t instanceof HibernateException) {
        throw (HibernateException) t;
    }
    if (t instanceof RuntimeException) {
        throw (RuntimeException) t;
    }
    throw new HibernateException(t);
}
/**
 * Execute the given transaction runnable inside a session/transaction pair:
 * open a session, begin a transaction, run the work, commit, and return the
 * work's result. On any failure the transaction is rolled back, the
 * runnable's onFail callback is invoked if it implements
 * TransactionFailHandler, and the original Throwable is rethrown via
 * throwException. The session is always closed in the finally block.
 * @param transactionRunnable the unit of work to run
 * @return whatever the runnable's run method returned
 */
protected Object run (TransactionRunnable transactionRunnable) {
Transaction t = null;
Session s = null;
try {
s = getSession();
t = beginTransaction(s);
Object obj = transactionRunnable.run(s);
commitTransaction(t);
return obj;
}
catch (Throwable throwable) {
// roll back only if a transaction was actually started
if (null != t) {
try {
t.rollback();
}
// a rollback failure goes to handleError rather than rethrowing,
// so the original failure below is not masked
catch (HibernateException e) {handleError(e);}
}
// give the runnable a chance to react to the failure
if (transactionRunnable instanceof TransactionFailHandler) {
try {
((TransactionFailHandler) transactionRunnable).onFail(s);
}
catch (Throwable e) {handleError(e);}
}
throwException(throwable);
// unreachable unless throwException is overridden not to throw
return null;
}
finally {
closeSession(s);
}
}
/**
 * Execute the given transaction runnable asynchronously on a new thread.
 * @param transactionRunnable the unit of work to run
 * @return a TransactionPointer that can be polled or waited on for the result
 * @deprecated misspelled name kept for backward compatibility; use
 *             runAsync(TransactionRunnable) instead
 */
protected TransactionPointer runAsnyc (TransactionRunnable transactionRunnable) {
    return runAsync(transactionRunnable);
}
/**
 * Execute the given transaction runnable asynchronously on a new thread.
 * @param transactionRunnable the unit of work to run
 * @return a TransactionPointer that can be polled or waited on for the result
 */
protected TransactionPointer runAsync (TransactionRunnable transactionRunnable) {
    final TransactionPointer transactionPointer = new TransactionPointer(transactionRunnable);
    new ThreadRunner(transactionPointer).start();
    return transactionPointer;
}
/**
 * This class can be used to encapsulate logic used for a single transaction;
 * instances are passed to the synchronous and asynchronous run methods.
 */
public abstract class TransactionRunnable {
// s is the open Session bound to the surrounding transaction; the return
// value is handed back to the caller of run/the TransactionPointer
public abstract Object run (Session s) throws Exception;
}
/**
 * This class can be used to handle any error that has occurred during a
 * transaction; onFail is invoked (after rollback) with the failing Session.
 */
public interface TransactionFailHandler {
public void onFail (Session s);
}
/**
 * This class can be used to handle failed transactions: a TransactionRunnable
 * that also receives the onFail callback on failure.
 */
public abstract class TransactionRunnableFailHandler extends TransactionRunnable implements TransactionFailHandler {
}
/**
 * Handle to an asynchronously running transaction (see runAsnyc). The worker
 * thread stores the result or thrown exception here and marks completion;
 * the submitting thread can poll hasCompleted() or block in waitUntilFinish.
 */
public class TransactionPointer {
    private TransactionRunnable transactionRunnable;
    // volatile: these fields are written by the worker thread and read by the
    // waiting thread; without volatile the waiter may never observe completion
    // or may see a stale result/exception
    private volatile Throwable thrownException;
    private volatile Object returnValue;
    private volatile boolean hasCompleted = false;
    public TransactionPointer (TransactionRunnable transactionRunnable) {
        this.transactionRunnable = transactionRunnable;
    }
    /** @return true once the worker thread has finished (success or failure) */
    public boolean hasCompleted() {
        return hasCompleted;
    }
    /** Marks the transaction finished; called by the worker thread. */
    public void complete() {
        this.hasCompleted = true;
    }
    public Object getReturnValue() {
        return returnValue;
    }
    public void setReturnValue(Object returnValue) {
        this.returnValue = returnValue;
    }
    public Throwable getThrownException() {
        return thrownException;
    }
    public void setThrownException(Throwable thrownException) {
        this.thrownException = thrownException;
    }
    public TransactionRunnable getTransactionRunnable() {
        return transactionRunnable;
    }
    public void setTransactionRunnable(TransactionRunnable transactionRunnable) {
        this.transactionRunnable = transactionRunnable;
    }
    /**
     * Wait until the transaction completes and return the value returned from
     * the run method of the TransactionRunnable. If the transaction throws an
     * Exception, throw that Exception.
     * @param timeout the timeout in milliseconds (or 0 for no timeout)
     * @return the return value from the TransactionRunnable
     * @throws javax.naming.TimeLimitExceededException if the timeout has been
     *         reached before transaction completion
     * @throws Throwable the Throwable thrown by the transaction
     */
    public Object waitUntilFinish (long timeout) throws Throwable {
        long killTime = -1;
        if (timeout > 0) killTime = System.currentTimeMillis() + timeout;
        boolean interrupted = false;
        // Poll with a short sleep. Unlike the original do/while, this does not
        // sleep a mandatory 50ms when the work is already complete.
        while (!hasCompleted && (killTime <= 0 || System.currentTimeMillis() < killTime)) {
            try {
                Thread.sleep(50);
            }
            catch (InterruptedException e) {
                // remember the interrupt instead of swallowing it; keep waiting
                interrupted = true;
            }
        }
        if (interrupted) Thread.currentThread().interrupt(); // restore status
        if (!hasCompleted) throw new javax.naming.TimeLimitExceededException();
        if (null != thrownException) throw thrownException;
        return returnValue;
    }
}
/**
 * Worker thread for runAsnyc: runs the TransactionPointer's runnable inside a
 * session/transaction pair and records the outcome on the pointer.
 */
private class ThreadRunner extends Thread {
    private TransactionPointer transactionPointer;
    public ThreadRunner (TransactionPointer transactionPointer) {
        this.transactionPointer = transactionPointer;
    }
    public void run () {
        Transaction t = null;
        Session s = null;
        try {
            s = getSession();
            t = beginTransaction(s);
            Object obj = transactionPointer.getTransactionRunnable().run(s);
            // use commitTransaction(t) for consistency with the synchronous
            // run(TransactionRunnable) path (was a bare t.commit(), bypassing
            // whatever commitTransaction adds)
            commitTransaction(t);
            transactionPointer.setReturnValue(obj);
        }
        catch (Throwable throwable) {
            // roll back only if a transaction was actually started
            if (null != t) {
                try {
                    t.rollback();
                }
                catch (HibernateException e) {handleError(e);}
            }
            // give the runnable a chance to react to the failure
            if (transactionPointer.getTransactionRunnable() instanceof TransactionFailHandler) {
                try {
                    ((TransactionFailHandler) transactionPointer.getTransactionRunnable()).onFail(s);
                }
                catch (Throwable e) {handleError(e);}
            }
            transactionPointer.setThrownException(throwable);
        }
        finally {
            // mark completion before closing so waiters are released even if
            // closeSession fails; a close failure is still recorded
            transactionPointer.complete();
            try {
                closeSession(s);
            }
            catch (HibernateException e) {
                transactionPointer.setThrownException(e);
            }
        }
    }
}
/**
 * Hook invoked for secondary errors (a failing rollback or a failing onFail
 * callback). This base implementation deliberately does nothing, so such
 * errors are silently discarded; subclasses may override to log them.
 * NOTE(review): consider logging here — silent discard hides rollback failures.
 * @param t the secondary error
 */
protected void handleError (Throwable t) {
}
}
这是 HibernateWork 类:
package com.test.hibernate.util;
import org.apache.log4j.Logger;
import org.hibernate.jdbc.Work;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
/**
 * HibernateWork — a Work implementation that executes a SQL query and keeps
 * the ResultSet for the caller.
 *
 * NOTE(review): in the original version the PreparedStatement was never
 * closed, leaking the statement (and its ResultSet) on every execution — a
 * likely contributor to connection/resource exhaustion under load. The
 * statement is now kept in a field and a close() method is provided; callers
 * MUST call close() once they are done with getResultSet().
 *
 * Oct 10, 2012
 */
public class HibernateWork implements Work {
    private static Logger log = Logger.getLogger(HibernateWork.class);
    private String query;
    // kept so it can be closed later; closing the statement also closes its ResultSet
    private PreparedStatement statement;
    private ResultSet resultSet;
    /**
     * Constructor
     * @param query the SQL to execute; must not be built by concatenating
     *        untrusted input (no parameter binding is done here)
     */
    public HibernateWork(String query) {
        super();
        this.query = query;
    }
    /**
     * Executes the query on the given connection and stores the ResultSet.
     * @param connection the JDBC connection supplied by Hibernate
     * @throws SQLException if preparing or executing the statement fails
     */
    public void execute(Connection connection) throws SQLException {
        this.statement = connection.prepareStatement(this.query);
        this.resultSet = this.statement.executeQuery();
    }
    /**
     * Releases the ResultSet and PreparedStatement. Safe to call repeatedly;
     * failures are logged, not thrown.
     */
    public void close() {
        if (this.resultSet != null) {
            try {
                this.resultSet.close();
            } catch (SQLException e) {
                log.warn("Failed to close ResultSet", e);
            }
            this.resultSet = null;
        }
        if (this.statement != null) {
            try {
                this.statement.close();
            } catch (SQLException e) {
                log.warn("Failed to close PreparedStatement", e);
            }
            this.statement = null;
        }
    }
    /**
     * @return the query
     */
    public String getQuery() {
        return query;
    }
    /**
     * @return the resultSet (null after close() has been called)
     */
    public ResultSet getResultSet() {
        return resultSet;
    }
}
关闭方法(closeSession)存在于 _BaseRootDAO 中。