Poor Hibernate performance

Date: 2018-10-24 13:05:27

Tags: java performance hibernate

I have read a lot about configuring Hibernate to save a large batch of new entities, but I must be missing something, because insert performance is really poor...

Here is my configuration.

Hibernate configuration:

@Configuration
@EnableTransactionManagement
@ComponentScans(value = { @ComponentScan("com.xxxxx") })
public class HibernateConfiguration
{


@Bean
public LocalSessionFactoryBean sessionFactory() {
    LocalSessionFactoryBean sessionFactory = new LocalSessionFactoryBean();
    sessionFactory.setDataSource(dataSource());
    sessionFactory.setPackagesToScan(new String[] { "com.xxxx" });
    sessionFactory.setHibernateProperties(hibernateProperties());
    return sessionFactory;
 }

@Bean
public DataSource dataSource() {
    BasicDataSource dataSource = new BasicDataSource();
    dataSource.setUrl(XXXX);
    dataSource.setUsername(XXXX);
    dataSource.setPassword(XXXX);
    dataSource.setTestOnBorrow(true);
    dataSource.setValidationQuery("SELECT 1");
    dataSource.setInitialSize(3);
    dataSource.setMaxActive(10);
    return dataSource;
}

private Properties hibernateProperties() {
    Properties properties = new Properties();
    properties.put(Environment.DIALECT, "org.hibernate.dialect.PostgreSQL95Dialect");
    properties.put(Environment.SHOW_SQL, false);
    properties.put(Environment.FORMAT_SQL, false);
    properties.put("hibernate.default-lazy", true);
    properties.put(Environment.USE_NEW_ID_GENERATOR_MAPPINGS, true);
    properties.put(Environment.HBM2DDL_AUTO, "none");
    properties.put(Environment.STATEMENT_BATCH_SIZE, 50);
    properties.put(Environment.STATEMENT_FETCH_SIZE, 400);
    properties.put(Environment.ORDER_INSERTS, true);
    properties.put(Environment.ORDER_UPDATES, true);
    properties.put(Environment.BATCH_VERSIONED_DATA, true);
    properties.put(Environment.GENERATE_STATISTICS, true);
    properties.put(Environment.HQL_BULK_ID_STRATEGY, InlineIdsSubSelectValueListBulkIdStrategy.class);
   return properties;        
}

@Bean
public HibernateTransactionManager transactionManager()
{
   HibernateTransactionManager txManager = new HibernateTransactionManager();
   txManager.setSessionFactory(sessionFactory().getObject());
   return txManager;
}

@Bean
public PersistenceExceptionTranslationPostProcessor exceptionTranslation()
{
      return new PersistenceExceptionTranslationPostProcessor();
}

}

For the test I created a simple table without any relationships.

Entity:

@Entity
@Table(name="performance_test")
@NamedQuery(name="PerformanceTest.findAll", query="SELECT t FROM PerformanceTest t")
public class PerformanceTest
{
@Id
@GenericGenerator(
        name = "PERFORMANCE_TEST_ID_GENERATOR",
        strategy = "org.hibernate.id.enhanced.SequenceStyleGenerator",
        parameters = {
                @Parameter(name = "sequence_name", value = "performance_test_id_seq"),
                @Parameter(name = "optimizer", value = "pooled-lo"),
                @Parameter(name = "increment_size", value = "1")
        }
)
@GeneratedValue(strategy=GenerationType.SEQUENCE, generator="PERFORMANCE_TEST_ID_GENERATOR")
private Long id;

@Column(name="first_name")
private String firstName;

@Column(name="last_name")
private String lastName;

@Column(name="salary")
private Integer salary;

public PerformanceTest() {}

public Long getId()
{
    return id;
}

public void setId(Long id)
{
    this.id = id;
}

public String getFirstName()
{
    return firstName;
}

public void setFirstName(String firstName)
{
    this.firstName = firstName;
}

public String getLastName()
{
    return lastName;
}

public void setLastName(String lastName)
{
    this.lastName = lastName;
}

public Integer getSalary()
{
    return salary;
}

public void setSalary(Integer salary)
{
    this.salary = salary;
}

}

DAO implementation (I tried both of the following methods without any noticeable improvement):

@Override
public void saveBulkElement (Set<T> listOfElement,Integer bulkSize)
{
    if(listOfElement == null || listOfElement.size() == 0)
        return;

    Session session = sessionFactory.openSession();
    Transaction tx = session.beginTransaction();
    session.setJdbcBatchSize(batchSize);
    try
    {
        int flushIndex = 0;
        Iterator<T> ite = listOfElement.iterator();
        while (ite.hasNext())
        {
            T element = (T) ite.next();
            session.persist(element);
            flushIndex++;
            int size = bulkSize != null ? bulkSize:batchSize;
            if((flushIndex % size == 0 && flushIndex > 0) || !ite.hasNext())
            {
                session.flush();
                session.clear();
            }
        }

        tx.commit();
    }
    catch (HibernateException e)
    {
        if (tx != null)
            tx.rollback();
    }
    finally
    {
        session.close();
    }              
}

@Override
public void saveStatelessBulkElement (Set<T> listOfElement)
{
    if(listOfElement == null || listOfElement.size() == 0)
        return;

    StatelessSession session = sessionFactory.openStatelessSession();
    Transaction tx = session.beginTransaction();
    session.setJdbcBatchSize(listOfElement.size() < statelessBatchSize ? listOfElement.size():statelessBatchSize);
    try
    {
        Iterator<T> ite = listOfElement.iterator();
        while (ite.hasNext())
        {
            T element = (T) ite.next();
            session.insert(element);
        }

        tx.commit();
    }
    catch (HibernateException e)
    {
        if (tx != null)
            tx.rollback();
    }
    finally
    {
        session.close();
    }              
}

My test simply inserts 100 new elements, and I configured Hibernate to print its statistics.
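
The test boils down to roughly the following (a minimal sketch; PerformanceTestDao, the dao variable and the way the entities are populated are placeholders for the real test code, which is not shown, and the DAO is the implementation above):

// Hypothetical test: build 100 entities and save them through the DAO shown above.
public void insertHundredRows(PerformanceTestDao dao)
{
    Set<PerformanceTest> entities = new HashSet<>();
    for (int i = 0; i < 100; i++)
    {
        PerformanceTest p = new PerformanceTest();
        p.setFirstName("first" + i);
        p.setLastName("last" + i);
        p.setSalary(1000 + i);
        entities.add(p);
    }
    // Passing null as the bulk size makes saveBulkElement fall back to the DAO's default batch size.
    dao.saveBulkElement(entities, null);
}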

Here is what I get:

[StatisticalLoggingSessionEventListener] - Session Metrics {
137291307 nanoseconds spent acquiring 1 JDBC connections;
0 nanoseconds spent releasing 0 JDBC connections;
12909270 nanoseconds spent preparing 100 JDBC statements;
13660416454 nanoseconds spent executing 100 JDBC statements;
0 nanoseconds spent executing 0 JDBC batches;
0 nanoseconds spent performing 0 L2C puts;
0 nanoseconds spent performing 0 L2C hits;
0 nanoseconds spent performing 0 L2C misses;
32506326 nanoseconds spent executing 2 flushes (flushing a total of 100 entities and 0 collections);
0 nanoseconds spent executing 0 partial-flushes (flushing a total of 0 entities and 0 collections)
}
[StatisticalLoggingSessionEventListener] - Session Metrics {
141927634 nanoseconds spent acquiring 1 JDBC connections;
0 nanoseconds spent releasing 0 JDBC connections;
0 nanoseconds spent preparing 0 JDBC statements;
0 nanoseconds spent executing 0 JDBC statements;
0 nanoseconds spent executing 0 JDBC batches;
0 nanoseconds spent performing 0 L2C puts;
0 nanoseconds spent performing 0 L2C hits;
0 nanoseconds spent performing 0 L2C misses;
0 nanoseconds spent executing 0 flushes (flushing a total of 0 entities and 0 collections);
0 nanoseconds spent executing 0 partial-flushes (flushing a total of 0 entities and 0 collections)
}

14 seconds for just 100 rows!!! (The statistics above show about 13.7 s spent executing the 100 JDBC statements, i.e. roughly 137 ms per INSERT.)

Am I missing something in my Hibernate configuration?

I updated my entity with a proper generator and the problem is still there.

====================================================

First update:

I checked the database sequence generator:

Sequence "recntrek.performance_test_id_seq"
Column     |  Type   |          Value          
---------------+---------+-------------------------
sequence_name | name    | performance_test_id_seq
last_value    | bigint  | 293551
start_value   | bigint  | 1
increment_by  | bigint  | 1
max_value     | bigint  | 9223372036854775807
min_value     | bigint  | 1
cache_value   | bigint  | 1
log_cnt       | bigint  | 32
is_cycled     | boolean | f
is_called     | boolean | t

I changed the increment and cache values to 50...

I re-ran the test: it took 1.4 seconds.

I ran another test with 10,000 new elements; it took about 30 seconds.

That is a big improvement, but compared to the numbers on Vlad Mihalcea's page I am still far from excellent performance.

1 answer:

Answer 0 (score: 1):

The poor performance seems to be related to how the database sequence is defined: with increment_by set to 1, the pooled-lo optimizer has to call the sequence once per row, whereas a larger increment lets Hibernate allocate a whole block of ids with a single round trip.

Here are some results with different sequence configurations:

  • 100 rows: increment_by and cache_value set to 1, about 14 seconds
  • 100 rows: increment_by and cache_value set to 50, about 1.4 seconds
  • 10,000 rows: increment_by and cache_value set to 50, about 30 seconds
  • 10,000 rows: increment_by and cache_value set to 500, about 5 seconds
  • 10,000 rows: increment_by and cache_value set to 1000, about 4 seconds

Each time I changed these values, I also updated the generator definition on the entity (the increment_size value):

@GenericGenerator(
    name = "PERFORMANCE_TEST_ID_GENERATOR",
    strategy = "org.hibernate.id.enhanced.SequenceStyleGenerator",
    parameters = {
            @Parameter(name = "sequence_name", value = "performance_test_id_seq"),
            @Parameter(name = "optimizer", value = "pooled-lo"),
            @Parameter(name = "increment_size", value = "1")
    }
)
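
For example, for the 50/50 sequence configuration the mapping would look roughly like the sketch below; the only change is increment_size, and the database sequence itself has to be altered to match (e.g. ALTER SEQUENCE performance_test_id_seq INCREMENT BY 50 CACHE 50 in PostgreSQL):

@GenericGenerator(
    name = "PERFORMANCE_TEST_ID_GENERATOR",
    strategy = "org.hibernate.id.enhanced.SequenceStyleGenerator",
    parameters = {
            @Parameter(name = "sequence_name", value = "performance_test_id_seq"),
            @Parameter(name = "optimizer", value = "pooled-lo"),
            // Keep in sync with the sequence's increment_by, otherwise the pooled-lo
            // optimizer can hand out ids that conflict with already-used values.
            @Parameter(name = "increment_size", value = "50")
    }
)

With pooled-lo, increment_size is the number of ids Hibernate hands out per sequence call, so it should always stay equal to the sequence's increment_by.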

Hope this helps...