我试图通过示例了解同步块和同步方法之间的区别。考虑以下简单类:
// Example used by the question: static state mutated under a synchronized BLOCK.
// The javap disassembly shown below corresponds to this exact source, so the
// code itself is left untouched here.
public class Main {
// Dedicated private lock object: clients cannot reach it, so they cannot
// interfere with this class's locking policy (unlike a synchronized method,
// which locks the publicly reachable Class/instance monitor).
private static final Object lock = new Object();
// Shared mutable state guarded by `lock`.
private static long l;
public static void main(String[] args) {
}
// Synchronized-block version: compiles to explicit monitorenter/monitorexit
// bytecodes (see instructions 5 and 28/34 in the listing below), including a
// second monitorexit on the exception-handler path.
public static void action(){
synchronized(lock){
l = (l + 1) * 2;
System.out.println(l);
}
}
}
编译后的Main::action()
将如下所示:
public static void action();
Code:
0: getstatic #2 // Field lock:Ljava/lang/Object;
3: dup
4: astore_0
5: monitorenter // <---- ENTERING
6: getstatic #3 // Field l:J
9: lconst_1
10: ladd
11: ldc2_w #4 // long 2l
14: lmul
15: putstatic #3 // Field l:J
18: getstatic #6 // Field java/lang/System.out:Ljava/io/PrintStream;
21: getstatic #3 // Field l:J
24: invokevirtual #7 // Method java/io/PrintStream.println:(J)V
27: aload_0
28: monitorexit // <---- EXITING
29: goto 37
32: astore_1
33: aload_0
34: monitorexit // <---- EXITING TWICE????
35: aload_1
36: athrow
37: return
我认为我们最好使用synchronized块而不是synchronized方法,因为它提供了更多的封装,防止客户端影响同步策略(使用synchronized方法,任何客户端都可以获取影响同步策略的锁定)。但从表现的角度来看,在我看来几乎是一样的。现在考虑同步方法版本:
// Synchronized-METHOD version of the same operation. No monitorenter/monitorexit
// bytecodes appear in its disassembly (see the listing below); the method is
// instead flagged as synchronized in the class file and the JVM performs the
// locking on method entry/exit (see the interpreter's method_entry code further
// down in this answer).
public static synchronized void action(){
l = (l + 1) * 2;
System.out.println(l);
}
public static synchronized void action();
Code:
0: getstatic #2 // Field l:J
3: lconst_1
4: ladd
5: ldc2_w #3 // long 2l
8: lmul
9: putstatic #2 // Field l:J
12: getstatic #5 // Field java/lang/System.out:Ljava/io/PrintStream;
15: getstatic #2 // Field l:J
18: invokevirtual #6 // Method java/io/PrintStream.println:(J)V
21: return
因此,在同步方法版本中,执行的内容要少得多,所以我会说它的速度更快。
问题: 同步方法比同步块更快吗?
答案 0 :(得分:3)
使用在此答案底部发布的Java代码进行的快速测试导致synchronized method
更快。在i7上的Windows JVM上运行代码导致以下平均值
synchronized块:0.004254 s
同步方法:0.001056 s
根据您的字节码评估,这意味着synchronized method
实际上更快。
当构建.class
文件时,如果方法是同步的,则放入字节代码以警告JVM该方法是同步的(类似于方法为static/public/final/varargs
时添加的字节代码)等等,并且底层的JVM代码在方法结构上设置了一个标志来实现这种效果。
当字节码解释器命中字节码进行方法调用时,在调用方法之前调用以下代码来检查是否需要锁定它:
// HotSpot bytecode-interpreter excerpt: runs once on entry to a method call.
// If the method is synchronized, the interpreter locks the receiver here,
// BEFORE any of the method's bytecodes execute.
case method_entry: {
/* CODE_EDIT: irrelevant code removed for brevities sake */
// lock method if synchronized
if (METHOD->is_synchronized()) {
// oop rcvr = locals[0].j.r;
oop rcvr;
if (METHOD->is_static()) {
// Static synchronized method: the lock object is the Class mirror.
rcvr = METHOD->constants()->pool_holder()->java_mirror();
} else {
// Instance method: the lock object is `this` (local slot 0).
rcvr = LOCALS_OBJECT(0);
VERIFY_OOP(rcvr);
}
// The initial monitor is ours for the taking
// (a monitor slot for the method lock was pre-reserved below monitor_base).
BasicObjectLock* mon = &istate->monitor_base()[-1];
oop monobj = mon->obj();
assert(mon->obj() == rcvr, "method monitor mis-initialized");
bool success = UseBiasedLocking;
if (UseBiasedLocking) {
/* CODE_EDIT: this code is only run if you have biased locking enabled as a JVM option */
}
if (!success) {
// Fast path: try to install this monitor into the object's mark word
// with a single compare-and-swap.
markOop displaced = rcvr->mark()->set_unlocked();
mon->lock()->set_displaced_header(displaced);
if (Atomic::cmpxchg_ptr(mon, rcvr->mark_addr(), displaced) != displaced) {
// Is it simple recursive case?
if (THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
// Recursive lock by the same thread: mark the slot, no contention.
mon->lock()->set_displaced_header(NULL);
} else {
// Contended: fall back to the runtime slow path.
CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
}
}
}
}
/* CODE_EDIT: irrelevant code removed for brevities sake */
goto run;
}
然后,当方法完成并返回到JVM函数处理程序时,将调用以下代码来解锁方法(请注意,布尔值 bool method_unlock_needed = METHOD->is_synchronized() 在调用方法之前就已经设置好了):
// HotSpot bytecode-interpreter excerpt: runs when a method returns. For a
// synchronized method (method_unlock_needed was computed from
// METHOD->is_synchronized() before the call) this releases the monitor that
// method_entry acquired above.
if (method_unlock_needed) {
if (base->obj() == NULL) {
/* CODE_EDIT: irrelevant code removed for brevities sake */
} else {
oop rcvr = base->obj();
if (rcvr == NULL) {
// Lock slot present but receiver gone: raise IllegalMonitorState-style
// error unless errors are being suppressed on this path.
if (!suppress_error) {
VM_JAVA_ERROR_NO_JUMP(vmSymbols::java_lang_NullPointerException(), "");
illegal_state_oop = THREAD->pending_exception();
THREAD->clear_pending_exception();
}
} else {
BasicLock* lock = base->lock();
markOop header = lock->displaced_header();
base->set_obj(NULL);
// If it isn't recursive we either must swap old header or call the runtime
// (header == NULL marks a recursive lock — nothing to restore).
if (header != NULL) {
if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), lock) != lock) {
// restore object for the slow case
base->set_obj(rcvr);
{
// Prevent any HandleMarkCleaner from freeing our live handles
HandleMark __hm(THREAD);
CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base));
}
if (THREAD->has_pending_exception()) {
if (!suppress_error) illegal_state_oop = THREAD->pending_exception();
THREAD->clear_pending_exception();
}
}
}
}
}
}
语句CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
和CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base));
,更具体地说,函数 InterpreterRuntime::monitorenter 和 InterpreterRuntime::monitorexit 是 JVM 中为同步方法和同步块锁定/解锁底层对象而调用的代码。代码中的 run 标签指向庞大的字节码解释器 switch 语句,用于处理正在解析的各种字节码。
从此处,如果遇到同步块操作码(monitorenter 和 monitorexit 字节码),则运行以下 case 语句(分别对应 monitorenter 和 monitorexit):
同样,调用相同的CASE(_monitorenter): {
oop lockee = STACK_OBJECT(-1);
// derefing's lockee ought to provoke implicit null check
CHECK_NULL(lockee);
// find a free monitor or one already allocated for this object
// if we find a matching object then we need a new monitor
// since this is recursive enter
BasicObjectLock* limit = istate->monitor_base();
BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base();
BasicObjectLock* entry = NULL;
while (most_recent != limit ) {
if (most_recent->obj() == NULL) entry = most_recent;
else if (most_recent->obj() == lockee) break;
most_recent++;
}
if (entry != NULL) {
entry->set_obj(lockee);
markOop displaced = lockee->mark()->set_unlocked();
entry->lock()->set_displaced_header(displaced);
if (Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {
// Is it simple recursive case?
if (THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
entry->lock()->set_displaced_header(NULL);
} else {
CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
}
}
UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
} else {
istate->set_msg(more_monitors);
UPDATE_PC_AND_RETURN(0); // Re-execute
}
}
// HotSpot bytecode-interpreter handler for the `monitorexit` bytecode:
// locate the monitor slot owned for `lockee` and release it.
CASE(_monitorexit): {
oop lockee = STACK_OBJECT(-1);
CHECK_NULL(lockee);
// derefing's lockee ought to provoke implicit null check
// find our monitor slot
BasicObjectLock* limit = istate->monitor_base();
BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base();
while (most_recent != limit ) {
if ((most_recent)->obj() == lockee) {
BasicLock* lock = most_recent->lock();
markOop header = lock->displaced_header();
most_recent->set_obj(NULL);
// If it isn't recursive we either must swap old header or call the runtime
// (header == NULL marks a recursive lock — nothing to swap back).
if (header != NULL) {
if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) {
// restore object for the slow case
most_recent->set_obj(lockee);
CALL_VM(InterpreterRuntime::monitorexit(THREAD, most_recent), handle_exception);
}
}
UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
}
most_recent++;
}
// Need to throw illegal monitor state exception
// (no matching slot means this thread does not own the monitor).
CALL_VM(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD), handle_exception);
ShouldNotReachHere();
}
同样,这里最终调用的是相同的 InterpreterRuntime::monitorenter 和 InterpreterRuntime::monitorexit 函数来锁定/解锁底层对象,但这一流程中有更多的额外开销,这解释了同步方法与同步块之间的时间差异。
显然,同步方法和synchronized块在使用时都有其优缺点,但问题是询问哪个更快,并且基于初步测试和来自OpenJDK的源,它看起来好像是同步方法(单独)确实比同步块(单独)更快。您的结果可能会有所不同(特别是代码越复杂),因此如果性能是一个问题,那么最好自己进行测试并从那里测量对您的案例有意义的事情。
以下是相关的 Java 测试代码(完整的基准测试程序见原答案末尾附带的代码)。
希望这有助于增加一些清晰度。
答案 1 :(得分:2)
考虑到你的同步块有一个goto,否则后面有6个左右的指令,指令的数量实际上并没有那么不同。
这实际上归结为如何最好地在多个访问线程中公开对象。
答案 2 :(得分:0)
相反,实践中同步方法应该比同步块更慢,因为同步方法会让更多的代码被串行化执行。
但是,如果两者都包含相同数量的代码,那么下面的测试所支持的性能不应该有太大差异。
支持班级
/**
 * Benchmark work item: each implementation runs the same O(n^2) workload
 * under a different synchronization strategy (block vs. method).
 */
public interface TestMethod {
// Runs the timed workload over the shared input array.
public void test(double[] array);
// Returns a label for reporting (implementations use the class name).
public String getName();
}
/**
 * Benchmark variant that uses only a synchronized BLOCK.
 *
 * BUG FIX: the original declared {@code test} as {@code synchronized} AND
 * wrapped the body in {@code synchronized (lock)}, so this "block" variant
 * acquired TWO monitors per call (the instance monitor plus {@code lock}),
 * invalidating the block-vs-method comparison. The method-level modifier is
 * removed so exactly one monitor is taken, matching the intent of the test.
 */
public class TestSynchronizedBlock implements TestMethod{
// Private lock guarding the workload; the only monitor this variant takes.
private static final Object lock = new Object();
@Override
public void test(double[] arr) {
synchronized (lock) {
double sum = 0;
for(double d : arr) {
for(double d1 : arr) {
sum += d*d1;
}
}
//System.out.print(sum + " ");
}
}
@Override
public String getName() {
return getClass().getName();
}
}
/**
 * Benchmark variant that relies solely on a synchronized METHOD: the whole
 * workload runs while holding this instance's monitor.
 */
public class TestSynchronizedMethod implements TestMethod {
/** Accumulates the pairwise product sum of the array under the instance monitor. */
@Override
public synchronized void test(double[] arr) {
double total = 0;
for (int i = 0; i < arr.length; i++) {
for (int j = 0; j < arr.length; j++) {
total += arr[i] * arr[j];
}
}
//System.out.print(total + " ");
}
/** Reports the runtime class name as the benchmark label. */
@Override
public String getName() {
return this.getClass().getName();
}
}
主要类
import java.util.Random;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
/**
 * Benchmark driver: submits 100 concurrent runs of a {@link TestMethod}
 * implementation to a thread pool and reports the summed wall-clock time.
 *
 * FIX: the original left the synchronized-block run commented out, so a
 * single execution could never produce the paired results quoted in the
 * answer; both variants are now run in the same invocation. Note the second
 * variant may still benefit from JIT warm-up from the first — swap the order
 * (or run each alone) to check for that bias.
 */
public class TestSynchronizedMain {
public static void main(String[] args) {
TestSynchronizedMain main = new TestSynchronizedMain();
Random rand = new Random();
// One shared random workload so both variants do identical work.
double[] arr = new double[10000];
for (int j = 0; j < arr.length; j++) {
arr[j] = rand.nextDouble() * 10000;
}
main.testSynchronized(new TestSynchronizedBlock(), arr);
main.testSynchronized(new TestSynchronizedMethod(), arr);
}
/**
 * Runs {@code testMethod.test(arr)} from 100 pooled tasks, accumulating the
 * per-task elapsed milliseconds, and prints the total in seconds together
 * with start/end counters (which should both read 100 on a clean run).
 */
public void testSynchronized(final TestMethod testMethod, final double[] arr) {
System.out.println("Testing " + testMethod.getName());
ExecutorService executor = Executors.newCachedThreadPool();
final AtomicLong time = new AtomicLong();
final AtomicLong startCounter = new AtomicLong();
final AtomicLong endCounter = new AtomicLong();
for (int i = 0; i < 100; i++) {
executor.submit(new Runnable() {
@Override
public void run() {
startCounter.incrementAndGet();
long startTime = System.currentTimeMillis();
testMethod.test(arr);
long endTime = System.currentTimeMillis();
// Sum of per-task durations, not wall-clock span of the whole batch.
time.addAndGet(endTime - startTime);
endCounter.incrementAndGet();
}
});
}
executor.shutdown();
try {
executor.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS);
System.out.println("time taken = " + (time.get() / 1000.0) + " : starts = " + startCounter.get() + " : ends = " + endCounter.get());
} catch (InterruptedException e) {
e.printStackTrace();
}
}
}
多次运行中的主要输出
1. Testing TestSynchronizedBlock
time taken = 537.974 : starts = 100 : ends = 100
Testing TestSynchronizedMethod
time taken = 537.052 : starts = 100 : ends = 100
2. Testing TestSynchronizedBlock
time taken = 535.983 : starts = 100 : ends = 100
Testing TestSynchronizedMethod
time taken = 537.534 : starts = 100 : ends = 100
3. Testing TestSynchronizedBlock
time taken = 553.964 : starts = 100 : ends = 100
Testing TestSynchronizedMethod
time taken = 552.352 : starts = 100 : ends = 100
注意:测试是在Windows 8,64 bit,i7机器上完成的。 实际时间并不重要,但相对值为。