I wanted to know whether it would be better/faster to use POSIX calls (such as pthread_once() and sem_wait()) or the dispatch_* functions, so I created a small test and was surprised by the results (questions and results are at the end).
In the test code I use mach_absolute_time() for timing. I really don't care that this doesn't map exactly to nanoseconds; I am comparing the values with each other, so the exact time unit doesn't matter, only the differences between the intervals. The numbers in the results section are repeatable, not averaged; I could have averaged the times, but I am not looking for exact numbers.
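(For reference only, since exact units don't matter here: a minimal sketch of converting mach_absolute_time() ticks to nanoseconds with mach_timebase_info(); the helper name is my own and the test below does not use it.)

#include <mach/mach_time.h>
#include <stdint.h>

// Hypothetical helper, not part of the test: scales Mach ticks to nanoseconds.
static uint64_t ticks_to_ns(uint64_t ticks)
{
    static mach_timebase_info_data_t timebase;   // zero-initialized on first use
    if (timebase.denom == 0)
        mach_timebase_info(&timebase);           // fetch numer/denom scale factors
    return ticks * timebase.numer / timebase.denom;
}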
test.m (a simple console application; easy to compile):
#import <Foundation/Foundation.h>
#import <dispatch/dispatch.h>
#include <semaphore.h>
#include <pthread.h>
#include <errno.h>   // errno / EINVAL, used by the pthread_barrier shim below
#include <fcntl.h>   // O_CREAT, used by sem_open()
#include <time.h>
#include <mach/mach_time.h>
// *sigh* OSX does not have pthread_barrier (you can ignore the pthread_barrier
// code, the interesting stuff is lower)
typedef int pthread_barrierattr_t;
typedef struct
{
pthread_mutex_t mutex;
pthread_cond_t cond;
int count;
int tripCount;
} pthread_barrier_t;
int pthread_barrier_init(pthread_barrier_t *barrier, const pthread_barrierattr_t *attr, unsigned int count)
{
if(count == 0)
{
errno = EINVAL;
return -1;
}
if(pthread_mutex_init(&barrier->mutex, 0) < 0)
{
return -1;
}
if(pthread_cond_init(&barrier->cond, 0) < 0)
{
pthread_mutex_destroy(&barrier->mutex);
return -1;
}
barrier->tripCount = count;
barrier->count = 0;
return 0;
}
int pthread_barrier_destroy(pthread_barrier_t *barrier)
{
pthread_cond_destroy(&barrier->cond);
pthread_mutex_destroy(&barrier->mutex);
return 0;
}
int pthread_barrier_wait(pthread_barrier_t *barrier)
{
pthread_mutex_lock(&barrier->mutex);
++(barrier->count);
if(barrier->count >= barrier->tripCount)
{
barrier->count = 0;
pthread_cond_broadcast(&barrier->cond);
pthread_mutex_unlock(&barrier->mutex);
return 1;
}
else
{
pthread_cond_wait(&barrier->cond, &(barrier->mutex));
pthread_mutex_unlock(&barrier->mutex);
return 0;
}
}
//
// ok you can start paying attention now...
//
void onceFunction(void)
{
}
@interface SemaphoreTester : NSObject
{
sem_t *sem1;
sem_t *sem2;
pthread_barrier_t *startBarrier;
pthread_barrier_t *finishBarrier;
}
@property (nonatomic, assign) sem_t *sem1;
@property (nonatomic, assign) sem_t *sem2;
@property (nonatomic, assign) pthread_barrier_t *startBarrier;
@property (nonatomic, assign) pthread_barrier_t *finishBarrier;
@end
@implementation SemaphoreTester
@synthesize sem1, sem2, startBarrier, finishBarrier;
- (void)thread1
{
pthread_barrier_wait(startBarrier);
for(int i = 0; i < 100000; i++)
{
sem_wait(sem1);
sem_post(sem2);
}
pthread_barrier_wait(finishBarrier);
}
- (void)thread2
{
pthread_barrier_wait(startBarrier);
for(int i = 0; i < 100000; i++)
{
sem_wait(sem2);
sem_post(sem1);
}
pthread_barrier_wait(finishBarrier);
}
@end
int main (int argc, const char * argv[])
{
NSAutoreleasePool * pool = [[NSAutoreleasePool alloc] init];
int64_t start;
int64_t stop;
// semaphore non contention test
{
// grrr, OSX doesn't have sem_init
sem_t *sem1 = sem_open("sem1", O_CREAT, 0777, 0);
start = mach_absolute_time();
for(int i = 0; i < 100000; i++)
{
sem_post(sem1);
sem_wait(sem1);
}
stop = mach_absolute_time();
sem_close(sem1);
NSLog(@"0 Contention time = %d", stop - start);
}
// semaphore contention test
{
__block sem_t *sem1 = sem_open("sem1", O_CREAT, 0777, 0);
__block sem_t *sem2 = sem_open("sem2", O_CREAT, 0777, 0);
__block pthread_barrier_t startBarrier;
pthread_barrier_init(&startBarrier, NULL, 3);
__block pthread_barrier_t finishBarrier;
pthread_barrier_init(&finishBarrier, NULL, 3);
dispatch_queue_t queue = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_LOW, 0);
dispatch_async(queue, ^{
pthread_barrier_wait(&startBarrier);
for(int i = 0; i < 100000; i++)
{
sem_wait(sem1);
sem_post(sem2);
}
pthread_barrier_wait(&finishBarrier);
});
dispatch_async(queue, ^{
pthread_barrier_wait(&startBarrier);
for(int i = 0; i < 100000; i++)
{
sem_wait(sem2);
sem_post(sem1);
}
pthread_barrier_wait(&finishBarrier);
});
pthread_barrier_wait(&startBarrier);
// start timing, everyone hit this point
start = mach_absolute_time();
// kick it off
sem_post(sem2);
pthread_barrier_wait(&finishBarrier);
// stop timing, everyone hit the finish point
stop = mach_absolute_time();
sem_close(sem1);
sem_close(sem2);
NSLog(@"2 Threads always contenting time = %d", stop - start);
pthread_barrier_destroy(&startBarrier);
pthread_barrier_destroy(&finishBarrier);
}
// NSTask semaphore contention test
{
sem_t *sem1 = sem_open("sem1", O_CREAT, 0777, 0);
sem_t *sem2 = sem_open("sem2", O_CREAT, 0777, 0);
pthread_barrier_t startBarrier;
pthread_barrier_init(&startBarrier, NULL, 3);
pthread_barrier_t finishBarrier;
pthread_barrier_init(&finishBarrier, NULL, 3);
SemaphoreTester *tester = [[[SemaphoreTester alloc] init] autorelease];
tester.sem1 = sem1;
tester.sem2 = sem2;
tester.startBarrier = &startBarrier;
tester.finishBarrier = &finishBarrier;
[NSThread detachNewThreadSelector:@selector(thread1) toTarget:tester withObject:nil];
[NSThread detachNewThreadSelector:@selector(thread2) toTarget:tester withObject:nil];
pthread_barrier_wait(&startBarrier);
// start timing, everyone hit this point
start = mach_absolute_time();
// kick it off
sem_post(sem2);
pthread_barrier_wait(&finishBarrier);
// stop timing, everyone hit the finish point
stop = mach_absolute_time();
sem_close(sem1);
sem_close(sem2);
NSLog(@"2 NSTasks always contenting time = %d", stop - start);
pthread_barrier_destroy(&startBarrier);
pthread_barrier_destroy(&finishBarrier);
}
// dispatch_semaphore non contention test
{
dispatch_semaphore_t sem1 = dispatch_semaphore_create(0);
start = mach_absolute_time();
for(int i = 0; i < 100000; i++)
{
dispatch_semaphore_signal(sem1);
dispatch_semaphore_wait(sem1, DISPATCH_TIME_FOREVER);
}
stop = mach_absolute_time();
NSLog(@"Dispatch 0 Contention time = %d", stop - start);
}
// dispatch_semaphore contention test
{
__block dispatch_semaphore_t sem1 = dispatch_semaphore_create(0);
__block dispatch_semaphore_t sem2 = dispatch_semaphore_create(0);
__block pthread_barrier_t startBarrier;
pthread_barrier_init(&startBarrier, NULL, 3);
__block pthread_barrier_t finishBarrier;
pthread_barrier_init(&finishBarrier, NULL, 3);
dispatch_queue_t queue = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_LOW, 0);
dispatch_async(queue, ^{
pthread_barrier_wait(&startBarrier);
for(int i = 0; i < 100000; i++)
{
dispatch_semaphore_wait(sem1, DISPATCH_TIME_FOREVER);
dispatch_semaphore_signal(sem2);
}
pthread_barrier_wait(&finishBarrier);
});
dispatch_async(queue, ^{
pthread_barrier_wait(&startBarrier);
for(int i = 0; i < 100000; i++)
{
dispatch_semaphore_wait(sem2, DISPATCH_TIME_FOREVER);
dispatch_semaphore_signal(sem1);
}
pthread_barrier_wait(&finishBarrier);
});
pthread_barrier_wait(&startBarrier);
// start timing, everyone hit this point
start = mach_absolute_time();
// kick it off
dispatch_semaphore_signal(sem2);
pthread_barrier_wait(&finishBarrier);
// stop timing, everyone hit the finish point
stop = mach_absolute_time();
NSLog(@"Dispatch 2 Threads always contenting time = %d", stop - start);
pthread_barrier_destroy(&startBarrier);
pthread_barrier_destroy(&finishBarrier);
}
// pthread_once time
{
pthread_once_t once = PTHREAD_ONCE_INIT;
start = mach_absolute_time();
for(int i = 0; i < 100000; i++)
{
pthread_once(&once, onceFunction);
}
stop = mach_absolute_time();
NSLog(@"pthread_once time = %d", stop - start);
}
// dispatch_once time
{
dispatch_once_t once = 0;
start = mach_absolute_time();
for(int i = 0; i < 100000; i++)
{
dispatch_once(&once, ^{});
}
stop = mach_absolute_time();
NSLog(@"dispatch_once time = %d", stop - start);
}
[pool drain];
return 0;
}
On my iMac (Snow Leopard Server 10.6.4):
Model Identifier: iMac7,1
Processor Name: Intel Core 2 Duo
Processor Speed: 2.4 GHz
Number Of Processors: 1
Total Number Of Cores: 2
L2 Cache: 4 MB
Memory: 4 GB
Bus Speed: 800 MHz
I got:
0 Contention time = 101410439
2 Threads always contenting time = 109748686
2 NSTasks always contenting time = 113225207
0 Contention named semaphore time = 166061832
2 Threads named semaphore contention time = 203913476
2 NSTasks named semaphore contention time = 204988744
Dispatch 0 Contention time = 3411439
Dispatch 2 Threads always contenting time = 708073977
pthread_once time = 2707770
dispatch_once time = 87433
On my MacBook Pro (Snow Leopard 10.6.4):
Model Identifier: MacBookPro6,2
Processor Name: Intel Core i5
Processor Speed: 2.4 GHz
Number Of Processors: 1
Total Number Of Cores: 2 (though HT is enabled)
L2 Cache (per core): 256 KB
L3 Cache: 3 MB
Memory: 8 GB
Processor Interconnect Speed: 4.8 GT/s
I got:
0 Contention time = 74172042
2 Threads always contenting time = 82975742
2 NSTasks always contenting time = 82996716
0 Contention named semaphore time = 106772641
2 Threads named semaphore contention time = 162761973
2 NSTasks named semaphore contention time = 162919844
Dispatch 0 Contention time = 1634941
Dispatch 2 Threads always contenting time = 759753865
pthread_once time = 1516787
dispatch_once time = 120778
On an iPhone 3GS (iOS 4.0.2) I got:
0 Contention time = 5971929
2 Threads always contenting time = 11989710
2 NSTasks always contenting time = 11950564
0 Contention named semaphore time = 16721876
2 Threads named semaphore contention time = 35333045
2 NSTasks named semaphore contention time = 35296579
Dispatch 0 Contention time = 151909
Dispatch 2 Threads always contenting time = 46946548
pthread_once time = 193592
dispatch_once time = 25071
Questions and statements:

- sem_wait() and sem_post() are slow when not under contention.
- sem_wait() and sem_post() are just about as slow under contention as without it (there is a difference, but I expected a huge difference between contention and no contention; I expected numbers like the ones from the dispatch_semaphore code).
- dispatch_semaphore_wait() and dispatch_semaphore_signal() are crazy fast when not under contention (no surprise, since Apple touts this heavily).
- dispatch_semaphore_wait() and dispatch_semaphore_signal() are 3x slower than sem_wait() and sem_post() under contention.
- dispatch_once() is faster than pthread_once(), by about 10x. Why? The only thing I can tell from the headers is that dispatch_once() does not carry the function-call overhead that pthread_once() does.

Motivation: I am presented with two sets of tools for the semaphore and once-call jobs (I did find other semaphore variants, but I will ignore them unless one is brought up as a better option). I just want to know which is the best tool for the job (if you can drive a screw with either a Phillips or a flathead, I would pick the Phillips when I don't have to torque the screw and the flathead when I do). It seems that if I start writing utilities with libdispatch I might not be able to port them to other operating systems where libdispatch doesn't work yet... but it is so enticing to use ;)
As it stands: I will use libdispatch when I don't have to worry about portability, and POSIX calls when I do.
Thanks!
Answer (score: 11):
sem_wait() and sem_post() are heavyweight synchronization facilities that can be used between processes. They always involve round trips to the kernel, and probably always require your thread to be rescheduled. They are generally not the right choice for in-process synchronization. I'm not sure why the named variants would be slower than the anonymous ones...
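As a side note, here is a minimal sketch (my own illustration, not part of the answer) of the cross-process signaling that named POSIX semaphores are aimed at; the semaphore name and the producer/consumer split are made up:

#include <fcntl.h>
#include <semaphore.h>
#include <stdio.h>
#include <stdlib.h>

// Run in the "producer" process.
static void producer(void)
{
    sem_t *sem = sem_open("/example_sem", O_CREAT, 0644, 0);
    if (sem == SEM_FAILED) { perror("sem_open"); exit(1); }
    // ... publish some shared state (a file, shared memory, ...) ...
    sem_post(sem);               // kernel call: wakes the waiting process
    sem_close(sem);
}

// Run in a separate "consumer" process.
static void consumer(void)
{
    sem_t *sem = sem_open("/example_sem", O_CREAT, 0644, 0);
    if (sem == SEM_FAILED) { perror("sem_open"); exit(1); }
    sem_wait(sem);               // kernel call: blocks until the producer posts
    sem_close(sem);
    sem_unlink("/example_sem");  // remove the name once finished
}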
Mac OS X is actually pretty good about POSIX compliance... but the POSIX spec has a lot of optional features, and the Mac doesn't have all of them. Your post is actually the first I've ever heard of pthread_barriers, so I'm guessing they are either relatively recent or not all that common. (I haven't paid much attention to the evolution of pthreads for the past ten years or so.)
The reason the dispatch stuff falls apart under forced extreme contention is probably that, under the covers, its behavior is similar to a spin lock. Your dispatch worker threads are very likely wasting a good chunk of their quanta under the optimistic assumption that the contended resource is going to be available any cycle now... A bit of time with Shark would tell you for sure. The take-home point, though, should be that "optimizing" the thrashing during contention is a poor investment of programmer time. Instead, spend the time optimizing the code to avoid heavy contention in the first place.
If you really do have a resource that is an unavoidable bottleneck within your process, putting a semaphore around it is massively sub-optimal. Put it on its own serial dispatch queue and, as much as possible, dispatch_async blocks to be executed on that queue.
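A minimal sketch of that suggestion (my own; the queue label and the useSharedResource() function are made-up placeholders):

#import <dispatch/dispatch.h>

extern void useSharedResource(void);   // hypothetical function that touches the bottleneck resource

static dispatch_queue_t resourceQueue(void)
{
    static dispatch_queue_t queue;
    static dispatch_once_t onceToken;
    dispatch_once(&onceToken, ^{
        // A serial queue (the default kind) that "owns" the resource.
        queue = dispatch_queue_create("com.example.resource", NULL);
    });
    return queue;
}

static void touchResourceAsync(void)
{
    // Callers enqueue work against the resource instead of blocking on a semaphore;
    // the serial queue guarantees that only one block runs against it at a time.
    dispatch_async(resourceQueue(), ^{
        useSharedResource();
    });
}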
Finally, dispatch_once() is faster than pthread_once() because it is specified and implemented to be fast on current processors. Apple could probably speed up the pthread_once() implementation, as I suspect the reference implementation uses pthread synchronization primitives, but... well... they have provided all of the libdispatch goodness instead. :-)
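For completeness, the usual dispatch_once pattern looks roughly like this (my own minimal sketch, not code from the question or the answer):

#import <Foundation/Foundation.h>
#import <dispatch/dispatch.h>

static id sharedInstance(void)
{
    static id shared = nil;
    static dispatch_once_t onceToken;
    dispatch_once(&onceToken, ^{
        shared = [[NSObject alloc] init];   // stands in for the real one-time setup
    });
    return shared;   // after the first call this is essentially a single check, no lock taken
}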