
objective-c - Performance test: sem_t vs. dispatch_semaphore_t and pthread_once_t vs. dispatch_once_t


I wanted to know whether it would be better/faster to use POSIX calls like pthread_once() and sem_wait(), or the dispatch_* functions, so I created some tests and was surprised by the results (questions and results are at the end).

In the test code I use mach_absolute_time() to time the calls. I really don't care that this doesn't exactly match nanoseconds; I am comparing the values with each other, so the exact time unit doesn't matter, only the differences between the intervals do. The numbers in the results section are repeatable, not averaged; I could have averaged the times, but I am not looking for exact numbers.
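For reference, a mach tick count can be converted to wall-clock nanoseconds via mach_timebase_info(); a minimal sketch (this helper is not part of the test program, its name is just for illustration):

#include <mach/mach_time.h>
#include <stdint.h>

// Hypothetical helper: convert a mach_absolute_time() delta to nanoseconds.
// On the Intel Macs used here numer/denom is typically 1/1, so ticks are
// effectively already nanoseconds.
static uint64_t ticks_to_nanoseconds(uint64_t ticks)
{
    static mach_timebase_info_data_t timebase = {0, 0};
    if(timebase.denom == 0)
    {
        mach_timebase_info(&timebase);   // fills in the numer/denom scale factors
    }
    return ticks * timebase.numer / timebase.denom;
}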

test.m (a simple console application; easy to compile):

#import <Foundation/Foundation.h>
#import <dispatch/dispatch.h>
#include <semaphore.h>
#include <pthread.h>
#include <fcntl.h>      // O_CREAT for sem_open()
#include <errno.h>      // errno used by the pthread_barrier shim below
#include <time.h>
#include <mach/mach_time.h>

// *sigh* OSX does not have pthread_barrier (you can ignore the pthread_barrier
// code, the interesting stuff is lower)
typedef int pthread_barrierattr_t;
typedef struct
{
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    int count;
    int tripCount;
} pthread_barrier_t;


int pthread_barrier_init(pthread_barrier_t *barrier, const pthread_barrierattr_t *attr, unsigned int count)
{
    if(count == 0)
    {
        errno = EINVAL;
        return -1;
    }
    if(pthread_mutex_init(&barrier->mutex, 0) < 0)
    {
        return -1;
    }
    if(pthread_cond_init(&barrier->cond, 0) < 0)
    {
        pthread_mutex_destroy(&barrier->mutex);
        return -1;
    }
    barrier->tripCount = count;
    barrier->count = 0;

    return 0;
}

int pthread_barrier_destroy(pthread_barrier_t *barrier)
{
    pthread_cond_destroy(&barrier->cond);
    pthread_mutex_destroy(&barrier->mutex);
    return 0;
}

int pthread_barrier_wait(pthread_barrier_t *barrier)
{
    pthread_mutex_lock(&barrier->mutex);
    ++(barrier->count);
    if(barrier->count >= barrier->tripCount)
    {
        barrier->count = 0;
        pthread_cond_broadcast(&barrier->cond);
        pthread_mutex_unlock(&barrier->mutex);
        return 1;
    }
    else
    {
        pthread_cond_wait(&barrier->cond, &(barrier->mutex));
        pthread_mutex_unlock(&barrier->mutex);
        return 0;
    }
}

//
// ok you can start paying attention now...
//

void onceFunction(void)
{
}

@interface SemaphoreTester : NSObject
{
    sem_t *sem1;
    sem_t *sem2;
    pthread_barrier_t *startBarrier;
    pthread_barrier_t *finishBarrier;
}
@property (nonatomic, assign) sem_t *sem1;
@property (nonatomic, assign) sem_t *sem2;
@property (nonatomic, assign) pthread_barrier_t *startBarrier;
@property (nonatomic, assign) pthread_barrier_t *finishBarrier;
@end

@implementation SemaphoreTester
@synthesize sem1, sem2, startBarrier, finishBarrier;

- (void)thread1
{
    pthread_barrier_wait(startBarrier);
    for(int i = 0; i < 100000; i++)
    {
        sem_wait(sem1);
        sem_post(sem2);
    }
    pthread_barrier_wait(finishBarrier);
}

- (void)thread2
{
    pthread_barrier_wait(startBarrier);
    for(int i = 0; i < 100000; i++)
    {
        sem_wait(sem2);
        sem_post(sem1);
    }
    pthread_barrier_wait(finishBarrier);
}
@end


int main (int argc, const char * argv[])
{
    NSAutoreleasePool * pool = [[NSAutoreleasePool alloc] init];
    int64_t start;
    int64_t stop;

    // semaphore non contention test
    {
        // grrr, OSX doesn't have sem_init
        sem_t *sem1 = sem_open("sem1", O_CREAT, 0777, 0);

        start = mach_absolute_time();
        for(int i = 0; i < 100000; i++)
        {
            sem_post(sem1);
            sem_wait(sem1);
        }
        stop = mach_absolute_time();
        sem_close(sem1);

        NSLog(@"0 Contention time = %lld", stop - start);
    }

    // semaphore contention test
    {
        __block sem_t *sem1 = sem_open("sem1", O_CREAT, 0777, 0);
        __block sem_t *sem2 = sem_open("sem2", O_CREAT, 0777, 0);
        __block pthread_barrier_t startBarrier;
        pthread_barrier_init(&startBarrier, NULL, 3);
        __block pthread_barrier_t finishBarrier;
        pthread_barrier_init(&finishBarrier, NULL, 3);

        dispatch_queue_t queue = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_LOW, 0);
        dispatch_async(queue, ^{
            pthread_barrier_wait(&startBarrier);
            for(int i = 0; i < 100000; i++)
            {
                sem_wait(sem1);
                sem_post(sem2);
            }
            pthread_barrier_wait(&finishBarrier);
        });
        dispatch_async(queue, ^{
            pthread_barrier_wait(&startBarrier);
            for(int i = 0; i < 100000; i++)
            {
                sem_wait(sem2);
                sem_post(sem1);
            }
            pthread_barrier_wait(&finishBarrier);
        });
        pthread_barrier_wait(&startBarrier);
        // start timing, everyone hit this point
        start = mach_absolute_time();
        // kick it off
        sem_post(sem2);
        pthread_barrier_wait(&finishBarrier);
        // stop timing, everyone hit the finish point
        stop = mach_absolute_time();
        sem_close(sem1);
        sem_close(sem2);
        NSLog(@"2 Threads always contenting time = %lld", stop - start);
        pthread_barrier_destroy(&startBarrier);
        pthread_barrier_destroy(&finishBarrier);
    }

    // NSThread semaphore contention test (logged as "NSTasks" below)
    {
        sem_t *sem1 = sem_open("sem1", O_CREAT, 0777, 0);
        sem_t *sem2 = sem_open("sem2", O_CREAT, 0777, 0);
        pthread_barrier_t startBarrier;
        pthread_barrier_init(&startBarrier, NULL, 3);
        pthread_barrier_t finishBarrier;
        pthread_barrier_init(&finishBarrier, NULL, 3);

        SemaphoreTester *tester = [[[SemaphoreTester alloc] init] autorelease];
        tester.sem1 = sem1;
        tester.sem2 = sem2;
        tester.startBarrier = &startBarrier;
        tester.finishBarrier = &finishBarrier;
        [NSThread detachNewThreadSelector:@selector(thread1) toTarget:tester withObject:nil];
        [NSThread detachNewThreadSelector:@selector(thread2) toTarget:tester withObject:nil];
        pthread_barrier_wait(&startBarrier);
        // start timing, everyone hit this point
        start = mach_absolute_time();
        // kick it off
        sem_post(sem2);
        pthread_barrier_wait(&finishBarrier);
        // stop timing, everyone hit the finish point
        stop = mach_absolute_time();
        sem_close(sem1);
        sem_close(sem2);
        NSLog(@"2 NSTasks always contenting time = %lld", stop - start);
        pthread_barrier_destroy(&startBarrier);
        pthread_barrier_destroy(&finishBarrier);
    }

    // dispatch_semaphore non contention test
    {
        dispatch_semaphore_t sem1 = dispatch_semaphore_create(0);

        start = mach_absolute_time();
        for(int i = 0; i < 100000; i++)
        {
            dispatch_semaphore_signal(sem1);
            dispatch_semaphore_wait(sem1, DISPATCH_TIME_FOREVER);
        }
        stop = mach_absolute_time();

        NSLog(@"Dispatch 0 Contention time = %lld", stop - start);
    }


    // dispatch_semaphore contention test
    {
        __block dispatch_semaphore_t sem1 = dispatch_semaphore_create(0);
        __block dispatch_semaphore_t sem2 = dispatch_semaphore_create(0);
        __block pthread_barrier_t startBarrier;
        pthread_barrier_init(&startBarrier, NULL, 3);
        __block pthread_barrier_t finishBarrier;
        pthread_barrier_init(&finishBarrier, NULL, 3);

        dispatch_queue_t queue = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_LOW, 0);
        dispatch_async(queue, ^{
            pthread_barrier_wait(&startBarrier);
            for(int i = 0; i < 100000; i++)
            {
                dispatch_semaphore_wait(sem1, DISPATCH_TIME_FOREVER);
                dispatch_semaphore_signal(sem2);
            }
            pthread_barrier_wait(&finishBarrier);
        });
        dispatch_async(queue, ^{
            pthread_barrier_wait(&startBarrier);
            for(int i = 0; i < 100000; i++)
            {
                dispatch_semaphore_wait(sem2, DISPATCH_TIME_FOREVER);
                dispatch_semaphore_signal(sem1);
            }
            pthread_barrier_wait(&finishBarrier);
        });
        pthread_barrier_wait(&startBarrier);
        // start timing, everyone hit this point
        start = mach_absolute_time();
        // kick it off
        dispatch_semaphore_signal(sem2);
        pthread_barrier_wait(&finishBarrier);
        // stop timing, everyone hit the finish point
        stop = mach_absolute_time();

        NSLog(@"Dispatch 2 Threads always contenting time = %lld", stop - start);
        pthread_barrier_destroy(&startBarrier);
        pthread_barrier_destroy(&finishBarrier);
    }

    // pthread_once time
    {
        pthread_once_t once = PTHREAD_ONCE_INIT;
        start = mach_absolute_time();
        for(int i = 0; i < 100000; i++)
        {
            pthread_once(&once, onceFunction);
        }
        stop = mach_absolute_time();

        NSLog(@"pthread_once time  = %lld", stop - start);
    }

    // dispatch_once time
    {
        dispatch_once_t once = 0;
        start = mach_absolute_time();
        for(int i = 0; i < 100000; i++)
        {
            dispatch_once(&once, ^{});
        }
        stop = mach_absolute_time();

        NSLog(@"dispatch_once time = %lld", stop - start);
    }

    [pool drain];
    return 0;
}

On my iMac (Snow Leopard Server 10.6.4):

  Model Identifier:        iMac7,1
  Processor Name:          Intel Core 2 Duo
  Processor Speed:         2.4 GHz
  Number Of Processors:    1
  Total Number Of Cores:   2
  L2 Cache:                4 MB
  Memory:                  4 GB
  Bus Speed:               800 MHz

I get:

0 Contention time                         =    101410439
2 Threads always contenting time          =    109748686
2 NSTasks always contenting time          =    113225207
0 Contention named semaphore time         =    166061832
2 Threads named semaphore contention time =    203913476
2 NSTasks named semaphore contention time =    204988744
Dispatch 0 Contention time                =      3411439
Dispatch 2 Threads always contenting time =    708073977
pthread_once time  =      2707770
dispatch_once time =        87433

On my MacBook Pro (Snow Leopard 10.6.4):

  Model Identifier:              MacBookPro6,2
  Processor Name:                Intel Core i5
  Processor Speed:               2.4 GHz
  Number Of Processors:          1
  Total Number Of Cores:         2 (though HT is enabled)
  L2 Cache (per core):           256 KB
  L3 Cache:                      3 MB
  Memory:                        8 GB
  Processor Interconnect Speed:  4.8 GT/s

I get:

0 Contention time                         =     74172042
2 Threads always contenting time          =     82975742
2 NSTasks always contenting time          =     82996716
0 Contention named semaphore time         =    106772641
2 Threads named semaphore contention time =    162761973
2 NSTasks named semaphore contention time =    162919844
Dispatch 0 Contention time                =      1634941
Dispatch 2 Threads always contenting time =    759753865
pthread_once time  =      1516787
dispatch_once time =       120778

On an iPhone 3GS (iOS 4.0.2) I get:

0 Contention time                         =      5971929
2 Threads always contenting time          =     11989710
2 NSTasks always contenting time          =     11950564
0 Contention named semaphore time         =     16721876
2 Threads named semaphore contention time =     35333045
2 NSTasks named semaphore contention time =     35296579
Dispatch 0 Contention time                =       151909
Dispatch 2 Threads always contenting time =     46946548
pthread_once time  =       193592
dispatch_once time =        25071

Questions and statements:

  • sem_wait() and sem_post() are slow when not under contention
    • Why is this the case?
    • Does OSX just not care about compatible APIs? Is there some legacy code that forces this to be slow?
    • Why aren't these numbers the same as the dispatch_semaphore functions?
  • sem_wait() and sem_post() are just as slow under contention as when they are not (there is a difference, but I expected a huge difference between contention and no contention; I expected numbers like those in the dispatch_semaphore code)
  • sem_wait() and sem_post() are slower when using named semaphores.
    • Why? Is this because the semaphore has to be synced between processes? Maybe there is more baggage in doing that.
  • dispatch_semaphore_wait() and dispatch_semaphore_signal() are crazy fast when not under contention (no surprise here since apple touts this a lot).
  • dispatch_semaphore_wait() and dispatch_semaphore_signal() are 3x slower than sem_wait() and sem_post() under contention
    • Why is this so slow? This does not make sense to me. I would have expected it to be on par with the sem_t under contention.
  • dispatch_once() is about 10x faster than pthread_once(), why? The only thing I can tell from the headers is that there is no function call burden with dispatch_once() compared to pthread_once().

Motivation: I am presented with two sets of tools for semaphores or once calls (I actually found other semaphore variants along the way, but I will ignore those unless a better option is brought up). I just want to know what the best tool for the job is (if you have the option of driving a screw with a Phillips or a flathead, I would pick the Phillips if I don't have to torque the screw, and the flathead if I do). It seems that if I start writing utilities with libdispatch I might not be able to port them to other operating systems that don't have libdispatch working yet... but it is so enticing to use ;)

As it stands: I will be using libdispatch when I don't have to worry about portability, and POSIX calls when I do.

Thanks!

Best Answer

sem_wait() and sem_post() are heavyweight synchronization facilities that can be used between processes. They always involve round trips to the kernel, and probably always require your thread to be rescheduled. They are generally not the right choice for in-process synchronization. I'm not sure why the named variants would be slower than the anonymous ones...

Mac OS X is actually pretty good about Posix compatibility... but the Posix spec has a lot of optional functions, and the Mac doesn't have them all. Your post is actually the first I've ever heard of pthread_barriers, so I'm guessing they're either relatively recent, or not all that common. (I haven't paid much attention to pthreads evolution for the past ten years or so.)

The reason the dispatch stuff falls apart under forced extreme contention is probably because behind the covers the behavior is similar to spin locks. Your dispatch worker threads are very likely wasting a good chunk of their quanta under the optimistic assumption that the resource under contention is going to be available any cycle now... A bit of time with Shark would tell you for sure. The take-home point, though, should be that "optimizing" the thrashing during contention is a poor investment of programmer time. Instead spend the time optimizing the code to avoid heavy contention in the first place.

If you really do have a resource in your process that is an unavoidable bottleneck, putting a semaphore around it is massively sub-optimal. Put it on its own serial dispatch queue, and as much as possible dispatch_async blocks to be executed on that queue.
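For example, here is a minimal sketch of that pattern (the queue label, shared variable, and function names are made up for illustration):

#import <dispatch/dispatch.h>

// Hypothetical shared resource guarded by a private serial queue instead of a semaphore.
static dispatch_queue_t resourceQueue;
static int sharedCounter = 0;

static void setupQueue(void)
{
    // A serial queue: blocks submitted to it run one at a time, in FIFO order.
    resourceQueue = dispatch_queue_create("com.example.resource", NULL);
}

static void incrementCounterAsync(void)
{
    // Callers don't block; the work is serialized on the queue instead of
    // threads fighting over a lock or semaphore.
    dispatch_async(resourceQueue, ^{
        sharedCounter++;
    });
}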

Finally, dispatch_once() is faster than pthread_once() because it is spec'd and implemented to be fast on current processors. Apple could probably speed up the pthread_once() implementation, as I suspect the reference implementation uses pthread synchronization primitives, but... well... they've provided all of the libdispatch goodness instead. :-)
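For completeness, the usual way dispatch_once() gets used in practice is lazy one-time initialization; a minimal sketch (the accessor and what it creates are just illustrative):

#import <Foundation/Foundation.h>
#import <dispatch/dispatch.h>

// Hypothetical lazy singleton accessor: the block runs exactly once, even if
// sharedFormatter() is called from multiple threads concurrently.
static NSDateFormatter *sharedFormatter(void)
{
    static NSDateFormatter *formatter = nil;
    static dispatch_once_t onceToken;   // must be static or global, never stack-local
    dispatch_once(&onceToken, ^{
        formatter = [[NSDateFormatter alloc] init];
    });
    return formatter;
}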

Regarding objective-c - Performance test: sem_t vs. dispatch_semaphore_t and pthread_once_t vs. dispatch_once_t, see the original question on Stack Overflow: https://stackoverflow.com/questions/3640853/
