我在Rust中对函数式编程性能进行了一些测试:
extern crate rand; // 0.5.5
use rand::Rng;
fn time(f: impl FnOnce()) -> std::time::Duration {
let s = std::time::Instant::now();
f();
s.elapsed()
}
/// Benchmarks three looping styles (while / for / fold) over 10M random i64s.
///
/// Two fixes over the naive version:
/// - `wrapping_add` instead of `+=`: summing 10M random `i64`s overflows with
///   near certainty, which would panic in a debug build (release wraps).
/// - every result is folded into `sink`, which is printed at the end, so the
///   optimizer cannot delete the timed loops as dead code (otherwise the
///   benchmark could be measuring nothing).
fn main() {
    let iteration = 10000000;
    let mut rng = rand::thread_rng();
    // Sink that keeps all benchmark results observable.
    let mut sink = 0i64;
    println!(
        "while: {:?}",
        time(|| {
            let mut i = 0;
            let mut num = 0i64;
            while i < iteration {
                num = num.wrapping_add(rng.gen::<i64>());
                i += 1;
            }
            sink = sink.wrapping_add(num);
        })
    ); // 29.116528ms
    println!(
        "for: {:?}",
        time(|| {
            let mut num = 0i64;
            for _ in 0..iteration {
                num = num.wrapping_add(rng.gen::<i64>());
            }
            sink = sink.wrapping_add(num);
        })
    ); // 26.68407ms
    println!(
        "fold: {:?}",
        time(|| {
            // NOTE(review): `gen_iter` is deprecated in rand 0.5;
            // `sample_iter(&Standard)` is the modern replacement.
            let num = rng
                .gen_iter::<i64>()
                .take(iteration)
                .fold(0i64, |x, y| x.wrapping_add(y));
            sink = sink.wrapping_add(num);
        })
    ); // 26.065936ms
    // Printing the sink forces all three computations to be kept.
    println!("checksum: {}", sink);
}
我已设置优化标志以进行编译。
这三个案例花了几乎相同的时间,这是否意味着Rust中的函数式编程是零成本的?
答案 0(得分:9)
与往常一样,先给出标准的性能警告:您应该针对自己的具体场景对代码做基准测试,并了解其中的权衡。先写出易于理解的代码,必要时再让它变快。
下面把几种写法拆分成独立的函数,并标记为永不内联。我还阻止了随机数生成器被内联,并把迭代次数调小,以便后续分析:
extern crate rand; // 0.5.5
use rand::{distributions::Standard, Rng, RngCore};
const ITERATION: usize = 10000;
/// Sums `ITERATION` random `i64`s with a hand-written `while` loop.
///
/// `#[inline(never)]` keeps this as a distinct symbol so its optimized LLVM IR
/// can be inspected and compared against the other looping styles.
#[inline(never)]
fn add_manual(mut rng: impl Rng) -> i64 {
    let mut num = 0;
    let mut i = 0;
    while i < ITERATION {
        // NOTE(review): summing random i64s overflows eventually; release
        // builds wrap silently, a debug build would panic here.
        num += rng.gen::<i64>();
        i += 1;
    }
    num
}
/// Sums `ITERATION` random `i64`s with a `for` loop over a range.
///
/// Kept out-of-line (`#[inline(never)]`) for IR comparison with `add_manual`.
#[inline(never)]
fn add_range(mut rng: impl Rng) -> i64 {
    let mut num = 0;
    for _ in 0..ITERATION {
        num += rng.gen::<i64>();
    }
    num
}
/// Sums `ITERATION` random `i64`s with `Iterator::fold`.
///
/// Uses `sample_iter(&Standard)`, the rand 0.5 replacement for the
/// deprecated `gen_iter` used in the question.
#[inline(never)]
fn add_fold(mut rng: impl Rng) -> i64 {
    rng.sample_iter::<i64, _>(&Standard)
        .take(ITERATION)
        .fold(0i64, |x, y| x + y)
}
/// Sums `ITERATION` random `i64`s with `Iterator::sum` — the most idiomatic
/// spelling of `add_fold`; the compiler merges the two into one symbol.
#[inline(never)]
fn add_sum(mut rng: impl Rng) -> i64 {
    rng.sample_iter::<i64, _>(&Standard).take(ITERATION).sum()
}
// Prevent inlining the RNG to create easier-to-inspect LLVM IR
/// Wrapper whose `RngCore` methods are all `#[inline(never)]`, so every
/// random-number request appears as a real call instruction in the generated
/// IR instead of being inlined into the benchmark loops.
struct NoInlineRng<R: Rng>(R);

impl<R: Rng> RngCore for NoInlineRng<R> {
    // Each method simply forwards to the wrapped RNG.
    #[inline(never)]
    fn next_u32(&mut self) -> u32 {
        self.0.next_u32()
    }
    #[inline(never)]
    fn next_u64(&mut self) -> u64 {
        self.0.next_u64()
    }
    #[inline(never)]
    fn fill_bytes(&mut self, dest: &mut [u8]) {
        self.0.fill_bytes(dest)
    }
    #[inline(never)]
    fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), rand::Error> {
        self.0.try_fill_bytes(dest)
    }
}
/// Runs all four summation styles against a non-inlined RNG and prints every
/// result, which keeps each computation observable (not optimized away).
fn main() {
    let mut rng = NoInlineRng(rand::thread_rng());
    let manual = add_manual(&mut rng);
    let range = add_range(&mut rng);
    let fold = add_fold(&mut rng);
    let sum = add_sum(&mut rng);
    println!("{}, {}, {}, {}", manual, range, fold, sum);
}
相应的LLVM IR,来自Rust 1.29.2在发布模式下构建:
; playground::add_manual
; Function Attrs: noinline uwtable
define internal fastcc i64 @_ZN10playground10add_manual17hb7f61676b41e00bfE(i64** dereferenceable(8)) unnamed_addr #4 personality i32 (i32, i32, i64, %"unwind::libunwind::_Unwind_Exception"*, %"unwind::libunwind::_Unwind_Context"*)* @rust_eh_personality {
start:
br label %bb4
bb3: ; preds = %bb4
ret i64 %2
bb4: ; preds = %bb4, %start
%num.09 = phi i64 [ 0, %start ], [ %2, %bb4 ]
%i.08 = phi i64 [ 0, %start ], [ %3, %bb4 ]
%rng.val.val = load i64*, i64** %0, align 8
; call <playground::NoInlineRng<R> as rand_core::RngCore>::next_u64
%1 = tail call fastcc i64 @"_ZN71_$LT$playground..NoInlineRng$LT$R$GT$$u20$as$u20$rand_core..RngCore$GT$8next_u6417h0b95e10cc642939aE"(i64* %rng.val.val)
%2 = add i64 %1, %num.09
%3 = add nuw nsw i64 %i.08, 1
%exitcond = icmp eq i64 %3, 10000
br i1 %exitcond, label %bb3, label %bb4
}
; playground::add_range
; Function Attrs: noinline uwtable
define internal fastcc i64 @_ZN10playground9add_range17h27ceded9d02ff747E(i64** dereferenceable(8)) unnamed_addr #4 personality i32 (i32, i32, i64, %"unwind::libunwind::_Unwind_Exception"*, %"unwind::libunwind::_Unwind_Context"*)* @rust_eh_personality {
start:
br label %bb8
bb6: ; preds = %bb8
ret i64 %3
bb8: ; preds = %bb8, %start
%num.021 = phi i64 [ 0, %start ], [ %3, %bb8 ]
%iter.sroa.0.020 = phi i64 [ 0, %start ], [ %1, %bb8 ]
%1 = add nuw nsw i64 %iter.sroa.0.020, 1
%rng.val.val = load i64*, i64** %0, align 8
; call <playground::NoInlineRng<R> as rand_core::RngCore>::next_u64
%2 = tail call fastcc i64 @"_ZN71_$LT$playground..NoInlineRng$LT$R$GT$$u20$as$u20$rand_core..RngCore$GT$8next_u6417h0b95e10cc642939aE"(i64* %rng.val.val)
%3 = add i64 %2, %num.021
%exitcond = icmp eq i64 %1, 10000
br i1 %exitcond, label %bb6, label %bb8
}
; playground::add_sum
; Function Attrs: noinline uwtable
define internal fastcc i64 @_ZN10playground7add_sum17h0910bf39c2bf0430E(i64** dereferenceable(8)) unnamed_addr #4 personality i32 (i32, i32, i64, %"unwind::libunwind::_Unwind_Exception"*, %"unwind::libunwind::_Unwind_Context"*)* @rust_eh_personality {
bb2.i.i.i.i:
br label %bb2.i.i.i.i.i
bb2.i.i.i.i.i: ; preds = %bb2.i.i.i.i.i, %bb2.i.i.i.i
%1 = phi i64 [ 10000, %bb2.i.i.i.i ], [ %3, %bb2.i.i.i.i.i ]
%accum.0.i.i.i.i.i = phi i64 [ 0, %bb2.i.i.i.i ], [ %4, %bb2.i.i.i.i.i ]
%.val.val.i.i.i.i.i.i = load i64*, i64** %0, align 8, !noalias !33
; call <playground::NoInlineRng<R> as rand_core::RngCore>::next_u64
%2 = tail call fastcc i64 @"_ZN71_$LT$playground..NoInlineRng$LT$R$GT$$u20$as$u20$rand_core..RngCore$GT$8next_u6417h0b95e10cc642939aE"(i64* %.val.val.i.i.i.i.i.i), !noalias !33
%3 = add nsw i64 %1, -1
%4 = add i64 %2, %accum.0.i.i.i.i.i
%5 = icmp eq i64 %3, 0
br i1 %5, label %_ZN4core4iter8iterator8Iterator3sum17hcbc4a00f32ac1feeE.exit, label %bb2.i.i.i.i.i
_ZN4core4iter8iterator8Iterator3sum17hcbc4a00f32ac1feeE.exit: ; preds = %bb2.i.i.i.i.i
ret i64 %4
}
您可以看到 add_manual 和 add_range 基本相同,只是 add 指令所在的位置略有不同。add_sum 也类似,只不过它从 10000 向下递减而不是向上计数。add_fold 没有单独的定义,因为编译器已判定它与 add_sum 完全相同,并把两者合并了。
在这种情况下,优化器确实可以使它们基本相同。让我们使用内置的基准测试:
#[bench]
fn bench_add_manual(b: &mut Bencher) {
    // A fresh thread-local RNG handle is obtained inside every timed iteration.
    b.iter(|| add_manual(rand::thread_rng()));
}
#[bench]
fn bench_add_range(b: &mut Bencher) {
    // A fresh thread-local RNG handle is obtained inside every timed iteration.
    b.iter(|| add_range(rand::thread_rng()));
}
#[bench]
fn bench_add_sum(b: &mut Bencher) {
    // A fresh thread-local RNG handle is obtained inside every timed iteration.
    b.iter(|| add_sum(rand::thread_rng()));
}
结果是:
test bench_add_manual ... bench: 28,058 ns/iter (+/- 3,552)
test bench_add_range ... bench: 28,349 ns/iter (+/- 6,663)
test bench_add_sum ... bench: 29,807 ns/iter (+/- 2,016)
在我看来这些结果基本一致。可以说,在这个案例中、在当前这个时间点,三种写法之间没有显著的性能差异。但这并不代表所有以函数式风格编写的代码都是如此。
答案 1(得分:3)
通常,fold(即 reduce)可以被编译成与手写循环等效的代码,从而节省程序员的时间。值得注意的是,fold 中的递归处于尾部位置,因此它本质上就是循环的一种简洁写法。
但并非所有以函数式风格编写的程序都是如此。