I want to use the sdl2-rs crate to start a timer that drives my draw calls. I tried to set it up by doing something like this:
extern crate sdl2;

use std::sync::mpsc;

enum Event {
    Draw,
}

fn main() {
    let sdl_context = sdl2::init().unwrap();
    let video_subsystem = sdl_context.video().unwrap();
    video_subsystem.gl_attr().set_context_version(4, 5);
    println!(
        "Current gl version: {:?}",
        video_subsystem.gl_attr().context_version()
    );
    let timer_subsystem = sdl_context.timer().unwrap();
    let window = video_subsystem
        .window("rust-sdl2 demo: Video", 800, 600)
        .position_centered()
        .opengl()
        .build()
        .unwrap();
    let context = window.gl_create_context().unwrap();
    let (tx, rx) = mpsc::channel();
    {
        let timer_tx = tx.clone();
        timer_subsystem.add_timer(
            1000u32 / 120u32,
            Box::new(move || {
                timer_tx.send(Event::Draw);
                1000u32 / 120u32
            }),
        );
    }
}
However, I get this error:
error[E0277]: the trait bound `std::sync::mpsc::Sender<Event>: std::marker::Sync` is not satisfied in `[closure@src/main.rs:33:22: 36:14 timer_tx:std::sync::mpsc::Sender<Event>]`
--> src/main.rs:33:13
|
33 | / Box::new(move || {
34 | | timer_tx.send(Event::Draw);
35 | | 1000u32 / 120u32
36 | | }),
| |______________^ `std::sync::mpsc::Sender<Event>` cannot be shared between threads safely
|
= help: within `[closure@src/main.rs:33:22: 36:14 timer_tx:std::sync::mpsc::Sender<Event>]`, the trait `std::marker::Sync` is not implemented for `std::sync::mpsc::Sender<Event>`
= note: required because it appears within the type `[closure@src/main.rs:33:22: 36:14 timer_tx:std::sync::mpsc::Sender<Event>]`
= note: required for the cast to the object type `std::ops::FnMut() -> u32 + std::marker::Sync`
I understand that Sender is not Sync, so I cloned it and moved the clone into the FnMut closure, but it still does not work. How can I make this work? As I understand it, by moving the object into the closure we are not sharing it, so it should work this way. Besides, the example in the documentation does exactly the same thing.
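To isolate the problem, the same error can be reproduced without SDL at all. In the sketch below, add_timer_like is a made-up stand-in, not the real sdl2 API; it only carries the same bound that the final note of the error names (FnMut() -> u32 + Sync). This sketch deliberately fails to compile with the same E0277:

use std::sync::mpsc;

// Made-up stand-in: a boxed callback that must be `FnMut() -> u32 + Sync`,
// which is the trait-object type the error's last note mentions.
fn add_timer_like(_cb: Box<dyn FnMut() -> u32 + Sync>) {}

fn main() {
    let (tx, _rx) = mpsc::channel::<u32>();
    // Fails with error[E0277]: `Sender<u32>` cannot be shared between
    // threads safely. Moving `tx` in does not help: the closure's own
    // type now contains a `Sender` field, and a closure is `Sync` only
    // if everything it captures is `Sync`.
    add_timer_like(Box::new(move || {
        let _ = tx.send(0);
        1000u32 / 120u32
    }));
}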
Answer 0 (score: 0)
A cloned Sender has the same type as the original, so it is still not Sync. The add_timer function requires a Sync closure, so you need to wrap the sender in a Mutex (from std::sync); that is what lets it be shared between threads:

let timer_tx = Mutex::new(tx.clone());
timer_subsystem.add_timer(
    1000u32 / 120u32,
    Box::new(move || {
        timer_tx.lock().unwrap().send(Event::Draw).unwrap();
        1000u32 / 120u32
    }),
);
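One thing the answer leaves implicit: the Receiver half of the channel stays on the main thread, so the usual pattern is to drain the queued Draw events once per frame with try_recv, which does not block. A minimal, self-contained sketch of that receiving side, where the println! is only a stand-in for the actual draw call and buffer swap:

use std::sync::mpsc;

enum Event {
    Draw,
}

fn main() {
    let (tx, rx) = mpsc::channel();
    // Stand-in for what the timer callback sends at ~120 Hz.
    tx.send(Event::Draw).unwrap();

    // In the real program this runs once per iteration of the main loop,
    // alongside polling SDL events.
    while let Ok(event) = rx.try_recv() {
        match event {
            Event::Draw => println!("issue the draw call and swap buffers here"),
        }
    }
}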
input: "data"
input_shape {
dim: 1
dim: 1
dim: 28
dim: 28
}
layer {
name: "conv1"
type: "Convolution"
bottom: "data"
top: "conv1"
param {
lr_mult: 1.0
decay_mult: 1.0
}
param {
lr_mult: 2.0
decay_mult: 0.0
}
convolution_param {
num_output: 5
kernel_size: 3
stride: 1
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0.0
}
}
}
layer {
name: "relu1"
type: "ReLU"
bottom: "conv1"
top: "conv1"
}
layer {
name: "pool1"
type: "Pooling"
bottom: "conv1"
top: "pool1"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layer {
name: "conv2"
type: "Convolution"
bottom: "pool1"
top: "conv2"
param {
lr_mult: 1.0
decay_mult: 1.0
}
param {
lr_mult: 2.0
decay_mult: 0.0
}
convolution_param {
num_output: 5
kernel_size: 3
stride: 1
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
layer {
name: "relu2"
type: "ReLU"
bottom: "conv2"
top: "conv2"
}
layer {
name: "pool2"
type: "Pooling"
bottom: "conv2"
top: "pool2"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layer {
name: "fc3"
type: "InnerProduct"
bottom: "pool2"
top: "fc3"
param {
lr_mult: 1.0
decay_mult: 1.0
}
param {
lr_mult: 2.0
decay_mult: 0.0
}
inner_product_param {
num_output: 20
weight_filler {
type: "gaussian"
std: 0.005
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
layer {
name: "relu3"
type: "ReLU"
bottom: "fc3"
top: "fc3"
}
layer {
name: "drop3"
type: "Dropout"
bottom: "fc3"
top: "fc3"
dropout_param {
dropout_ratio: 0.5
}
}
layer {
name: "fc4"
type: "InnerProduct"
bottom: "fc3"
top: "fc4"
param {
lr_mult: 1.0
decay_mult: 1.0
}
param {
lr_mult: 2.0
decay_mult: 0.0
}
inner_product_param {
num_output: 10
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0.0
}
}
}
layer {
name: "softmax"
type: "Softmax"
bottom: "fc4"
top: "softmax"
}