在设备驱动程序中,我们如何分辨进程之间共享的数据以及进程本地的数据? Linux Device Drivers book提及
只要共享硬件或软件资源超出单个执行线程,并且有一个线程可能遇到该资源不一致视图的可能性,就必须显式管理对该资源的访问。
但是哪些软件资源可以在线程之间共享,哪些数据 不能共享?我知道全局变量通常被视为共享内存,但是还需要保护其他哪些种类的东西?
例如,在 open、release、read、write 等文件操作中作为参数传入的 struct inode 和 struct file 类型的结构体,是否被视为共享的?
在main.c内部的open
调用中,如果dev
(在dev = container_of(inode->i_cdev, struct scull_dev, cdev);
行中)指向一个struct scull_dev
条目,为什么没有用锁保护它。全局数组scull_devices
?
在scull_write
中,为什么行int quantum = dev->quantum, qset = dev->qset;
由于正在访问全局变量而没有被信号量锁定?
/* In scull.h */

/*
 * scull_qset -- one node in a device's linked list of "quantum sets".
 * Each node owns an array of 'qset' pointers, each of which points to a
 * 'quantum'-byte buffer (both sizes live in struct scull_dev below).
 */
struct scull_qset {
	void **data;              /* pointer to an array of pointers, each pointing to a quantum buffer */
	struct scull_qset *next;  /* next node in the list; NULL at the tail */
};

/*
 * scull_dev -- per-device state, one instance per minor number (the
 * scull_devices array in main.c).  Everything reachable from here is
 * shared by every process that has this device open, so mutable fields
 * must be accessed while holding 'sem'.
 */
struct scull_dev {
	struct scull_qset *data;  /* Pointer to first quantum set */
	int quantum;              /* the current quantum size */
	int qset;                 /* the current array size */
	unsigned long size;       /* amount of data stored here */
	unsigned int access_key;  /* used by sculluid and scullpriv */
	struct semaphore sem;     /* mutual exclusion semaphore */
	struct cdev cdev;         /* Char device structure */
};
/* In main.c */

/*
 * Module-wide state and load-time parameters.  These are written during
 * scull_init_module() and read by the file operations afterwards.
 */
struct scull_dev *scull_devices;    /* device array; allocated in scull_init_module */
int scull_major = SCULL_MAJOR;      /* major device number (value of SCULL_MAJOR not shown here) */
int scull_minor = 0;                /* first minor number */
int scull_nr_devs = SCULL_NR_DEVS;  /* how many devices to create */
int scull_quantum = SCULL_QUANTUM;  /* default quantum (buffer) size */
int scull_qset = SCULL_QSET;        /* default length of a qset pointer array */
/*
 * scull_write -- the write() file operation for a scull device.
 *
 * Copies up to 'count' bytes from the user buffer 'buf' into the device's
 * in-memory quantum/qset structure at position *f_pos.  A single call never
 * crosses a quantum boundary: the count is truncated at the end of the
 * current quantum and the caller is expected to retry for the remainder.
 *
 * Returns the number of bytes written, -ERESTARTSYS if interrupted while
 * waiting for the device semaphore, -ENOMEM on allocation failure, or
 * -EFAULT if the user buffer cannot be read.
 */
ssize_t scull_write(struct file *filp, const char __user *buf, size_t count,
		loff_t *f_pos)
{
	struct scull_dev *dev = filp->private_data; /* filp->private_data assigned in scull_open */
	struct scull_qset *dptr;
	/*
	 * NOTE(review): dev->quantum and dev->qset are read here WITHOUT
	 * holding dev->sem.  That is safe only if they are assigned once in
	 * scull_init_module() and never modified afterwards -- TODO confirm
	 * no ioctl or other code path updates them concurrently.
	 */
	int quantum = dev->quantum, qset = dev->qset;
	int itemsize = quantum * qset; /* bytes stored by one list item (one full qset) */
	int item;  /* item in linked list */
	int s_pos; /* position in qset data array */
	int q_pos; /* position in quantum */
	int rest;
	ssize_t retval = -ENOMEM; /* value used in "goto out" statements */

	/* Serialize against all other accesses to this device's data. */
	if (down_interruptible(&dev->sem))
		return -ERESTARTSYS;

	/* find listitem, qset index and offset in the quantum */
	item = (long)*f_pos / itemsize;
	rest = (long)*f_pos % itemsize;
	s_pos = rest / quantum;
	q_pos = rest % quantum;

	/* follow the list up to the right position */
	dptr = scull_follow(dev, item);
	if (dptr == NULL)
		goto out;
	/* Lazily allocate the pointer array for this qset on first write. */
	if (!dptr->data) {
		dptr->data = kmalloc(qset * sizeof(char *), GFP_KERNEL);
		if (!dptr->data)
			goto out;
		memset(dptr->data, 0, qset * sizeof(char *));
	}
	/* Lazily allocate the quantum buffer itself on first write. */
	if (!dptr->data[s_pos]) {
		dptr->data[s_pos] = kmalloc(quantum, GFP_KERNEL);
		if (!dptr->data[s_pos])
			goto out;
	}
	/* write only up to the end of this quantum */
	if (count > quantum - q_pos)
		count = quantum - q_pos;

	if (copy_from_user(dptr->data[s_pos]+q_pos, buf, count)) {
		retval = -EFAULT;
		goto out;
	}
	*f_pos += count;
	retval = count;

	/* update the size (dev->size is shared; still under dev->sem here) */
	if (dev->size < *f_pos)
		dev->size = *f_pos;

out:
	up(&dev->sem);
	return retval;
}
/*
 * scull_open -- the open() file operation.
 *
 * Locates the scull_dev that owns the cdev embedded in the inode, stashes
 * it in filp->private_data for the other methods, and truncates the device
 * to length 0 when it is opened write-only.
 */
int scull_open(struct inode *inode, struct file *filp)
{
	struct scull_dev *dev; /* device information */

	/*
	 * Question: Why was the lock not placed here?
	 * No lock is needed: container_of() is pure pointer arithmetic on
	 * inode->i_cdev -- nothing in shared memory is read or written, and
	 * 'dev' and 'filp->private_data' are private to this open() call
	 * (filp was just created for this file).
	 */
	dev = container_of(inode->i_cdev, struct scull_dev, cdev);
	filp->private_data = dev; /* for other methods */

	/* now trim to 0 the length of the device if open was write-only */
	if ( (filp->f_flags & O_ACCMODE) == O_WRONLY) {
		/* scull_trim mutates shared device data, so it needs the lock. */
		if (down_interruptible(&dev->sem))
			return -ERESTARTSYS;
		scull_trim(dev); /* ignore errors */
		up(&dev->sem);
	}
	return 0; /* success */
}
/*
 * scull_init_module -- module entry point.
 *
 * Allocates and initializes the scull_devices array.  Note that no locking
 * is used here: at init time no device has been made visible yet (that only
 * happens inside scull_setup_cdev, via cdev_add), so no other thread can
 * reach this data.
 */
int scull_init_module(void)
{
	int result, i;
	dev_t dev = 0;

	/* assigns major and minor numbers (left out for brevity) */

	/*
	 * allocate the devices -- we can't have them static, as the number
	 * can be specified at load time
	 */
	scull_devices = kmalloc(scull_nr_devs * sizeof(struct scull_dev), GFP_KERNEL);
	if (!scull_devices) {
		result = -ENOMEM;
		/*
		 * Not redundant: the cleanup routine also undoes the major/minor
		 * registration elided above, so jumping keeps a single error
		 * path.  scull_cleanup_module must tolerate scull_devices == NULL.
		 */
		goto fail;
	}
	memset(scull_devices, 0, scull_nr_devs * sizeof(struct scull_dev));

	/* Initialize each device: sizes, its semaphore, then its cdev. */
	for (i = 0; i < scull_nr_devs; i++) {
		scull_devices[i].quantum = scull_quantum;
		scull_devices[i].qset = scull_qset;
		/* Semaphore used as a mutex (older kernel API); must be ready
		 * BEFORE scull_setup_cdev makes the device live. */
		init_MUTEX(&scull_devices[i].sem);
		scull_setup_cdev(&scull_devices[i], i);
	}

	/* some other stuff (left out for brevity) */
	return 0; /* succeed */

fail:
	scull_cleanup_module(); /* left out for brevity */
	return result;
}
/*
 * Set up the char_dev structure for this device.
 *
 * After cdev_add() returns successfully the device is "live": the kernel
 * may dispatch file operations on it immediately, so everything the fops
 * need (including dev->sem) must already be initialized by the caller.
 */
static void scull_setup_cdev(struct scull_dev *dev, int index)
{
	int err, devno = MKDEV(scull_major, scull_minor + index);

	cdev_init(&dev->cdev, &scull_fops);
	dev->cdev.owner = THIS_MODULE;
	/*
	 * Yes, this is redundant: cdev_init() already stores the fops
	 * pointer.  Harmless; kept as in the original LDD3 sources.
	 */
	dev->cdev.ops = &scull_fops;
	err = cdev_add (&dev->cdev, devno, 1);
	/* Fail gracefully if need be */
	if (err)
		printk(KERN_NOTICE "Error %d adding scull%d", err, index);
}
答案(得分:2):
如果两个线程都能够访问内存中的所有数据,则可以将其视为“共享资源” *。它们不会在处理器之间共享的唯一资源是寄存器中的数据,这些数据在C中被抽象出来。
有两个原因可以让您不必把某个资源真正当作共享资源来对待(即使它在技术上是共享的):(1) 实际上不可能有两个线程同时访问它(尽管某些极端的代码有时能绕过这一点);(2) 它在使用期间永远不会被修改。
这里显示的程序是不完整的,很难说,但是在没有锁定的情况下访问的每个变量必须满足该程序具有线程安全性的条件之一。
有一些不明显的方法可以满足条件,例如,变量是恒定的还是仅在特定上下文中限于一个线程。
您给出了两行未加锁代码的示例。对于第一行:
dev = container_of(inode->i_cdev, struct scull_dev, cdev);
此行实际上并不访问任何变量,它只是计算包含cdev
的结构体位于何处。这里不可能出现竞争条件:inode 和 dev 这些指针变量本身只在本函数内可见,其他线程无法访问这些指针变量本身(尽管可以访问它们所指向的对象)。这符合条件(1)。
另一个例子是
int quantum = dev->quantum, qset = dev->qset;
在没有上下文的情况下很难说这句话,但是我最好的猜测是假设dev->quantum
和dev->qset
在函数调用期间永远不会改变。这似乎受到以下事实的支持:它们仅在scull_init_module
中被赋值,而该函数只在模块加载时执行一次。我相信这符合条件(2)。
这也引出了另一种做法:如果您知道其他线程在您完成操作之前不会尝试访问某个共享变量(例如因为那些线程当时还不存在),那么您就可以在不加锁的情况下修改它。
简而言之,所有内存都是共享的,但是有时您可以像不那样共享它。
*可以想象,可能有一个系统,其中每个处理器都有一定数量的RAM,只有它可以使用,但这不是典型的情况。