保护内核和用户空间之间的共享内存段

时间:2016-04-13 09:02:15

标签: c linux-kernel

我使用mmap在内核中创建了共享内存段。我需要从内核和用户空间访问这个映射的内存。我应该使用什么机制来保护内存不受并发访问的影响? 我希望有类似的东西:

内核模块:

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/mm.h> 

#ifndef VM_RESERVED
# define  VM_RESERVED   (VM_DONTEXPAND | VM_DONTDUMP)
#endif

/* Handle of the debugfs file created at module init, removed at exit. */
struct dentry  *file;

/* Per-open state: one zeroed page shared with user space via mmap. */
struct mmap_info
{
    char *data;            /* kernel page backing the mapping (get_zeroed_page) */
    int reference;      /* count of live VMAs; ++ in mmap_open, -- in mmap_close.
                           NOTE(review): plain int with no locking — racy under
                           concurrent mmap/unmap. */
};

/* VMA open callback: bump the mapping count for this file's shared page. */
void mmap_open(struct vm_area_struct *vma)
{
    struct mmap_info *minfo = vma->vm_private_data;

    minfo->reference++;
}

/* VMA close callback: drop the mapping count taken in mmap_open(). */
void mmap_close(struct vm_area_struct *vma)
{
    struct mmap_info *minfo = vma->vm_private_data;

    minfo->reference--;
}

static int mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
    struct page *page;
    struct mmap_info *info;    

    info = (struct mmap_info *)vma->vm_private_data;
    if (!info->data)
    {
        printk("No data\n");
        return 0;    
    }
    page = virt_to_page(info->data);    
    get_page(page);
    vmf->page = page;            
    return 0;
}

/* VMA callbacks wired into every mapping of the debugfs file by op_mmap(). */
struct vm_operations_struct mmap_vm_ops =
{
    .open =     mmap_open,   /* new reference to the mapping (fork, etc.) */
    .close =    mmap_close,  /* mapping torn down */
    .fault =    mmap_fault,  /* demand-fault in the shared page */
};

/*
 * mmap file operation: attach the per-open mmap_info to the new VMA and
 * install the fault-based vm_operations. Pages are populated lazily by
 * mmap_fault(), so nothing is mapped eagerly here.
 */
int op_mmap(struct file *filp, struct vm_area_struct *vma)
{
    vma->vm_private_data = filp->private_data;
    vma->vm_flags |= VM_RESERVED;      /* VM_DONTEXPAND | VM_DONTDUMP on new kernels */
    vma->vm_ops = &mmap_vm_ops;
    mmap_open(vma);                    /* account for this initial mapping */
    return 0;
}

/*
 * release() file operation: free the shared page and the per-open state.
 *
 * Fix: guard against a NULL private_data so a partially failed open
 * cannot lead to a NULL dereference here.
 */
int mmapfop_close(struct inode *inode, struct file *filp)
{
    struct mmap_info *info = filp->private_data;

    if (!info)
        return 0;

    free_page((unsigned long)info->data);
    kfree(info);
    filp->private_data = NULL;
    return 0;
}

int mmapfop_open(struct inode *inode, struct file *filp)
{
    struct mmap_info *info = kmalloc(sizeof(struct mmap_info), GFP_KERNEL);    
    info->data = (char *)get_zeroed_page(GFP_KERNEL);
    memcpy(info->data, "hello from kernel this is file: ", 32);
    memcpy(info->data + 32, filp->f_dentry->d_name.name, strlen(filp->f_dentry->d_name.name));
    /* assign this info struct to the file */
    filp->private_data = info;
    return 0;
}

/* File operations for the debugfs entry: lifecycle plus mmap support. */
static const struct file_operations mmap_fops = {
    .open = mmapfop_open,       /* allocate per-open state + shared page */
    .release = mmapfop_close,   /* free the page and state */
    .mmap = op_mmap,            /* map the page into user space */
};

 static int __init mmapexample_module_init(void)
{
    file = debugfs_create_file("mmap_example", 0644, NULL, NULL, &mmap_fops);
    return 0;
}

/* Module teardown: remove the debugfs entry created at init time. */
static void __exit mmapexample_module_exit(void)
{
    debugfs_remove(file);
}

module_init(mmapexample_module_init);
module_exit(mmapexample_module_exit);
MODULE_LICENSE("GPL");

用户空间:

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

#define PAGE_SIZE     4096

/*
 * Userspace side: map the kernel's shared page, print the kernel's
 * greeting, overwrite part of it, and print it again.
 *
 * Fixes over the original:
 *  - the fd is closed on the mmap-failure path (was leaked);
 *  - the mapping is munmap()ed before exit.
 */
int main ( int argc, char **argv )
{
    int configfd;
    char * address = NULL;

    configfd = open("/sys/kernel/debug/mmap_example", O_RDWR);
    if(configfd < 0)
    {
        perror("Open call failed");
        return -1;
    }

    address = mmap(NULL, PAGE_SIZE, PROT_READ|PROT_WRITE, MAP_SHARED, configfd, 0);
    if (address == MAP_FAILED)
    {
        perror("mmap operation failed");
        close(configfd);
        return -1;
    }

    printf("Initial message: %s\n", address);
    /* Overwrite "this" region of "hello from kernel ..." with "*user*". */
    memcpy(address + 11 , "*user*", 6);
    printf("Changed message: %s\n", address);

    if (munmap(address, PAGE_SIZE) < 0)
        perror("munmap failed");
    close(configfd);
    return 0;
}

但有锁。

2 个答案:

答案 0 :(得分:1)

内核空间和用户空间没有用于并发访问保护的共享机制。如果你想要它们,你需要自己实现它们。

它可以是某种互斥体,在您的内核模块中实现,并通过特殊的ioctl请求从用户空间访问:

内核:

/* Wait queue whose internal spinlock also protects my_mutex_val. */
DECLARE_WAIT_QUEUE_HEAD(wq);
int my_mutex_val = 0;   /* 0 = unlocked, 1 = locked */

/* 
 * Lock mutex.
 *
 * May be used directly by the kernel or via 'ioctl(MY_CMD_LOCK)' by user.
 *
 * Fix: wait_event_interruptible_locked() takes the wait queue itself,
 * not a pointer to it (the original passed &wq, which does not compile).
 * It must be entered with wq.lock held and re-acquires it before
 * returning. NOTE(review): its return value (-ERESTARTSYS on signal) is
 * still ignored here, as in the original.
 */
void my_mutex_lock(void)
{
    spin_lock(&wq.lock);
    wait_event_interruptible_locked(wq, my_mutex_val == 0);
    my_mutex_val = 1;
    spin_unlock(&wq.lock);
}

/* 
 * Unlock mutex.
 *
 * May be used directly by the kernel or via 'ioctl(MY_CMD_UNLOCK)' by user.
 */
/* 
 * Unlock mutex.
 *
 * May be used directly by the kernel or via 'ioctl(MY_CMD_UNLOCK)' by user.
 *
 * Fix: the original called wake_up(&wq) while holding wq.lock, but
 * wake_up() acquires wq.lock internally — a self-deadlock. The _locked
 * variant is the correct call when the queue's lock is already held.
 */
void my_mutex_unlock(void)
{
    spin_lock(&wq.lock);
    my_mutex_val = 0;
    wake_up_locked(&wq);
    spin_unlock(&wq.lock);
}

long unlocked_ioctl (struct file * filp, unsigned int cmd, unsigned long val)
{
    switch(cmd) {
    case MY_CMD_LOCK:
        my_mutex_lock();
    break;
    case MY_CMD_UNLOCK:
        my_mutex_unlock();
    break;
    }
}

用户:

/*
 * Pseudocode sketch of the userspace side: bracket each access to the
 * shared region with the lock/unlock ioctls.
 *
 * NOTE(review): a real ioctl() call takes the file descriptor first,
 * e.g. ioctl(fd, MY_CMD_LOCK) — the fd is elided here for brevity.
 */
int main()
{
    ...
    ioctl(MY_CMD_LOCK);
    <read data>
    ioctl(MY_CMD_UNLOCK);
    ...
}

它可以是某种自旋锁,该值存储在mmap-ed区域中(对于内核空间和用户空间都是可见的)。

在任何情况下,内核模块都应当为用户空间应用程序不遵守锁定约定的情形做好准备。这可能使内核对 mmap 区域内容的任何假设失效,但即便如此内核模块也不应崩溃。(这正是上面的代码没有使用内核标准的 struct mutex 的原因:用户空间可能会错误地使用它。)

答案 1 :(得分:0)

ioctl 的问题在于,每次要访问共享的 info->data 时都需要一次内核切换。如果这可以接受,那么用 ioctl 没问题——但为什么不干脆用标准的字符设备读/写文件操作呢?

您还可以尝试无锁机制。在共享信息 - >数据区域中添加屏障变量。当用户需要访问时,它将对barrier变量执行atomic_compare_and_xchg,直到将其设置为0(未使用),然后将其设置为1.当内核需要访问时,它将执行相同操作但将其设置为2.请参阅gcc原子内置文档。