|
设备驱动程序是操作系统内核和机器硬件之间的接口,它为应用程序屏蔽硬件的细节,一般来说,Linux的设备驱动程序需要完成如下功能:
·设备初始化、释放;
·提供各类设备服务;
·负责内核和设备之间的数据交换;
·检测和处理设备工作过程中出现的错误。
Linux下的设备驱动程序被组织为一组完成不同任务的函数的集合,通过这些函数使得Linux的设备操作犹如文件一般。在应用程序看来,硬件设备只是一个设备文件,应用程序可以像操作普通文件一样对硬件设备进行操作,如open ()、close ()、read ()、write () 等。
Linux主要将设备分为两类:字符设备和块设备。字符设备是指设备发送和接收数据以字符的形式进行;而块设备则以整个数据缓冲区的形式进行。在对字符设备发出读/写请求时,实际的硬件I/O一般就紧接着发生了;而块设备则不然,它利用一块系统内存作缓冲区,当用户进程发出设备请求时,如果缓冲区中的数据能够满足要求,就直接返回请求的数据,如果不能,就调用请求函数来进行实际的I/O操作。块设备主要针对磁盘等慢速设备。
1.内存分配
由于Linux驱动程序在内核中运行,因此在设备驱动程序需要申请/释放内存时,不能使用用户级的malloc/free函数,而需由内核级的函数kmalloc/kfree () 来实现,kmalloc()函数的原型为:
[table]
[tr]
[td]void *kmalloc (size_t size, int priority);[/td][/tr]
4.块设备驱动
块设备驱动程序的编写是一个浩繁的工程,其难度远超过字符设备,上千行的代码往往只能搞定一个简单的块设备,而数十行代码就可能搞定一个字符设备。因此,非得有相当的基本功才能完成此项工作。下面先给出一个实例,即mtdblock块设备的驱动。我们通过分析此实例中的代码来说明块设备驱动程序的写法(由于篇幅的关系,大量的代码被省略,只保留了必要的主干):
[table]
[tr]
[td]#include
#include
/* Forward declarations for the MTD core add/remove callbacks defined below. */
static void mtd_notify_add(struct mtd_info* mtd);
static void mtd_notify_remove(struct mtd_info* mtd);
/* Notifier handed to register_mtd_user() in init_mtdblock() so this driver
 * is called whenever an MTD device appears or disappears.
 * NOTE(review): positional initializer — fields presumed to be
 * add, remove, next; confirm against struct mtd_notifier. */
static struct mtd_notifier notifier = {
mtd_notify_add,
mtd_notify_remove,
NULL
};
/* devfs directory under which the per-device nodes are created. */
static devfs_handle_t devfs_dir_handle = NULL;
/* One devfs handle per possible MTD minor; released in mtd_notify_remove(). */
static devfs_handle_t devfs_rw_handle[MAX_Mtd_DEVICES];
/* Per-open-device state: a single write-back cache window over the
 * underlying MTD device. */
static struct mtdblk_dev {
struct mtd_info *mtd; /* Locked */
int count; /* open reference count; state freed when it drops to 0 */
struct semaphore cache_sem; /* serializes access to the cache_* fields below */
unsigned char *cache_data; /* cache buffer (vfree'd in mtdblock_release) */
unsigned long cache_offset; /* device byte offset the cache window covers */
unsigned int cache_size; /* size of the cache window, in bytes */
enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state;
} *mtdblks[MAX_Mtd_DEVICES];
/* Protects the mtdblks[] table itself (not the per-device caches). */
static spinlock_t mtdblks_lock;
/* this lock is used just in kernels >= 2.5.x */
static spinlock_t mtdblock_lock;
/* Per-minor size tables reported to the block layer; filled in later
 * (comment at init: "We fill it in at open() time") — units not visible here. */
static int mtd_sizes[MAX_Mtd_DEVICES];
static int mtd_blksizes[MAX_Mtd_DEVICES];
/*
 * Completion hook for the asynchronous erase started in erase_write():
 * the erase request's priv field carries the wait queue head that
 * erase_write() is sleeping on, so just wake it.
 */
static void erase_callback(struct erase_info *done)
{
	wake_up((wait_queue_head_t *)done->priv);
}
/*
 * Erase the flash region [pos, pos+len) and then write the len bytes at
 * buf into it.  The erase is asynchronous, so we sleep on a local wait
 * queue that erase_callback() wakes when the erase completes.
 *
 * Returns 0 on success, the erase/write error code on failure, or -EIO
 * on a short write.
 */
static int erase_write (struct mtd_info *mtd, unsigned long pos,
			int len, const char *buf)
{
	struct erase_info erase;
	DECLARE_WAITQUEUE(wait, current);
	wait_queue_head_t wait_q;
	size_t retlen;
	int ret;

	/*
	 * First, let's erase the flash block.
	 */
	init_waitqueue_head(&wait_q);
	erase.mtd = mtd;
	erase.callback = erase_callback;
	erase.addr = pos;
	erase.len = len;
	erase.priv = (u_long)&wait_q; /* handed back to erase_callback() */

	/* Queue ourselves before starting the erase so the completion
	 * wake-up cannot be missed. */
	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&wait_q, &wait);

	ret = Mtd_ERASE(mtd, &erase);
	if (ret) {
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&wait_q, &wait);
		/* Fixed mangled escape sequences (was /"%s/" ... failed/n). */
		printk (KERN_WARNING "mtdblock: erase of region [0x%lx, 0x%x] "
			"on \"%s\" failed\n",
			pos, len, mtd->name);
		return ret;
	}

	schedule(); /* Wait for erase to finish. */
	remove_wait_queue(&wait_q, &wait);

	/*
	 * Next, write the data to flash.
	 */
	ret = Mtd_WRITE (mtd, pos, len, &retlen, buf);
	if (ret)
		return ret;
	if (retlen != len)
		return -EIO;
	return 0;
}
/*
 * Flush the write-back cache to flash.  No-op unless the cache is
 * STATE_DIRTY.  On success the cache is marked STATE_EMPTY (not CLEAN —
 * presumably because the flash contents may diverge from the buffer;
 * confirm against the full source).  Caller must hold cache_sem.
 *
 * Returns 0 on success or the error from erase_write().
 */
static int write_cached_data (struct mtdblk_dev *mtdblk)
{
	struct mtd_info *mtd = mtdblk->mtd;
	int ret;

	if (mtdblk->cache_state != STATE_DIRTY)
		return 0;

	/* Fixed mangled escape sequences (was /"%s/" ... /n). */
	DEBUG(Mtd_DEBUG_LEVEL2, "mtdblock: writing cached data for \"%s\" "
	      "at 0x%lx, size 0x%x\n", mtd->name,
	      mtdblk->cache_offset, mtdblk->cache_size);

	ret = erase_write (mtd, mtdblk->cache_offset,
			   mtdblk->cache_size, mtdblk->cache_data);
	if (ret)
		return ret;

	mtdblk->cache_state = STATE_EMPTY;
	return 0;
}
/*
 * Write len bytes at buf to device offset pos through the erase-block
 * cache.  (Body elided in the original article.)
 * NOTE(review): pos/len appear to be byte-based and a 0 return means
 * success, judging by the caller in handle_mtdblock_request() — confirm
 * against the full mtdblock source.
 */
static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
int len, const char *buf)
{
…
}
/*
 * Read len bytes from device offset pos into buf, serving from the
 * cache when possible.  (Body elided in the original article.)
 * NOTE(review): pos/len appear to be byte-based and a 0 return means
 * success, judging by the caller in handle_mtdblock_request() — confirm
 * against the full mtdblock source.
 */
static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
int len, char *buf)
{
…
}
/*
 * open() handler for the block device.  (Body elided in the original
 * article.)  NOTE(review): presumably allocates/looks up the per-minor
 * mtdblk_dev and increments its count — mtdblock_release() undoes both;
 * confirm against the full source.
 */
static int mtdblock_open(struct inode *inode, struct file *file)
{
…
}
/*
 * release() handler: flush the write-back cache, drop the reference
 * taken at open time and, on the last close, tear down the per-minor
 * state (sync the MTD device, release it, free the cache and the
 * mtdblk_dev itself).
 */
static release_t mtdblock_release(struct inode *inode, struct file *file)
{
	int dev;
	struct mtdblk_dev *mtdblk;

	/* Fixed mangled escape sequence (was "mtdblock_release/n"). */
	DEBUG(Mtd_DEBUG_LEVEL1, "mtdblock_release\n");

	if (inode == NULL)
		release_return(-ENODEV);

	dev = minor(inode->i_rdev);
	mtdblk = mtdblks[dev];

	/* Push any dirty cached data out while the device is still ours. */
	down(&mtdblk->cache_sem);
	write_cached_data(mtdblk);
	up(&mtdblk->cache_sem);

	spin_lock(&mtdblks_lock);
	if (!--mtdblk->count) {
		/* It was the last usage. Free the device */
		mtdblks[dev] = NULL;
		spin_unlock(&mtdblks_lock);
		if (mtdblk->mtd->sync)
			mtdblk->mtd->sync(mtdblk->mtd);
		put_mtd_device(mtdblk->mtd);
		vfree(mtdblk->cache_data);
		kfree(mtdblk);
	} else {
		spin_unlock(&mtdblks_lock);
	}

	DEBUG(Mtd_DEBUG_LEVEL1, "ok\n");

	BLK_DEC_USE_COUNT;
	release_return(0);
}
/*
* This is a special request_fn because it is executed in a process context
* to be able to sleep independently of the caller. The
* io_request_lock (for < 2.5) or the queue lock (for >= 2.5) is held upon entry
* and exit. The head of our request queue is considered active so there is
* no need to dequeue requests before we are done.
*/
/*
 * Core of the worker thread: consume requests from the queue one at a
 * time and service them through the per-device cache.  Runs in process
 * context so do_cached_read/do_cached_write may sleep.  The queue lock
 * is held on entry to each iteration and re-taken before end_request().
 *
 * The original listing's calls were mangled by HTML stripping
 * ("req->sector current_nr_sectors buffer"); the "<< 9, req->"
 * fragments are restored below: sector counts are converted to byte
 * offsets/lengths for the cached helpers.
 */
static void handle_mtdblock_request(void)
{
	struct request *req;
	struct mtdblk_dev *mtdblk;
	unsigned int res;

	for (;;) {
		INIT_REQUEST;
		req = CURRENT;
		spin_unlock_irq(QUEUE_LOCK(QUEUE));

		/* Validate the minor BEFORE using it to index mtdblks[]
		 * (the original indexed first — an out-of-bounds read on
		 * a bad minor). */
		if (minor(req->rq_dev) >= MAX_Mtd_DEVICES)
			panic("%s : minor out of bound", __FUNCTION__);

		mtdblk = mtdblks[minor(req->rq_dev)];
		res = 0;

		if (!IS_REQ_CMD(req))
			goto end_req;

		/* Reject requests past the end of the device;
		 * mtd->size >> 9 is the capacity in 512-byte sectors. */
		if ((req->sector + req->current_nr_sectors) > (mtdblk->mtd->size >> 9))
			goto end_req;

		// Handle the request
		switch (rq_data_dir(req))
		{
			int err;

		case READ:
			down(&mtdblk->cache_sem);
			err = do_cached_read (mtdblk, req->sector << 9,
					      req->current_nr_sectors << 9,
					      req->buffer);
			up(&mtdblk->cache_sem);
			if (!err)
				res = 1;
			break;

		case WRITE:
			// Read only device
			if ( !(mtdblk->mtd->flags & Mtd_WRITEABLE) )
				break;

			// Do the write
			down(&mtdblk->cache_sem);
			err = do_cached_write (mtdblk, req->sector << 9,
					       req->current_nr_sectors << 9,
					       req->buffer);
			up(&mtdblk->cache_sem);
			if (!err)
				res = 1;
			break;
		}

end_req:
		spin_lock_irq(QUEUE_LOCK(QUEUE));
		end_request(res);
	}
}
/* Presumably set non-zero at unload to tell the worker thread to exit —
 * TODO confirm in mtdblock_thread() (body elided here). */
static volatile int leaving = 0;
/* Starts locked; NOTE(review): looks like a startup/exit handshake with
 * the worker thread — confirm against the full source. */
static DECLARE_MUTEX_LOCKED(thread_sem);
/* The worker thread sleeps here; mtdblock_request() wakes it. */
static DECLARE_WAIT_QUEUE_HEAD(thr_wq);
/*
 * Worker kernel thread entry point.  (Body elided in the original
 * article.)  NOTE(review): presumably sleeps on thr_wq and calls
 * handle_mtdblock_request() until 'leaving' is set — confirm against
 * the full source.
 */
int mtdblock_thread(void *dummy)
{
…
}
#define RQFUNC_ARG request_queue_t *q
/*
 * Block-layer request function.  It performs no I/O itself: all queue
 * processing happens in the worker thread, so the only job here is to
 * poke that thread awake.
 */
static void mtdblock_request(RQFUNC_ARG)
{
	wake_up(&thr_wq);
}
/*
 * ioctl() handler.  Supports BLKGETSIZE (report capacity in 512-byte
 * sectors) and BLKFLSBUF (flush kernel buffers plus our write-back
 * cache); anything else is rejected with -EINVAL.
 */
static int mtdblock_ioctl(struct inode * inode, struct file * file,
unsigned int cmd, unsigned long arg)
{
	struct mtdblk_dev *mtdblk = mtdblks[minor(inode->i_rdev)];

	if (cmd == BLKGETSIZE) {
		/* Device size in 512-byte sectors, copied out to user space. */
		return put_user((mtdblk->mtd->size >> 9), (unsigned long *) arg);
	}

	if (cmd == BLKFLSBUF) {
		/* Privileged operation: flush the buffer cache, then our
		 * own cache, then ask the MTD layer to sync the device. */
		if (!capable(CAP_SYS_ADMIN))
			return -EACCES;
		fsync_dev(inode->i_rdev);
		invalidate_buffers(inode->i_rdev);
		down(&mtdblk->cache_sem);
		write_cached_data(mtdblk);
		up(&mtdblk->cache_sem);
		if (mtdblk->mtd->sync)
			mtdblk->mtd->sync(mtdblk->mtd);
		return 0;
	}

	return -EINVAL;
}
/* File operations for the mtdblock devices; reads and writes go through
 * the block layer's request queue, so only these three need handlers. */
static struct block_device_operations mtd_fops =
{
	.owner   = THIS_MODULE,
	.open    = mtdblock_open,
	.release = mtdblock_release,
	.ioctl   = mtdblock_ioctl,
};
/*
 * MTD core callback: a new MTD device appeared.  (Body elided in the
 * original article.)  NOTE(review): presumably registers the matching
 * devfs node into devfs_rw_handle[] — mtd_notify_remove() unregisters
 * it; confirm against the full source.
 */
static void mtd_notify_add(struct mtd_info* mtd)
{
…
}
/*
 * MTD core callback: a device went away.  Drop its devfs node unless
 * the notification is spurious (NULL device or an absent placeholder).
 */
static void mtd_notify_remove(struct mtd_info* mtd)
{
	if (mtd && mtd->type != Mtd_ABSENT)
		devfs_unregister(devfs_rw_handle[mtd->index]);
}
int __init init_mtdblock(void)
{
int i;
spin_lock_init(&mtdblks_lock);
/* this lock is used just in kernels >= 2.5.x */
spin_lock_init(&mtdblock_lock);
#ifdef CONFIG_DEVFS_FS
if (devfs_register_blkdev(Mtd_BLOCK_MAJOR, DEVICE_NAME, &mtd_fops))
{
printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices./n",
Mtd_BLOCK_MAJOR);
return -EAGAIN;
}
devfs_dir_handle = devfs_mk_dir(NULL, DEVICE_NAME, NULL);
register_mtd_user(¬ifier);
#else
if (register_blkdev(MAJOR_NR,DEVICE_NAME,&mtd_fops)) {
printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices./n",
Mtd_BLOCK_MAJOR);
return -EAGAIN;
}
#endif
/* We fill it in at open() time. */
for (i=0; i |
|