	spin_lock(&mtdblks_lock);
	if (!--mtdblk->count) {
		/* It was the last usage. Free the device */
		mtdblks[dev] = NULL;
		spin_unlock(&mtdblks_lock);
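		/* Let the MTD driver flush its own state before we release it */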
		if (mtdblk->mtd->sync)
			mtdblk->mtd->sync(mtdblk->mtd);
		put_mtd_device(mtdblk->mtd);
		vfree(mtdblk->cache_data);
		kfree(mtdblk);
	} else {
		spin_unlock(&mtdblks_lock);
	}

	DEBUG(MTD_DEBUG_LEVEL1, "ok\n");

	BLK_DEC_USE_COUNT;
	release_return(0);
}

/*
* This is a special request_fn because it is executed in a process context
* to be able to sleep independently of the caller. The
* io_request_lock (for kernels < 2.5) is held upon entry
* and exit. The head of our request queue is considered active so there is
* no need to dequeue requests before we are done.
*/
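/*
 * In the full driver, a dedicated kernel thread supplies that process
 * context: it is woken when requests are queued and calls this function.
 */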
static void handle_mtdblock_request(void)
{
	struct request *req;
	struct mtdblk_dev *mtdblk;
	unsigned int res;

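	/* INIT_REQUEST returns from this function once the request queue is empty */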
	for (;;) {
		INIT_REQUEST;
		req = CURRENT;
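		/* Drop the queue lock: the cached I/O paths below may sleep */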
		spin_unlock_irq(QUEUE_LOCK(QUEUE));
		mtdblk = mtdblks[minor(req->rq_dev)];
		res = 0;

		if (minor(req->rq_dev) >= MAX_MTD_DEVICES)
			panic("%s : minor out of bound", __FUNCTION__);

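		/* Only ordinary block transfer requests are serviced here */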
		if (!IS_REQ_CMD(req))
			goto end_req;

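		/* mtd->size is in bytes; >> 9 converts it to 512-byte sectors */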
		if ((req->sector + req->current_nr_sectors) > (mtdblk->mtd->size >> 9))
			goto end_req;

		/* Handle the request */
		switch (rq_data_dir(req)) {
			int err;

		case READ:
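			/* cache_sem serializes access to the write-back cache */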
			down(&mtdblk->cache_sem);
			err = do_cached_read(mtdblk, req->sector << 9,
					req->current_nr_sectors << 9, req->buffer);
			up(&mtdblk->cache_sem);
			if (!err)
				res = 1;
			break;

		case WRITE:
			/* Read-only device */
			if (!(mtdblk->mtd->flags & MTD_WRITEABLE))
				break;

			/* Do the write */
			down(&mtdblk->cache_sem);
			err = do_cached_write(mtdblk, req->sector << 9,
					req->current_nr_sectors << 9, req->buffer);
			up(&mtdblk->cache_sem);
			if (!err)
				res = 1;
			break;
		}