#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/major.h>
#include <linux/sched.h>
#include <linux/genhd.h>
#include <linux/tqueue.h>
#include <linux/list.h>

struct request_queue;
typedef struct request_queue request_queue_t;
struct elevator_s;
typedef struct elevator_s elevator_t;

/*
 * Ok, this is an expanded form so that we can use the same
 * request for paging requests when that is implemented. In
 * paging, 'bh' is NULL, and the semaphore is used to wait
 * for read/write completion.
 */
struct request {
	struct list_head queue;
	int elevator_sequence;
	struct list_head table;

	volatile int rq_status;	/* should split this into a few status bits */
#define RQ_INACTIVE		(-1)
#define RQ_ACTIVE		1
#define RQ_SCSI_BUSY		0xffff
#define RQ_SCSI_DONE		0xfffe
#define RQ_SCSI_DISCONNECTING	0xffe0

	kdev_t rq_dev;
	int cmd;		/* READ or WRITE */
	int errors;
	unsigned long sector;
	unsigned long nr_sectors;
	unsigned long hard_sector, hard_nr_sectors;
	unsigned int nr_segments;
	unsigned int nr_hw_segments;
	unsigned long current_nr_sectors;
	void * special;
	char * buffer;
	struct semaphore * sem;
	struct buffer_head * bh;
	struct buffer_head * bhtail;
	request_queue_t *q;
};

#include <linux/elevator.h>

typedef int (merge_request_fn) (request_queue_t *q,
				struct request *req,
				struct buffer_head *bh,
				int);
typedef int (merge_requests_fn) (request_queue_t *q,
				 struct request *req,
				 struct request *req2,
				 int);
typedef void (request_fn_proc) (request_queue_t *q);
typedef request_queue_t * (queue_proc) (kdev_t dev);
typedef int (make_request_fn) (request_queue_t *q, int rw, struct buffer_head *bh);
typedef void (plug_device_fn) (request_queue_t *q, kdev_t device);
typedef void (unplug_device_fn) (void *q);

/*
 * Default nr free requests per queue, ll_rw_blk will scale it down
 * according to available RAM at init time
 */
#define QUEUE_NR_REQUESTS	8192

struct request_queue
{
	/*
	 * the queue request freelist, one for reads and one for writes
	 */
	struct list_head	request_freelist[2];
	struct list_head	pending_freelist[2];
	int			pending_free[2];

	/*
	 * Together with queue_head for cacheline sharing
	 */
	struct list_head	queue_head;
	elevator_t		elevator;

	request_fn_proc		* request_fn;
	merge_request_fn	* back_merge_fn;
	merge_request_fn	* front_merge_fn;
	merge_requests_fn	* merge_requests_fn;
	make_request_fn		* make_request_fn;
	plug_device_fn		* plug_device_fn;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			* queuedata;

	/*
	 * This is used to remove the plug when tq_disk runs.
	 */
	struct tq_struct	plug_tq;

	/*
	 * Boolean that indicates whether this queue is plugged or not.
	 */
	char			plugged;

	/*
	 * Boolean that indicates whether current_request is active or
	 * not.
	 */
	char			head_active;

	/*
	 * Is meant to protect the queue in the future instead of
	 * io_request_lock
	 */
	spinlock_t		queue_lock;

	/*
	 * Tasks wait here for free request
	 */
	wait_queue_head_t	wait_for_request;
};

struct blk_dev_struct {
	/*
	 * queue_proc has to be atomic
	 */
	request_queue_t		request_queue;
	queue_proc		*queue;
	void			*data;
};

struct sec_size {
	unsigned block_size;
	unsigned block_size_bits;
};

/*
 * Used to indicate the default queue for drivers that don't bother
 * to implement multiple queues. We have this access macro here
 * so as to eliminate the need for each and every block device
 * driver to know about the internal structure of blk_dev[].
 */
#define BLK_DEFAULT_QUEUE(_MAJOR)  &blk_dev[_MAJOR].request_queue

extern struct sec_size * blk_sec[MAX_BLKDEV];
extern struct blk_dev_struct blk_dev[MAX_BLKDEV];
extern void grok_partitions(struct gendisk *dev, int drive, unsigned minors, long size);
extern void register_disk(struct gendisk *dev, kdev_t first, unsigned minors, struct block_device_operations *ops, long size);
extern void generic_make_request(int rw, struct buffer_head * bh);
extern request_queue_t *blk_get_queue(kdev_t dev);
extern inline request_queue_t *__blk_get_queue(kdev_t dev);
extern void blkdev_release_request(struct request *);

/*
 * Access functions for manipulating queue properties
 */
extern void blk_init_queue(request_queue_t *, request_fn_proc *);
extern void blk_cleanup_queue(request_queue_t *);
extern void blk_queue_headactive(request_queue_t *, int);
extern void blk_queue_pluggable(request_queue_t *, plug_device_fn *);
extern void blk_queue_make_request(request_queue_t *, make_request_fn *);
extern void generic_unplug_device(void *);

extern int * blk_size[MAX_BLKDEV];

extern int * blksize_size[MAX_BLKDEV];

extern int * hardsect_size[MAX_BLKDEV];

extern int * max_readahead[MAX_BLKDEV];

extern int * max_sectors[MAX_BLKDEV];

extern int * max_segments[MAX_BLKDEV];

extern atomic_t queued_sectors;

#define MAX_SEGMENTS 128
#define MAX_SECTORS (MAX_SEGMENTS*8)

#define PageAlignSize(size) (((size) + PAGE_SIZE -1) & PAGE_MASK)

/* read-ahead in pages.. */
#define MAX_READAHEAD	31
#define MIN_READAHEAD	3

#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queue)
#define blkdev_entry_next_request(entry) blkdev_entry_to_request((entry)->next)
#define blkdev_entry_prev_request(entry) blkdev_entry_to_request((entry)->prev)
#define blkdev_next_request(req) blkdev_entry_to_request((req)->queue.next)
#define blkdev_prev_request(req) blkdev_entry_to_request((req)->queue.prev)

extern void drive_stat_acct (kdev_t dev, int rw,
					unsigned long nr_sectors, int new_io);

static inline int get_hardsect_size(kdev_t dev)
{
	extern int *hardsect_size[];
	if (hardsect_size[MAJOR(dev)] != NULL)
		return hardsect_size[MAJOR(dev)][MINOR(dev)];
	else
		return 512;
}

#define blk_finished_io(nsects)				\
	atomic_sub(nsects, &queued_sectors);		\
	if (atomic_read(&queued_sectors) < 0) {		\
		printk("block: queued_sectors < 0\n");	\
		atomic_set(&queued_sectors, 0);		\
	}

#define blk_started_io(nsects)				\
	atomic_add(nsects, &queued_sectors);

#endif
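
/*
 * Illustrative sketch, not part of the original header: roughly how a
 * simple 2.4-era block driver might consume the declarations above.
 * MYDEV_MAJOR, mydev_transfer() and mydev_end_request() are
 * hypothetical stand-ins; real completion and dequeueing go through
 * end_request() and friends in drivers/block/blk.h, and the READ/WRITE
 * constants come from <linux/fs.h>.  Guarded with #if 0 so the header
 * itself is left unchanged.
 */
#if 0
static void mydev_request_fn(request_queue_t *q)
{
	struct request *req;

	/* in 2.4 the request function is entered with io_request_lock held */
	while (!list_empty(&q->queue_head)) {
		/* peek at the head of the dispatch list */
		req = blkdev_entry_next_request(&q->queue_head);
		if (req->rq_status == RQ_INACTIVE)
			break;

		/* hypothetical helper: move current_nr_sectors sectors
		 * between req->buffer and the media at req->sector,
		 * direction given by req->cmd */
		mydev_transfer(req->rq_dev, req->sector,
			       req->current_nr_sectors, req->buffer,
			       req->cmd == WRITE);

		/* hypothetical stand-in for end_request(), which updates
		 * the bh/sector bookkeeping and dequeues the request */
		mydev_end_request(req, 1);
	}
}

static int __init mydev_init(void)
{
	/* attach the request function to the default queue for our major */
	blk_init_queue(BLK_DEFAULT_QUEUE(MYDEV_MAJOR), mydev_request_fn);
	return 0;
}
#endif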