blk_types.h
/*
 * Block data types and constants.  Include this file directly only to
 * break an include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#ifdef CONFIG_BLOCK

#include <linux/types.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct block_device;
typedef void (bio_end_io_t) (struct bio *, int);
typedef void (bio_destructor_t) (struct bio *);

/*
 * bv_len and bv_offset were unsigned short, but we might as well be
 * ready for > 64kB I/O pages
 */
struct bio_vec {
        struct page     *bv_page;
        unsigned int    bv_len;
        unsigned int    bv_offset;
};

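/*
 * Illustrative sketch, not part of the original header: a bio_vec
 * describes bv_len bytes of data starting bv_offset bytes into
 * bv_page.  "example_fill_bvec" is a hypothetical helper showing how
 * the three fields relate.
 */
static inline void example_fill_bvec(struct bio_vec *bv, struct page *page,
                                     unsigned int len, unsigned int offset)
{
        bv->bv_page = page;             /* page holding the data */
        bv->bv_len = len;               /* bytes to transfer */
        bv->bv_offset = offset;         /* starting byte within the page */
}
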
/*
 * The main unit of I/O for the block layer and lower layers (i.e. drivers
 * and stacking drivers).
 */
struct bio {
        sector_t                bi_sector;      /* device address in 512 byte
                                                   sectors */
        struct bio              *bi_next;       /* request queue link */
        struct block_device     *bi_bdev;
        unsigned long           bi_flags;       /* status, command, etc */
        unsigned long           bi_rw;          /* bottom bits READ/WRITE,
                                                 * top bits priority
                                                 */

        unsigned short          bi_vcnt;        /* how many bio_vecs */
        unsigned short          bi_idx;         /* current index into bvl_vec */

        /* Number of segments in this BIO after
         * physical address coalescing is performed.
         */
        unsigned int            bi_phys_segments;

        unsigned int            bi_size;        /* residual I/O count */

        /*
         * To keep track of the max segment size, we account for the
         * sizes of the first and last mergeable segments in this bio.
         */
        unsigned int            bi_seg_front_size;
        unsigned int            bi_seg_back_size;

        unsigned int            bi_max_vecs;    /* max bvl_vecs we can hold */

        atomic_t                bi_cnt;         /* pin count */

        struct bio_vec          *bi_io_vec;     /* the actual vec list */

        bio_end_io_t            *bi_end_io;

        void                    *bi_private;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
        struct bio_integrity_payload *bi_integrity;  /* data integrity */
#endif

        bio_destructor_t        *bi_destructor; /* destructor */

        /*
         * We can __inline__ a number of vecs at the end of the bio, to avoid
         * double allocations for a small number of bio_vecs. This member
         * MUST obviously be kept at the very end of the bio.
         */
        struct bio_vec          bi_inline_vecs[0];
};

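/*
 * Illustrative sketch, not part of the original header: because
 * bi_inline_vecs is a zero-length array placed at the very end of
 * struct bio, one allocation can hold both the bio and a small vec
 * table, avoiding a second allocation.  "example_bio_alloc_inline" is
 * a hypothetical helper; kmalloc() and memset() are assumed available
 * via <linux/slab.h> and <linux/string.h>.
 */
static inline struct bio *example_bio_alloc_inline(unsigned short nr_vecs,
                                                   gfp_t gfp)
{
        struct bio *bio;

        /* one allocation: the bio immediately followed by its vec table */
        bio = kmalloc(sizeof(*bio) + nr_vecs * sizeof(struct bio_vec), gfp);
        if (!bio)
                return NULL;

        memset(bio, 0, sizeof(*bio));
        bio->bi_max_vecs = nr_vecs;
        bio->bi_io_vec = bio->bi_inline_vecs;   /* vecs live inline */
        return bio;
}
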
/*
 * bio flags
 */
#define BIO_UPTODATE    0       /* ok after I/O completion */
#define BIO_RW_BLOCK    1       /* RW_AHEAD set, and read/write would block */
#define BIO_EOF         2       /* out-of-bounds error */
#define BIO_SEG_VALID   3       /* bi_phys_segments valid */
#define BIO_CLONED      4       /* doesn't own data */
#define BIO_BOUNCED     5       /* bio is a bounce bio */
#define BIO_USER_MAPPED 6       /* contains user pages */
#define BIO_EOPNOTSUPP  7       /* not supported */
#define BIO_NULL_MAPPED 8       /* contains invalid user pages */
#define BIO_FS_INTEGRITY 9      /* fs owns integrity data, not block layer */
#define BIO_QUIET       10      /* suppress error reporting for this bio */
#define BIO_MAPPED_INTEGRITY 11 /* integrity metadata has been remapped */
#define bio_flagged(bio, flag)  ((bio)->bi_flags & (1 << (flag)))

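/*
 * Illustrative sketch, not part of the original header: flag bits are
 * tested with the bio_flagged() helper above.  A completion callback
 * matching the bio_end_io_t typedef might check for success like this;
 * "example_end_io" is a hypothetical name, and printk() is assumed to
 * be available via <linux/kernel.h>.
 */
static void example_end_io(struct bio *bio, int error)
{
        if (!bio_flagged(bio, BIO_UPTODATE))
                /* the I/O failed; "error" carries the negative errno */
                printk(KERN_ERR "bio %p failed with error %d\n", bio, error);
}
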
/*
 * top 4 bits of bio flags indicate the pool this bio came from
 */
#define BIO_POOL_BITS           (4)
#define BIO_POOL_NONE           ((1UL << BIO_POOL_BITS) - 1)
#define BIO_POOL_OFFSET         (BITS_PER_LONG - BIO_POOL_BITS)
#define BIO_POOL_MASK           (1UL << BIO_POOL_OFFSET)
#define BIO_POOL_IDX(bio)       ((bio)->bi_flags >> BIO_POOL_OFFSET)

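/*
 * Illustrative sketch, not part of the original header: the allocating
 * pool's index is kept in the top BIO_POOL_BITS bits of bi_flags, so a
 * bio can be returned to the right pool without an extra field.
 * "example_set_pool_idx" is a hypothetical helper; BIO_POOL_IDX(bio)
 * reads the index back.
 */
static inline void example_set_pool_idx(struct bio *bio, unsigned long idx)
{
        bio->bi_flags &= BIO_POOL_MASK - 1;             /* clear pool bits */
        bio->bi_flags |= idx << BIO_POOL_OFFSET;        /* store new index */
}
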
#endif /* CONFIG_BLOCK */

/*
 * Request flags.  For use in the cmd_flags field of struct request, and in
 * bi_rw of struct bio.  Note that some flags are only valid in either one.
 */
enum rq_flag_bits {
        /* common flags */
        __REQ_WRITE,            /* not set: read; set: write */
        __REQ_FAILFAST_DEV,     /* no driver retries of device errors */
        __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
        __REQ_FAILFAST_DRIVER,  /* no driver retries of driver errors */

        __REQ_SYNC,             /* request is sync (sync write or read) */
        __REQ_META,             /* metadata I/O request */
        __REQ_PRIO,             /* boost priority in cfq */
        __REQ_DISCARD,          /* request to discard sectors */
        __REQ_SECURE,           /* secure discard (used with __REQ_DISCARD) */

        __REQ_NOIDLE,           /* don't anticipate more IO after this one */
        __REQ_FUA,              /* forced unit access */
        __REQ_FLUSH,            /* request for cache flush */

        /* bio only flags */
        __REQ_RAHEAD,           /* read ahead, can fail anytime */
        __REQ_THROTTLED,        /* this bio has already been subjected to
                                 * throttling rules; don't do it again */

        /* request only flags */
        __REQ_SORTED,           /* elevator knows about this request */
        __REQ_SOFTBARRIER,      /* may not be passed by ioscheduler */
        __REQ_NOMERGE,          /* don't touch this for merging */
        __REQ_STARTED,          /* drive already may have started this one */
        __REQ_DONTPREP,         /* don't call prep for this one */
        __REQ_QUEUED,           /* uses queueing */
        __REQ_ELVPRIV,          /* elevator private data attached */
        __REQ_FAILED,           /* set if the request failed */
        __REQ_QUIET,            /* don't worry about errors */
        __REQ_PREEMPT,          /* set for "ide_preempt" requests */
        __REQ_ALLOCED,          /* request came from our alloc pool */
        __REQ_COPY_USER,        /* contains copies of user pages */
        __REQ_FLUSH_SEQ,        /* request for flush sequence */
        __REQ_IO_STAT,          /* account I/O stat */
        __REQ_MIXED_MERGE,      /* merge of different types, fail separately */
        __REQ_NR_BITS,          /* stops here */
};

#define REQ_WRITE               (1 << __REQ_WRITE)
#define REQ_FAILFAST_DEV        (1 << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT  (1 << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER     (1 << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC                (1 << __REQ_SYNC)
#define REQ_META                (1 << __REQ_META)
#define REQ_PRIO                (1 << __REQ_PRIO)
#define REQ_DISCARD             (1 << __REQ_DISCARD)
#define REQ_NOIDLE              (1 << __REQ_NOIDLE)

#define REQ_FAILFAST_MASK \
        (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
#define REQ_COMMON_MASK \
        (REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | \
         REQ_DISCARD | REQ_NOIDLE | REQ_FLUSH | REQ_FUA | REQ_SECURE)
#define REQ_CLONE_MASK          REQ_COMMON_MASK

#define REQ_RAHEAD              (1 << __REQ_RAHEAD)
#define REQ_THROTTLED           (1 << __REQ_THROTTLED)

#define REQ_SORTED              (1 << __REQ_SORTED)
#define REQ_SOFTBARRIER         (1 << __REQ_SOFTBARRIER)
#define REQ_FUA                 (1 << __REQ_FUA)
#define REQ_NOMERGE             (1 << __REQ_NOMERGE)
#define REQ_STARTED             (1 << __REQ_STARTED)
#define REQ_DONTPREP            (1 << __REQ_DONTPREP)
#define REQ_QUEUED              (1 << __REQ_QUEUED)
#define REQ_ELVPRIV             (1 << __REQ_ELVPRIV)
#define REQ_FAILED              (1 << __REQ_FAILED)
#define REQ_QUIET               (1 << __REQ_QUIET)
#define REQ_PREEMPT             (1 << __REQ_PREEMPT)
#define REQ_ALLOCED             (1 << __REQ_ALLOCED)
#define REQ_COPY_USER           (1 << __REQ_COPY_USER)
#define REQ_FLUSH               (1 << __REQ_FLUSH)
#define REQ_FLUSH_SEQ           (1 << __REQ_FLUSH_SEQ)
#define REQ_IO_STAT             (1 << __REQ_IO_STAT)
#define REQ_MIXED_MERGE         (1 << __REQ_MIXED_MERGE)
#define REQ_SECURE              (1 << __REQ_SECURE)

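/*
 * Illustrative sketch, not part of the original header: bi_rw is built
 * by OR-ing REQ_* bits together, and group masks such as
 * REQ_FAILFAST_MASK test several related bits at once.  Both helpers
 * are hypothetical and assume CONFIG_BLOCK so that struct bio is
 * defined.
 */
static inline void example_mark_sync_write(struct bio *bio)
{
        /* a synchronous write that the driver must not retry */
        bio->bi_rw |= REQ_WRITE | REQ_SYNC | REQ_FAILFAST_MASK;
}

static inline int example_is_failfast(const struct bio *bio)
{
        /* true if any of the three fail-fast bits is set */
        return (bio->bi_rw & REQ_FAILFAST_MASK) != 0;
}
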
#endif /* __LINUX_BLK_TYPES_H */

