From 3f414a884c58d323433f3c811584ac561a16045d Mon Sep 17 00:00:00 2001
From: fengzx33
Date: Thu, 28 Mar 2024 11:14:36 +0800
Subject: [PATCH] iosdiag:add header files 4.19.0-91.82.65.uelc20.x86_64

---
 .../4.19.0-91.82.65.uelc20.x86_64/nvme.h      |  61 ++++
 .../virtio_blk.h                              | 268 ++++++++++++++++++
 2 files changed, 329 insertions(+)
 create mode 100644 modules/iosdiag/include/4.19.0-91.82.65.uelc20.x86_64/nvme.h
 create mode 100644 modules/iosdiag/include/4.19.0-91.82.65.uelc20.x86_64/virtio_blk.h

diff --git a/modules/iosdiag/include/4.19.0-91.82.65.uelc20.x86_64/nvme.h b/modules/iosdiag/include/4.19.0-91.82.65.uelc20.x86_64/nvme.h
new file mode 100644
index 0000000..9f32860
--- /dev/null
+++ b/modules/iosdiag/include/4.19.0-91.82.65.uelc20.x86_64/nvme.h
@@ -0,0 +1,61 @@
+#ifndef _NVME_H
+#define _NVME_H
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include "iosdiag.h"
+
+struct nvme_queue {
+	struct device *q_dmadev;
+	void *nvme_dev;	//struct nvme_dev *dev;
+	spinlock_t sq_lock;
+	struct nvme_command *sq_cmds;
+	struct nvme_command __iomem *sq_cmds_io;
+	spinlock_t cq_lock ____cacheline_aligned_in_smp;
+	volatile struct nvme_completion *cqes;
+	struct blk_mq_tags **tags;
+	dma_addr_t sq_dma_addr;
+	dma_addr_t cq_dma_addr;
+	u32 __iomem *q_db;
+	u16 q_depth;
+	s16 cq_vector;
+	u16 sq_tail;
+	u16 last_sq_tail;
+	u16 cq_head;
+	u16 last_cq_head;
+	u16 qid;
+	u8 cq_phase;
+	u8 polled;
+	u32 *dbbuf_sq_db;
+	u32 *dbbuf_cq_db;
+	u32 *dbbuf_sq_ei;
+	u32 *dbbuf_cq_ei;
+};
+
+static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq)
+{
+	int tail = nvmeq->sq_tail;
+	struct nvme_command cmd;
+
+	do {
+		if (nvmeq->sq_cmds_io) {
+			memcpy_toio(&cmd, &nvmeq->sq_cmds_io[tail], sizeof(struct nvme_command));
+			if (cmd.common.command_id == rq->tag)
+				return tail;
+		}
+		else if (nvmeq->sq_cmds[tail].common.command_id == rq->tag)
+			return tail;
+	} while (--tail >= 0);
+	return -1;
+}
+
+static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq)
+{
+	//struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+	return 0;
+}
+#endif
diff --git a/modules/iosdiag/include/4.19.0-91.82.65.uelc20.x86_64/virtio_blk.h b/modules/iosdiag/include/4.19.0-91.82.65.uelc20.x86_64/virtio_blk.h
new file mode 100644
index 0000000..853fa75
--- /dev/null
+++ b/modules/iosdiag/include/4.19.0-91.82.65.uelc20.x86_64/virtio_blk.h
@@ -0,0 +1,268 @@
+#ifndef _VIRTIO_BLK_H
+#define _VIRTIO_BLK_H
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "iosdiag.h"
+
+#define VQ_NAME_LEN 16
+
+struct virtio_blk_vq {
+	struct virtqueue *vq;
+	spinlock_t lock;
+	char name[VQ_NAME_LEN];
+} ____cacheline_aligned_in_smp;
+
+struct vring_desc_state {
+	void *data;			/* Data for callback. */
+	struct vring_desc *indir_desc;	/* Indirect descriptor, if any. */
+};
+
+struct vring_virtqueue {
+	struct virtqueue vq;
+
+	/* Actual memory layout for this queue */
+	struct vring vring;
+
+	/* Can we use weak barriers? */
+	bool weak_barriers;
+
+	/* Other side has made a mess, don't try any more. */
+	bool broken;
+
+	/* Host supports indirect buffers */
+	bool indirect;
+
+	/* Host publishes avail event idx */
+	bool event;
+
+	/* Head of free buffer list. */
+	unsigned int free_head;
+	/* Number we've added since last sync. */
+	unsigned int num_added;
+
+	/* Last used index we've seen. */
+	u16 last_used_idx;
+
+	/* Last written value to avail->flags */
+	u16 avail_flags_shadow;
+
+	/* Last written value to avail->idx in guest byte order */
+	u16 avail_idx_shadow;
+
+	/* How to notify other side. FIXME: commonalize hcalls! */
+	bool (*notify)(struct virtqueue *vq);
+
+	/* DMA, allocation, and size information */
+	bool we_own_ring;
+	size_t queue_size_in_bytes;
+	dma_addr_t queue_dma_addr;
+
+#ifdef DEBUG
+	/* They're supposed to lock for us. */
+	unsigned int in_use;
+
+	/* Figure out if their kicks are too delayed. */
+	bool last_add_time_valid;
+	ktime_t last_add_time;
+#endif
+
+	/* Per-descriptor state. */
+	struct vring_desc_state desc_state[];
+};
+
+struct virtio_blk {
+	/*
+	 * This mutex must be held by anything that may run after
+	 * virtblk_remove() sets vblk->vdev to NULL.
+	 *
+	 * blk-mq, virtqueue processing, and sysfs attribute code paths are
+	 * shut down before vblk->vdev is set to NULL and therefore do not need
+	 * to hold this mutex.
+	 */
+	struct mutex vdev_mutex;
+	struct virtio_device *vdev;
+
+	/* The disk structure for the kernel. */
+	struct gendisk *disk;
+
+	/* Block layer tags. */
+	struct blk_mq_tag_set tag_set;
+
+	/* Process context for config space updates */
+	struct work_struct config_work;
+
+	/*
+	 * Tracks references from block_device_operations open/release and
+	 * virtio_driver probe/remove so this object can be freed once no
+	 * longer in use.
+	 */
+	refcount_t refs;
+
+	/* What host tells us, plus 2 for header & tailer. */
+	unsigned int sg_elems;
+
+	/* Ida index - used to track minor number allocations. */
+	int index;
+
+	/* num of vqs */
+	int num_vqs;
+	struct virtio_blk_vq *vqs;
+};
+
+struct virtblk_req {
+#ifdef CONFIG_VIRTIO_BLK_SCSI
+	struct scsi_request sreq;	/* for SCSI passthrough, must be first */
+	u8 sense[SCSI_SENSE_BUFFERSIZE];
+	struct virtio_scsi_inhdr in_hdr;
+#endif
+	struct virtio_blk_outhdr out_hdr;
+	u8 status;
+	struct scatterlist sg[];
+};
+#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
+
+struct blk_mq_ctxs {
+	struct kobject kobj;
+	struct blk_mq_ctx __percpu *queue_ctx;
+};
+
+struct blk_mq_ctx {
+	struct {
+		spinlock_t		lock;
+		struct list_head	rq_list;
+	} ____cacheline_aligned_in_smp;
+
+	unsigned int		cpu;
+	unsigned short		index_hw[HCTX_MAX_TYPES];
+	/* incremented at dispatch time */
+	unsigned long		rq_dispatched[2];
+	unsigned long		rq_merged;
+
+	/* incremented at completion time */
+	unsigned long		____cacheline_aligned_in_smp rq_completed[2];
+
+	struct request_queue	*queue;
+	struct blk_mq_ctxs	*ctxs;
+	struct kobject		kobj;
+} ____cacheline_aligned_in_smp;
+
+struct blk_flush_queue {
+	unsigned int		flush_queue_delayed:1;
+	unsigned int		flush_pending_idx:1;
+	unsigned int		flush_running_idx:1;
+	blk_status_t		rq_status;
+	unsigned long		flush_pending_since;
+	struct list_head	flush_queue[2];
+	struct list_head	flush_data_in_flight;
+	struct request		*flush_rq;
+
+	/*
+	 * flush_rq shares tag with this rq, both can't be active
+	 * at the same time
+	 */
+	struct request		*orig_rq;
+	spinlock_t		mq_flush_lock;
+};
+
+static inline int enable_detect_flush_rq(void)
+{
+	return 1;
+}
+
+static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
+							  enum hctx_type type,
+							  unsigned int cpu)
+{
+	return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]];
+}
+
+/*
+ * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
+ * @q: request queue
+ * @flags: request command flags
+ * @cpu: CPU
+ */
+static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
+						     unsigned int flags,
+						     unsigned int cpu)
+{
+	enum hctx_type type = HCTX_TYPE_DEFAULT;
+
+	if ((flags & REQ_HIPRI) &&
+	    q->tag_set->nr_maps > HCTX_TYPE_POLL &&
+	    q->tag_set->map[HCTX_TYPE_POLL].nr_queues &&
+	    test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
+		type = HCTX_TYPE_POLL;
+
+	else if (((flags & REQ_OP_MASK) == REQ_OP_READ) &&
+	    q->tag_set->nr_maps > HCTX_TYPE_READ &&
+	    q->tag_set->map[HCTX_TYPE_READ].nr_queues)
+		type = HCTX_TYPE_READ;
+
+	return blk_mq_map_queue_type(q, type, cpu);
+}
+
+static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq)
+{
+	return blk_mq_map_queue(rq->q, rq->cmd_flags, rq->mq_ctx->cpu);
+}
+
+static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head)
+{
+	void *data = to_vvq(vq)->desc_state[head].data;
+	return data ? blk_mq_rq_from_pdu(data) : NULL;
+}
+
+static inline int get_rq_internal_tag(struct request *rq)
+{
+	return rq ? rq->internal_tag : -1;
+}
+
+static inline unsigned long get_issue_driver_ns(struct request *rq)
+{
+	if (!rq)
+		return 0;
+	if (rq->io_start_time_ns)
+		return rq->io_start_time_ns;
+	if (rq->__deadline > rq->timeout)
+		return jiffies_to_usecs(rq->__deadline - rq->timeout) * 1000;
+	return 0;
+}
+
+/*
+ * LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0)
+ */
+static inline u64 get_check_hang_time_ns(void)
+{
+	return ktime_get_ns();
+}
+
+//#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
+extern fn_queue_tag_busy_iter sym_blk_mq_queue_tag_busy_iter;
+//#endif
+typedef void (*blk_mq_rq_iter)(struct request *, void *, bool);
+static blk_mq_rq_iter fn_blk_mq_check_hang = NULL;
+static void blk_mq_check_rq_hang(struct blk_mq_hw_ctx *hctx,
+		struct request *rq, void *priv, bool reserved)
+{
+	if (fn_blk_mq_check_hang)
+		fn_blk_mq_check_hang(rq, priv, reserved);
+}
+
+static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data)
+{
+	fn_blk_mq_check_hang = fn;
+
+	sym_blk_mq_queue_tag_busy_iter(q, blk_mq_check_rq_hang, data);
+	return 0;
+}
+#endif
-- 
Gitee
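
The following caller-side sketch is not part of the patch; it only illustrates, under stated assumptions, how the iter_all_rq() helper introduced in virtio_blk.h above might be driven by the iosdiag module once sym_blk_mq_queue_tag_busy_iter has been resolved. The names iosdiag_check_hang, iosdiag_scan_queue, and the 5-second threshold are hypothetical; only iter_all_rq(), get_issue_driver_ns(), and get_check_hang_time_ns() come from the header.

/*
 * Illustrative sketch only (not part of this patch).  Assumes it is built
 * inside the iosdiag module with the new virtio_blk.h already included and
 * sym_blk_mq_queue_tag_busy_iter resolved via kallsyms.
 */
#include "virtio_blk.h"

/* Hypothetical per-request callback matching the blk_mq_rq_iter typedef. */
static void iosdiag_check_hang(struct request *rq, void *priv, bool reserved)
{
	u64 now = get_check_hang_time_ns();
	unsigned long issued = get_issue_driver_ns(rq);

	/* Flag requests that have been in flight longer than the threshold. */
	if (issued && now - issued > *(u64 *)priv)
		pr_info("iosdiag: request tag %d looks hung\n", rq->tag);
}

/* Hypothetical scan entry point: walk every in-flight request of a queue. */
static void iosdiag_scan_queue(struct request_queue *q)
{
	u64 threshold_ns = 5ULL * NSEC_PER_SEC;	/* assumed 5s threshold */

	iter_all_rq(q, iosdiag_check_hang, &threshold_ns);
}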