target_core_user.h
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef __TARGET_CORE_USER_H
#define __TARGET_CORE_USER_H

/* This header is also used by userspace applications. */

#include <linux/types.h>
#include <linux/uio.h>

#define TCMU_VERSION "2.0"

/**
 * DOC: Ring Design
 *
 * The mmapped area is divided into three parts:
 * 1) The mailbox (struct tcmu_mailbox, below);
 * 2) The command ring;
 * 3) Everything beyond the command ring (data).
 *
 * The mailbox tells userspace the offset of the command ring from the
 * start of the shared memory region, and how big the command ring is.
 *
 * The kernel passes SCSI commands to userspace by putting a struct
 * tcmu_cmd_entry in the ring, updating mailbox->cmd_head, and poking
 * userspace via UIO's interrupt mechanism.
 *
 * tcmu_cmd_entry contains a header. If the header type is PAD,
 * userspace should skip hdr->length bytes (mod cmdr_size) to find the
 * next cmd_entry.
 *
 * Otherwise, the entry will contain offsets into the mmapped area that
 * locate the cdb and data buffers -- the latter accessible via the
 * iov array. iov addresses are also offsets into the shared area.
 *
 * When userspace has finished handling a command, it should set
 * entry->rsp.scsi_status, fill in rsp.sense_buffer if appropriate,
 * and set mailbox->cmd_tail to the old cmd_tail plus hdr->length, mod
 * cmdr_size. If cmd_tail still doesn't equal cmd_head, it should
 * process the next entry the same way, and so on. (An illustrative
 * loop is sketched after struct tcmu_cmd_entry below.)
 */

#define TCMU_MAILBOX_VERSION 2
#define ALIGN_SIZE 64 /* Should be enough for most CPUs */
#define TCMU_MAILBOX_FLAG_CAP_OOOC (1 << 0) /* Out-of-order completions */
#define TCMU_MAILBOX_FLAG_CAP_READ_LEN (1 << 1) /* Read data length */
#define TCMU_MAILBOX_FLAG_CAP_TMR (1 << 2) /* TMR notifications */
#define TCMU_MAILBOX_FLAG_CAP_KEEP_BUF (1 << 3) /* Keep buf after cmd completion */

struct tcmu_mailbox {
	__u16 version;
	__u16 flags;
	__u32 cmdr_off;
	__u32 cmdr_size;

	__u32 cmd_head;

	/* Updated by user. On its own cacheline */
	__u32 cmd_tail __attribute__((__aligned__(ALIGN_SIZE)));

} __attribute__((packed));
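
/*
 * Example (illustrative sketch, not part of this header): how a userspace
 * handler might map the shared region through UIO and sanity-check the
 * mailbox. The device path and map length are placeholders; real code
 * would take them from the uio sysfs entries (maps/map0/size). Guarded by
 * an example-only macro so normal inclusion is unaffected.
 */
#ifdef TCMU_USAGE_EXAMPLES
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>

static struct tcmu_mailbox *tcmu_example_open(const char *dev_path,
					      size_t map_len)
{
	struct tcmu_mailbox *mb;
	int fd;

	fd = open(dev_path, O_RDWR);	/* e.g. "/dev/uio0" */
	if (fd < 0)
		return NULL;

	/* The mailbox sits at offset 0 of UIO map 0. */
	mb = mmap(NULL, map_len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (mb == MAP_FAILED)
		return NULL;

	if (mb->version != TCMU_MAILBOX_VERSION) {
		fprintf(stderr, "unsupported mailbox version %u\n",
			(unsigned int)mb->version);
		return NULL;
	}
	/* fd stays open; reading from it blocks until the next interrupt. */
	return mb;
}
#endif /* TCMU_USAGE_EXAMPLES */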

enum tcmu_opcode {
	TCMU_OP_PAD = 0,
	TCMU_OP_CMD,
	TCMU_OP_TMR,
};

/*
 * Only a few opcodes, and length is 8-byte aligned, so use low bits for opcode.
 */
struct tcmu_cmd_entry_hdr {
	__u32 len_op;
	__u16 cmd_id;
	__u8 kflags;
#define TCMU_UFLAG_UNKNOWN_OP	0x1
#define TCMU_UFLAG_READ_LEN	0x2
#define TCMU_UFLAG_KEEP_BUF	0x4
	__u8 uflags;
} __attribute__((packed));

#define TCMU_OP_MASK 0x7

static __inline__ enum tcmu_opcode tcmu_hdr_get_op(__u32 len_op)
{
	return len_op & TCMU_OP_MASK;
}

static __inline__ void tcmu_hdr_set_op(__u32 *len_op, enum tcmu_opcode op)
{
	*len_op &= ~TCMU_OP_MASK;
	*len_op |= (op & TCMU_OP_MASK);
}

static __inline__ __u32 tcmu_hdr_get_len(__u32 len_op)
{
	return len_op & ~TCMU_OP_MASK;
}

static __inline__ void tcmu_hdr_set_len(__u32 *len_op, __u32 len)
{
	*len_op &= TCMU_OP_MASK;
	*len_op |= len;
}
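
/*
 * Example (illustrative sketch): because lengths are 8-byte aligned, the
 * opcode and the length pack into one __u32 via the helpers above. The
 * function name is an example-only placeholder.
 */
#ifdef TCMU_USAGE_EXAMPLES
static __inline__ __u32 tcmu_example_pack_len_op(void)
{
	__u32 len_op = 0;

	tcmu_hdr_set_len(&len_op, 64);		/* must be 8-byte aligned */
	tcmu_hdr_set_op(&len_op, TCMU_OP_CMD);
	/*
	 * Now tcmu_hdr_get_op(len_op) == TCMU_OP_CMD and
	 * tcmu_hdr_get_len(len_op) == 64.
	 */
	return len_op;
}
#endif /* TCMU_USAGE_EXAMPLES */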

/* Currently the same as SCSI_SENSE_BUFFERSIZE */
#define TCMU_SENSE_BUFFERSIZE 96

struct tcmu_cmd_entry {
	struct tcmu_cmd_entry_hdr hdr;

	union {
		struct {
			__u32 iov_cnt;
			__u32 iov_bidi_cnt;
			__u32 iov_dif_cnt;
			__u64 cdb_off;
			__u64 __pad1;
			__u64 __pad2;
			__DECLARE_FLEX_ARRAY(struct iovec, iov);
		} req;
		struct {
			__u8 scsi_status;
			__u8 __pad1;
			__u16 __pad2;
			__u32 read_len;
			char sense_buffer[TCMU_SENSE_BUFFERSIZE];
		} rsp;
	};
} __attribute__((packed));
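
/*
 * Example (illustrative sketch): the consume/complete loop from the Ring
 * Design section above. process_one_cmd() is a hypothetical handler hook,
 * not part of the TCMU interface, and the barrier choice (a GCC builtin)
 * is an assumption; a real handler also writes 4 bytes to the UIO fd
 * afterwards to notify the kernel.
 */
#ifdef TCMU_USAGE_EXAMPLES
/* Hypothetical: performs the SCSI emulation, returns a SCSI status. */
extern __u8 process_one_cmd(__u8 *cdb, struct iovec *iov, __u32 iov_cnt,
			    void *shm_base);

static void tcmu_example_poll_ring(struct tcmu_mailbox *mb)
{
	char *base = (char *)mb;

	while (mb->cmd_tail != mb->cmd_head) {
		struct tcmu_cmd_entry *ent = (struct tcmu_cmd_entry *)
			(base + mb->cmdr_off + mb->cmd_tail);
		__u32 len = tcmu_hdr_get_len(ent->hdr.len_op);

		if (tcmu_hdr_get_op(ent->hdr.len_op) == TCMU_OP_CMD) {
			/* cdb_off (and each iov_base) is an offset from base. */
			__u8 *cdb = (__u8 *)(base + ent->req.cdb_off);

			/* req is fully read before rsp overwrites the union. */
			ent->rsp.scsi_status = process_one_cmd(cdb,
					ent->req.iov, ent->req.iov_cnt, base);
		}
		/* PAD (and other) entries are skipped by advancing the tail. */
		__sync_synchronize();	/* publish rsp before moving the tail */
		mb->cmd_tail = (mb->cmd_tail + len) % mb->cmdr_size;
	}
}
#endif /* TCMU_USAGE_EXAMPLES */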

struct tcmu_tmr_entry {
	struct tcmu_cmd_entry_hdr hdr;

#define TCMU_TMR_UNKNOWN		0
#define TCMU_TMR_ABORT_TASK		1
#define TCMU_TMR_ABORT_TASK_SET		2
#define TCMU_TMR_CLEAR_ACA		3
#define TCMU_TMR_CLEAR_TASK_SET		4
#define TCMU_TMR_LUN_RESET		5
#define TCMU_TMR_TARGET_WARM_RESET	6
#define TCMU_TMR_TARGET_COLD_RESET	7
/* Pseudo reset due to received PR OUT */
#define TCMU_TMR_LUN_RESET_PRO		128
	__u8 tmr_type;

	__u8 __pad1;
	__u16 __pad2;
	__u32 cmd_cnt;
	__u64 __pad3;
	__u64 __pad4;
	__u16 cmd_ids[];
} __attribute__((packed));
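
/*
 * Example (illustrative sketch): handling a TMR notification entry.
 * abort_cmd_by_id() is a hypothetical handler hook, not part of the
 * TCMU interface.
 */
#ifdef TCMU_USAGE_EXAMPLES
extern void abort_cmd_by_id(__u16 cmd_id);	/* hypothetical hook */

static void tcmu_example_handle_tmr(struct tcmu_tmr_entry *tmr)
{
	__u32 i;

	switch (tmr->tmr_type) {
	case TCMU_TMR_ABORT_TASK:
	case TCMU_TMR_ABORT_TASK_SET:
		/* cmd_ids[] lists the cmd_id of each affected command. */
		for (i = 0; i < tmr->cmd_cnt; i++)
			abort_cmd_by_id(tmr->cmd_ids[i]);
		break;
	default:
		break;	/* other resets are handled device-wide */
	}
}
#endif /* TCMU_USAGE_EXAMPLES */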

#define TCMU_OP_ALIGN_SIZE sizeof(__u64)

enum tcmu_genl_cmd {
	TCMU_CMD_UNSPEC,
	TCMU_CMD_ADDED_DEVICE,
	TCMU_CMD_REMOVED_DEVICE,
	TCMU_CMD_RECONFIG_DEVICE,
	TCMU_CMD_ADDED_DEVICE_DONE,
	TCMU_CMD_REMOVED_DEVICE_DONE,
	TCMU_CMD_RECONFIG_DEVICE_DONE,
	TCMU_CMD_SET_FEATURES,
	__TCMU_CMD_MAX,
};
#define TCMU_CMD_MAX (__TCMU_CMD_MAX - 1)

enum tcmu_genl_attr {
	TCMU_ATTR_UNSPEC,
	TCMU_ATTR_DEVICE,
	TCMU_ATTR_MINOR,
	TCMU_ATTR_PAD,
	TCMU_ATTR_DEV_CFG,
	TCMU_ATTR_DEV_SIZE,
	TCMU_ATTR_WRITECACHE,
	TCMU_ATTR_CMD_STATUS,
	TCMU_ATTR_DEVICE_ID,
	TCMU_ATTR_SUPP_KERN_CMD_REPLY,
	__TCMU_ATTR_MAX,
};
#define TCMU_ATTR_MAX (__TCMU_ATTR_MAX - 1)
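
/*
 * Example (illustrative sketch): telling the kernel that this handler
 * replies to the netlink commands above, drafted against libnl-genl-3.
 * The libnl usage, the genl version number, and the elided error handling
 * are assumptions, not part of this header; "TCM-USER" is the generic
 * netlink family name the tcmu driver registers.
 */
#ifdef TCMU_USAGE_EXAMPLES
#include <netlink/genl/ctrl.h>
#include <netlink/genl/genl.h>

static int tcmu_example_enable_kern_cmd_reply(void)
{
	struct nl_sock *sock = nl_socket_alloc();
	struct nl_msg *msg = nlmsg_alloc();
	int family;

	genl_connect(sock);
	family = genl_ctrl_resolve(sock, "TCM-USER");
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    TCMU_CMD_SET_FEATURES, 2);
	/* The kernel policy treats this attribute as an NLA_U8 flag. */
	nla_put_u8(msg, TCMU_ATTR_SUPP_KERN_CMD_REPLY, 1);
	return nl_send_auto(sock, msg);
}
#endif /* TCMU_USAGE_EXAMPLES */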

#endif