From: Jason Gunthorpe <jgg@xxxxxxxxxxxx>

The new auditing standard for the subsystem is to use only __aligned_u64 in
uapi headers, to help prevent 32/64 compat bugs from being introduced in the
future. Converting all existing usage helps ensure new developers copy the
right idea.

The before/after of this patch was compared using pahole on 32 and 64 bit
compiles to confirm there is no change in structure layout, so this patch is
a NOP.

Signed-off-by: Jason Gunthorpe <jgg@xxxxxxxxxxxx>
---
 include/uapi/rdma/bnxt_re-abi.h    |  14 ++--
 include/uapi/rdma/cxgb3-abi.h      |  12 +--
 include/uapi/rdma/cxgb4-abi.h      |  24 +++---
 include/uapi/rdma/hfi/hfi1_ioctl.h |  32 ++++----
 include/uapi/rdma/hfi/hfi1_user.h  |   4 +-
 include/uapi/rdma/hns-abi.h        |  14 ++--
 include/uapi/rdma/i40iw-abi.h      |  12 +--
 include/uapi/rdma/ib_user_cm.h     |  48 +++++------
 include/uapi/rdma/ib_user_mad.h    |   4 +-
 include/uapi/rdma/ib_user_verbs.h  | 158 ++++++++++++++++++-------------------
 include/uapi/rdma/mlx4-abi.h       |  24 +++---
 include/uapi/rdma/mlx5-abi.h       |  40 +++++-----
 include/uapi/rdma/mthca-abi.h      |  10 +--
 include/uapi/rdma/nes-abi.h        |   6 +-
 include/uapi/rdma/ocrdma-abi.h     |  30 +++----
 include/uapi/rdma/qedr-abi.h       |  16 ++--
 include/uapi/rdma/rdma_user_cm.h   |  34 ++++----
 include/uapi/rdma/rdma_user_rxe.h  |  22 +++---
 include/uapi/rdma/vmw_pvrdma-abi.h |  48 +++++------
 19 files changed, 276 insertions(+), 276 deletions(-)

diff --git a/include/uapi/rdma/bnxt_re-abi.h b/include/uapi/rdma/bnxt_re-abi.h index 2d3c9aac661acc..a7a6111e50c7f9 100644 --- a/include/uapi/rdma/bnxt_re-abi.h +++ b/include/uapi/rdma/bnxt_re-abi.h @@ -65,8 +65,8 @@ struct bnxt_re_pd_resp { } __attribute__((packed, aligned(4))); struct bnxt_re_cq_req { - __u64 cq_va; - __u64 cq_handle; + __aligned_u64 cq_va; + __aligned_u64 cq_handle; }; struct bnxt_re_cq_resp { @@ -77,9 +77,9 @@ struct bnxt_re_cq_resp { }; struct bnxt_re_qp_req { - __u64 qpsva; - __u64 qprva; - __u64 qp_handle; + __aligned_u64 qpsva; + __aligned_u64 qprva; + __aligned_u64 qp_handle; }; struct bnxt_re_qp_resp { @@ -88,8 +88,8 @@ struct bnxt_re_qp_resp { }; struct bnxt_re_srq_req { - __u64 srqva; - __u64 srq_handle; + __aligned_u64 srqva; + __aligned_u64 srq_handle; }; struct bnxt_re_srq_resp { diff --git a/include/uapi/rdma/cxgb3-abi.h b/include/uapi/rdma/cxgb3-abi.h index 17116c1c792586..9acb4b7a624633 100644 --- a/include/uapi/rdma/cxgb3-abi.h +++ b/include/uapi/rdma/cxgb3-abi.h @@ -41,21 +41,21 @@ * Make sure that all structs defined in this file remain laid out so * that they pack the same way on 32-bit and 64-bit architectures (to * avoid incompatibility between 32-bit userspace and 64-bit kernels). - * In particular do not use pointer types -- pass pointers in __u64 + * In particular do not use pointer types -- pass pointers in __aligned_u64 * instead.
*/ struct iwch_create_cq_req { - __u64 user_rptr_addr; + __aligned_u64 user_rptr_addr; }; struct iwch_create_cq_resp_v0 { - __u64 key; + __aligned_u64 key; __u32 cqid; __u32 size_log2; }; struct iwch_create_cq_resp { - __u64 key; + __aligned_u64 key; __u32 cqid; __u32 size_log2; __u32 memsize; @@ -63,8 +63,8 @@ struct iwch_create_cq_resp { }; struct iwch_create_qp_resp { - __u64 key; - __u64 db_key; + __aligned_u64 key; + __aligned_u64 db_key; __u32 qpid; __u32 size_log2; __u32 sq_size_log2; diff --git a/include/uapi/rdma/cxgb4-abi.h b/include/uapi/rdma/cxgb4-abi.h index c398a1ee8d003a..1fefd0140c26f6 100644 --- a/include/uapi/rdma/cxgb4-abi.h +++ b/include/uapi/rdma/cxgb4-abi.h @@ -41,13 +41,13 @@ * Make sure that all structs defined in this file remain laid out so * that they pack the same way on 32-bit and 64-bit architectures (to * avoid incompatibility between 32-bit userspace and 64-bit kernels). - * In particular do not use pointer types -- pass pointers in __u64 + * In particular do not use pointer types -- pass pointers in __aligned_u64 * instead. */ struct c4iw_create_cq_resp { - __u64 key; - __u64 gts_key; - __u64 memsize; + __aligned_u64 key; + __aligned_u64 gts_key; + __aligned_u64 memsize; __u32 cqid; __u32 size; __u32 qid_mask; @@ -59,13 +59,13 @@ enum { }; struct c4iw_create_qp_resp { - __u64 ma_sync_key; - __u64 sq_key; - __u64 rq_key; - __u64 sq_db_gts_key; - __u64 rq_db_gts_key; - __u64 sq_memsize; - __u64 rq_memsize; + __aligned_u64 ma_sync_key; + __aligned_u64 sq_key; + __aligned_u64 rq_key; + __aligned_u64 sq_db_gts_key; + __aligned_u64 rq_db_gts_key; + __aligned_u64 sq_memsize; + __aligned_u64 rq_memsize; __u32 sqid; __u32 rqid; __u32 sq_size; @@ -75,7 +75,7 @@ struct c4iw_create_qp_resp { }; struct c4iw_alloc_ucontext_resp { - __u64 status_page_key; + __aligned_u64 status_page_key; __u32 status_page_size; __u32 reserved; /* explicit padding (optional for i386) */ }; diff --git a/include/uapi/rdma/hfi/hfi1_ioctl.h b/include/uapi/rdma/hfi/hfi1_ioctl.h index 9de78c5ee9139e..8f3d9fe7b141c3 100644 --- a/include/uapi/rdma/hfi/hfi1_ioctl.h +++ b/include/uapi/rdma/hfi/hfi1_ioctl.h @@ -79,7 +79,7 @@ struct hfi1_user_info { }; struct hfi1_ctxt_info { - __u64 runtime_flags; /* chip/drv runtime flags (HFI1_CAP_*) */ + __aligned_u64 runtime_flags; /* chip/drv runtime flags (HFI1_CAP_*) */ __u32 rcvegr_size; /* size of each eager buffer */ __u16 num_active; /* number of active units */ __u16 unit; /* unit (chip) assigned to caller */ @@ -98,9 +98,9 @@ struct hfi1_ctxt_info { struct hfi1_tid_info { /* virtual address of first page in transfer */ - __u64 vaddr; + __aligned_u64 vaddr; /* pointer to tid array. this array is big enough */ - __u64 tidlist; + __aligned_u64 tidlist; /* number of tids programmed by this request */ __u32 tidcnt; /* length of transfer buffer programmed by this request */ @@ -131,23 +131,23 @@ struct hfi1_base_info { */ __u32 bthqp; /* PIO credit return address, */ - __u64 sc_credits_addr; + __aligned_u64 sc_credits_addr; /* * Base address of write-only pio buffers for this process. * Each buffer has sendpio_credits*64 bytes. */ - __u64 pio_bufbase_sop; + __aligned_u64 pio_bufbase_sop; /* * Base address of write-only pio buffers for this process. * Each buffer has sendpio_credits*64 bytes. */ - __u64 pio_bufbase; + __aligned_u64 pio_bufbase; /* address where receive buffer queue is mapped into */ - __u64 rcvhdr_bufbase; + __aligned_u64 rcvhdr_bufbase; /* base address of Eager receive buffers. 
*/ - __u64 rcvegr_bufbase; + __aligned_u64 rcvegr_bufbase; /* base address of SDMA completion ring */ - __u64 sdma_comp_bufbase; + __aligned_u64 sdma_comp_bufbase; /* * User register base for init code, not to be used directly by * protocol or applications. Always maps real chip register space. @@ -155,20 +155,20 @@ struct hfi1_base_info { * ur_rcvhdrhead, ur_rcvhdrtail, ur_rcvegrhead, ur_rcvegrtail, * ur_rcvtidflow */ - __u64 user_regbase; + __aligned_u64 user_regbase; /* notification events */ - __u64 events_bufbase; + __aligned_u64 events_bufbase; /* status page */ - __u64 status_bufbase; + __aligned_u64 status_bufbase; /* rcvhdrtail update */ - __u64 rcvhdrtail_base; + __aligned_u64 rcvhdrtail_base; /* * shared memory pages for subctxts if ctxt is shared; these cover * all the processes in the group sharing a single context. * all have enough space for the num_subcontexts value on this job. */ - __u64 subctxt_uregbase; - __u64 subctxt_rcvegrbuf; - __u64 subctxt_rcvhdrbuf; + __aligned_u64 subctxt_uregbase; + __aligned_u64 subctxt_rcvegrbuf; + __aligned_u64 subctxt_rcvhdrbuf; }; #endif /* _LINIUX__HFI1_IOCTL_H */ diff --git a/include/uapi/rdma/hfi/hfi1_user.h b/include/uapi/rdma/hfi/hfi1_user.h index 43b46bf6f8bb10..c6a984c0c8817c 100644 --- a/include/uapi/rdma/hfi/hfi1_user.h +++ b/include/uapi/rdma/hfi/hfi1_user.h @@ -177,8 +177,8 @@ struct hfi1_sdma_comp_entry { * Device status and notifications from driver to user-space. */ struct hfi1_status { - __u64 dev; /* device/hw status bits */ - __u64 port; /* port state and status bits */ + __aligned_u64 dev; /* device/hw status bits */ + __aligned_u64 port; /* port state and status bits */ char freezemsg[0]; }; diff --git a/include/uapi/rdma/hns-abi.h b/include/uapi/rdma/hns-abi.h index aa774985a0c70e..7092c8de4bd883 100644 --- a/include/uapi/rdma/hns-abi.h +++ b/include/uapi/rdma/hns-abi.h @@ -37,18 +37,18 @@ #include <linux/types.h> struct hns_roce_ib_create_cq { - __u64 buf_addr; - __u64 db_addr; + __aligned_u64 buf_addr; + __aligned_u64 db_addr; }; struct hns_roce_ib_create_cq_resp { - __u64 cqn; /* Only 32 bits used, 64 for compat */ - __u64 cap_flags; + __aligned_u64 cqn; /* Only 32 bits used, 64 for compat */ + __aligned_u64 cap_flags; }; struct hns_roce_ib_create_qp { - __u64 buf_addr; - __u64 db_addr; + __aligned_u64 buf_addr; + __aligned_u64 db_addr; __u8 log_sq_bb_count; __u8 log_sq_stride; __u8 sq_no_prefetch; @@ -56,7 +56,7 @@ struct hns_roce_ib_create_qp { }; struct hns_roce_ib_create_qp_resp { - __u64 cap_flags; + __aligned_u64 cap_flags; }; struct hns_roce_ib_alloc_ucontext_resp { diff --git a/include/uapi/rdma/i40iw-abi.h b/include/uapi/rdma/i40iw-abi.h index bfc3aaf2e56a5b..79890baa6fdbb3 100644 --- a/include/uapi/rdma/i40iw-abi.h +++ b/include/uapi/rdma/i40iw-abi.h @@ -61,17 +61,17 @@ struct i40iw_alloc_pd_resp { }; struct i40iw_create_cq_req { - __u64 user_cq_buffer; - __u64 user_shadow_area; + __aligned_u64 user_cq_buffer; + __aligned_u64 user_shadow_area; }; struct i40iw_create_qp_req { - __u64 user_wqe_buffers; - __u64 user_compl_ctx; + __aligned_u64 user_wqe_buffers; + __aligned_u64 user_compl_ctx; /* UDA QP PHB */ - __u64 user_sq_phb; /* place for VA of the sq phb buff */ - __u64 user_rq_phb; /* place for VA of the rq phb buff */ + __aligned_u64 user_sq_phb; /* place for VA of the sq phb buff */ + __aligned_u64 user_rq_phb; /* place for VA of the rq phb buff */ }; enum i40iw_memreg_type { diff --git a/include/uapi/rdma/ib_user_cm.h b/include/uapi/rdma/ib_user_cm.h index f4041bdc4d0892..4a8f9562f7cd9b 100644 --- 
a/include/uapi/rdma/ib_user_cm.h +++ b/include/uapi/rdma/ib_user_cm.h @@ -73,8 +73,8 @@ struct ib_ucm_cmd_hdr { }; struct ib_ucm_create_id { - __u64 uid; - __u64 response; + __aligned_u64 uid; + __aligned_u64 response; }; struct ib_ucm_create_id_resp { @@ -82,7 +82,7 @@ struct ib_ucm_create_id_resp { }; struct ib_ucm_destroy_id { - __u64 response; + __aligned_u64 response; __u32 id; __u32 reserved; }; @@ -92,7 +92,7 @@ struct ib_ucm_destroy_id_resp { }; struct ib_ucm_attr_id { - __u64 response; + __aligned_u64 response; __u32 id; __u32 reserved; }; @@ -105,7 +105,7 @@ struct ib_ucm_attr_id_resp { }; struct ib_ucm_init_qp_attr { - __u64 response; + __aligned_u64 response; __u32 id; __u32 qp_state; }; @@ -123,7 +123,7 @@ struct ib_ucm_notify { }; struct ib_ucm_private_data { - __u64 data; + __aligned_u64 data; __u32 id; __u8 len; __u8 reserved[3]; @@ -135,9 +135,9 @@ struct ib_ucm_req { __u32 qp_type; __u32 psn; __be64 sid; - __u64 data; - __u64 primary_path; - __u64 alternate_path; + __aligned_u64 data; + __aligned_u64 primary_path; + __aligned_u64 alternate_path; __u8 len; __u8 peer_to_peer; __u8 responder_resources; @@ -153,8 +153,8 @@ struct ib_ucm_req { }; struct ib_ucm_rep { - __u64 uid; - __u64 data; + __aligned_u64 uid; + __aligned_u64 data; __u32 id; __u32 qpn; __u32 psn; @@ -172,15 +172,15 @@ struct ib_ucm_rep { struct ib_ucm_info { __u32 id; __u32 status; - __u64 info; - __u64 data; + __aligned_u64 info; + __aligned_u64 data; __u8 info_len; __u8 data_len; __u8 reserved[6]; }; struct ib_ucm_mra { - __u64 data; + __aligned_u64 data; __u32 id; __u8 len; __u8 timeout; @@ -188,8 +188,8 @@ struct ib_ucm_mra { }; struct ib_ucm_lap { - __u64 path; - __u64 data; + __aligned_u64 path; + __aligned_u64 data; __u32 id; __u8 len; __u8 reserved[3]; @@ -199,8 +199,8 @@ struct ib_ucm_sidr_req { __u32 id; __u32 timeout; __be64 sid; - __u64 data; - __u64 path; + __aligned_u64 data; + __aligned_u64 path; __u16 reserved_pkey; __u8 len; __u8 max_cm_retries; @@ -212,8 +212,8 @@ struct ib_ucm_sidr_rep { __u32 qpn; __u32 qkey; __u32 status; - __u64 info; - __u64 data; + __aligned_u64 info; + __aligned_u64 data; __u8 info_len; __u8 data_len; __u8 reserved[6]; @@ -222,9 +222,9 @@ struct ib_ucm_sidr_rep { * event notification ABI structures. 
*/ struct ib_ucm_event_get { - __u64 response; - __u64 data; - __u64 info; + __aligned_u64 response; + __aligned_u64 data; + __aligned_u64 info; __u8 data_len; __u8 info_len; __u8 reserved[6]; @@ -303,7 +303,7 @@ struct ib_ucm_sidr_rep_event_resp { #define IB_UCM_PRES_ALTERNATE 0x08 struct ib_ucm_event_resp { - __u64 uid; + __aligned_u64 uid; __u32 id; __u32 event; __u32 present; diff --git a/include/uapi/rdma/ib_user_mad.h b/include/uapi/rdma/ib_user_mad.h index 330a3c5f1aa864..ef92118dad9770 100644 --- a/include/uapi/rdma/ib_user_mad.h +++ b/include/uapi/rdma/ib_user_mad.h @@ -143,7 +143,7 @@ struct ib_user_mad_hdr { */ struct ib_user_mad { struct ib_user_mad_hdr hdr; - __u64 data[0]; + __aligned_u64 data[0]; }; /* @@ -225,7 +225,7 @@ struct ib_user_mad_reg_req2 { __u8 mgmt_class_version; __u16 res; __u32 flags; - __u64 method_mask[2]; + __aligned_u64 method_mask[2]; __u32 oui; __u8 rmpp_version; __u8 reserved[3]; diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h index d56fba09dc8ac3..aa0615105563a2 100644 --- a/include/uapi/rdma/ib_user_verbs.h +++ b/include/uapi/rdma/ib_user_verbs.h @@ -117,13 +117,13 @@ enum { */ struct ib_uverbs_async_event_desc { - __u64 element; + __aligned_u64 element; __u32 event_type; /* enum ib_event_type */ __u32 reserved; }; struct ib_uverbs_comp_event_desc { - __u64 cq_handle; + __aligned_u64 cq_handle; }; struct ib_uverbs_cq_moderation_caps { @@ -150,15 +150,15 @@ struct ib_uverbs_cmd_hdr { }; struct ib_uverbs_ex_cmd_hdr { - __u64 response; + __aligned_u64 response; __u16 provider_in_words; __u16 provider_out_words; __u32 cmd_hdr_reserved; }; struct ib_uverbs_get_context { - __u64 response; - __u64 driver_data[0]; + __aligned_u64 response; + __aligned_u64 driver_data[0]; }; struct ib_uverbs_get_context_resp { @@ -167,16 +167,16 @@ struct ib_uverbs_get_context_resp { }; struct ib_uverbs_query_device { - __u64 response; - __u64 driver_data[0]; + __aligned_u64 response; + __aligned_u64 driver_data[0]; }; struct ib_uverbs_query_device_resp { - __u64 fw_ver; + __aligned_u64 fw_ver; __be64 node_guid; __be64 sys_image_guid; - __u64 max_mr_size; - __u64 page_size_cap; + __aligned_u64 max_mr_size; + __aligned_u64 page_size_cap; __u32 vendor_id; __u32 vendor_part_id; __u32 hw_ver; @@ -221,7 +221,7 @@ struct ib_uverbs_ex_query_device { }; struct ib_uverbs_odp_caps { - __u64 general_caps; + __aligned_u64 general_caps; struct { __u32 rc_odp_caps; __u32 uc_odp_caps; @@ -260,9 +260,9 @@ struct ib_uverbs_ex_query_device_resp { __u32 comp_mask; __u32 response_length; struct ib_uverbs_odp_caps odp_caps; - __u64 timestamp_mask; - __u64 hca_core_clock; /* in KHZ */ - __u64 device_cap_flags_ex; + __aligned_u64 timestamp_mask; + __aligned_u64 hca_core_clock; /* in KHZ */ + __aligned_u64 device_cap_flags_ex; struct ib_uverbs_rss_caps rss_caps; __u32 max_wq_type_rq; __u32 raw_packet_caps; @@ -271,10 +271,10 @@ struct ib_uverbs_ex_query_device_resp { }; struct ib_uverbs_query_port { - __u64 response; + __aligned_u64 response; __u8 port_num; __u8 reserved[7]; - __u64 driver_data[0]; + __aligned_u64 driver_data[0]; }; struct ib_uverbs_query_port_resp { @@ -302,8 +302,8 @@ struct ib_uverbs_query_port_resp { }; struct ib_uverbs_alloc_pd { - __u64 response; - __u64 driver_data[0]; + __aligned_u64 response; + __aligned_u64 driver_data[0]; }; struct ib_uverbs_alloc_pd_resp { @@ -315,10 +315,10 @@ struct ib_uverbs_dealloc_pd { }; struct ib_uverbs_open_xrcd { - __u64 response; + __aligned_u64 response; __u32 fd; __u32 oflags; - __u64 driver_data[0]; + 
__aligned_u64 driver_data[0]; }; struct ib_uverbs_open_xrcd_resp { @@ -330,13 +330,13 @@ struct ib_uverbs_close_xrcd { }; struct ib_uverbs_reg_mr { - __u64 response; - __u64 start; - __u64 length; - __u64 hca_va; + __aligned_u64 response; + __aligned_u64 start; + __aligned_u64 length; + __aligned_u64 hca_va; __u32 pd_handle; __u32 access_flags; - __u64 driver_data[0]; + __aligned_u64 driver_data[0]; }; struct ib_uverbs_reg_mr_resp { @@ -346,12 +346,12 @@ struct ib_uverbs_reg_mr_resp { }; struct ib_uverbs_rereg_mr { - __u64 response; + __aligned_u64 response; __u32 mr_handle; __u32 flags; - __u64 start; - __u64 length; - __u64 hca_va; + __aligned_u64 start; + __aligned_u64 length; + __aligned_u64 hca_va; __u32 pd_handle; __u32 access_flags; }; @@ -366,7 +366,7 @@ struct ib_uverbs_dereg_mr { }; struct ib_uverbs_alloc_mw { - __u64 response; + __aligned_u64 response; __u32 pd_handle; __u8 mw_type; __u8 reserved[3]; @@ -382,7 +382,7 @@ struct ib_uverbs_dealloc_mw { }; struct ib_uverbs_create_comp_channel { - __u64 response; + __aligned_u64 response; }; struct ib_uverbs_create_comp_channel_resp { @@ -390,13 +390,13 @@ struct ib_uverbs_create_comp_channel_resp { }; struct ib_uverbs_create_cq { - __u64 response; - __u64 user_handle; + __aligned_u64 response; + __aligned_u64 user_handle; __u32 cqe; __u32 comp_vector; __s32 comp_channel; __u32 reserved; - __u64 driver_data[0]; + __aligned_u64 driver_data[0]; }; enum ib_uverbs_ex_create_cq_flags { @@ -405,7 +405,7 @@ enum ib_uverbs_ex_create_cq_flags { }; struct ib_uverbs_ex_create_cq { - __u64 user_handle; + __aligned_u64 user_handle; __u32 cqe; __u32 comp_vector; __s32 comp_channel; @@ -426,26 +426,26 @@ struct ib_uverbs_ex_create_cq_resp { }; struct ib_uverbs_resize_cq { - __u64 response; + __aligned_u64 response; __u32 cq_handle; __u32 cqe; - __u64 driver_data[0]; + __aligned_u64 driver_data[0]; }; struct ib_uverbs_resize_cq_resp { __u32 cqe; __u32 reserved; - __u64 driver_data[0]; + __aligned_u64 driver_data[0]; }; struct ib_uverbs_poll_cq { - __u64 response; + __aligned_u64 response; __u32 cq_handle; __u32 ne; }; struct ib_uverbs_wc { - __u64 wr_id; + __aligned_u64 wr_id; __u32 status; __u32 opcode; __u32 vendor_err; @@ -477,7 +477,7 @@ struct ib_uverbs_req_notify_cq { }; struct ib_uverbs_destroy_cq { - __u64 response; + __aligned_u64 response; __u32 cq_handle; __u32 reserved; }; @@ -546,8 +546,8 @@ struct ib_uverbs_qp_attr { }; struct ib_uverbs_create_qp { - __u64 response; - __u64 user_handle; + __aligned_u64 response; + __aligned_u64 user_handle; __u32 pd_handle; __u32 send_cq_handle; __u32 recv_cq_handle; @@ -561,7 +561,7 @@ struct ib_uverbs_create_qp { __u8 qp_type; __u8 is_srq; __u8 reserved; - __u64 driver_data[0]; + __aligned_u64 driver_data[0]; }; enum ib_uverbs_create_qp_mask { @@ -587,7 +587,7 @@ enum { }; struct ib_uverbs_ex_create_qp { - __u64 user_handle; + __aligned_u64 user_handle; __u32 pd_handle; __u32 send_cq_handle; __u32 recv_cq_handle; @@ -608,13 +608,13 @@ struct ib_uverbs_ex_create_qp { }; struct ib_uverbs_open_qp { - __u64 response; - __u64 user_handle; + __aligned_u64 response; + __aligned_u64 user_handle; __u32 pd_handle; __u32 qpn; __u8 qp_type; __u8 reserved[7]; - __u64 driver_data[0]; + __aligned_u64 driver_data[0]; }; /* also used for open response */ @@ -655,10 +655,10 @@ struct ib_uverbs_qp_dest { }; struct ib_uverbs_query_qp { - __u64 response; + __aligned_u64 response; __u32 qp_handle; __u32 attr_mask; - __u64 driver_data[0]; + __aligned_u64 driver_data[0]; }; struct ib_uverbs_query_qp_resp { @@ -692,7 +692,7 
@@ struct ib_uverbs_query_qp_resp { __u8 alt_timeout; __u8 sq_sig_all; __u8 reserved[5]; - __u64 driver_data[0]; + __aligned_u64 driver_data[0]; }; struct ib_uverbs_modify_qp { @@ -722,7 +722,7 @@ struct ib_uverbs_modify_qp { __u8 alt_port_num; __u8 alt_timeout; __u8 reserved[2]; - __u64 driver_data[0]; + __aligned_u64 driver_data[0]; }; struct ib_uverbs_ex_modify_qp { @@ -740,7 +740,7 @@ struct ib_uverbs_ex_modify_qp_resp { }; struct ib_uverbs_destroy_qp { - __u64 response; + __aligned_u64 response; __u32 qp_handle; __u32 reserved; }; @@ -756,13 +756,13 @@ struct ib_uverbs_destroy_qp_resp { * document the ABI. */ struct ib_uverbs_sge { - __u64 addr; + __aligned_u64 addr; __u32 length; __u32 lkey; }; struct ib_uverbs_send_wr { - __u64 wr_id; + __aligned_u64 wr_id; __u32 num_sge; __u32 opcode; __u32 send_flags; @@ -772,14 +772,14 @@ struct ib_uverbs_send_wr { } ex; union { struct { - __u64 remote_addr; + __aligned_u64 remote_addr; __u32 rkey; __u32 reserved; } rdma; struct { - __u64 remote_addr; - __u64 compare_add; - __u64 swap; + __aligned_u64 remote_addr; + __aligned_u64 compare_add; + __aligned_u64 swap; __u32 rkey; __u32 reserved; } atomic; @@ -793,7 +793,7 @@ struct ib_uverbs_send_wr { }; struct ib_uverbs_post_send { - __u64 response; + __aligned_u64 response; __u32 qp_handle; __u32 wr_count; __u32 sge_count; @@ -806,13 +806,13 @@ struct ib_uverbs_post_send_resp { }; struct ib_uverbs_recv_wr { - __u64 wr_id; + __aligned_u64 wr_id; __u32 num_sge; __u32 reserved; }; struct ib_uverbs_post_recv { - __u64 response; + __aligned_u64 response; __u32 qp_handle; __u32 wr_count; __u32 sge_count; @@ -825,7 +825,7 @@ struct ib_uverbs_post_recv_resp { }; struct ib_uverbs_post_srq_recv { - __u64 response; + __aligned_u64 response; __u32 srq_handle; __u32 wr_count; __u32 sge_count; @@ -838,8 +838,8 @@ struct ib_uverbs_post_srq_recv_resp { }; struct ib_uverbs_create_ah { - __u64 response; - __u64 user_handle; + __aligned_u64 response; + __aligned_u64 user_handle; __u32 pd_handle; __u32 reserved; struct ib_uverbs_ah_attr attr; @@ -858,7 +858,7 @@ struct ib_uverbs_attach_mcast { __u32 qp_handle; __u16 mlid; __u16 reserved; - __u64 driver_data[0]; + __aligned_u64 driver_data[0]; }; struct ib_uverbs_detach_mcast { @@ -866,7 +866,7 @@ struct ib_uverbs_detach_mcast { __u32 qp_handle; __u16 mlid; __u16 reserved; - __u64 driver_data[0]; + __aligned_u64 driver_data[0]; }; struct ib_uverbs_flow_spec_hdr { @@ -874,7 +874,7 @@ struct ib_uverbs_flow_spec_hdr { __u16 size; __u16 reserved; /* followed by flow_spec */ - __u64 flow_spec_data[0]; + __aligned_u64 flow_spec_data[0]; }; struct ib_uverbs_flow_eth_filter { @@ -1033,18 +1033,18 @@ struct ib_uverbs_destroy_flow { }; struct ib_uverbs_create_srq { - __u64 response; - __u64 user_handle; + __aligned_u64 response; + __aligned_u64 user_handle; __u32 pd_handle; __u32 max_wr; __u32 max_sge; __u32 srq_limit; - __u64 driver_data[0]; + __aligned_u64 driver_data[0]; }; struct ib_uverbs_create_xsrq { - __u64 response; - __u64 user_handle; + __aligned_u64 response; + __aligned_u64 user_handle; __u32 srq_type; __u32 pd_handle; __u32 max_wr; @@ -1053,7 +1053,7 @@ struct ib_uverbs_create_xsrq { __u32 max_num_tags; __u32 xrcd_handle; __u32 cq_handle; - __u64 driver_data[0]; + __aligned_u64 driver_data[0]; }; struct ib_uverbs_create_srq_resp { @@ -1068,14 +1068,14 @@ struct ib_uverbs_modify_srq { __u32 attr_mask; __u32 max_wr; __u32 srq_limit; - __u64 driver_data[0]; + __aligned_u64 driver_data[0]; }; struct ib_uverbs_query_srq { - __u64 response; + __aligned_u64 response; 
__u32 srq_handle; __u32 reserved; - __u64 driver_data[0]; + __aligned_u64 driver_data[0]; }; struct ib_uverbs_query_srq_resp { @@ -1086,7 +1086,7 @@ struct ib_uverbs_query_srq_resp { }; struct ib_uverbs_destroy_srq { - __u64 response; + __aligned_u64 response; __u32 srq_handle; __u32 reserved; }; @@ -1098,7 +1098,7 @@ struct ib_uverbs_destroy_srq_resp { struct ib_uverbs_ex_create_wq { __u32 comp_mask; __u32 wq_type; - __u64 user_handle; + __aligned_u64 user_handle; __u32 pd_handle; __u32 cq_handle; __u32 max_wr; diff --git a/include/uapi/rdma/mlx4-abi.h b/include/uapi/rdma/mlx4-abi.h index 50a56aeb1f4179..04f64bc4045f1b 100644 --- a/include/uapi/rdma/mlx4-abi.h +++ b/include/uapi/rdma/mlx4-abi.h @@ -77,8 +77,8 @@ struct mlx4_ib_alloc_pd_resp { }; struct mlx4_ib_create_cq { - __u64 buf_addr; - __u64 db_addr; + __aligned_u64 buf_addr; + __aligned_u64 db_addr; }; struct mlx4_ib_create_cq_resp { @@ -87,12 +87,12 @@ struct mlx4_ib_create_cq_resp { }; struct mlx4_ib_resize_cq { - __u64 buf_addr; + __aligned_u64 buf_addr; }; struct mlx4_ib_create_srq { - __u64 buf_addr; - __u64 db_addr; + __aligned_u64 buf_addr; + __aligned_u64 db_addr; }; struct mlx4_ib_create_srq_resp { @@ -101,7 +101,7 @@ struct mlx4_ib_create_srq_resp { }; struct mlx4_ib_create_qp_rss { - __u64 rx_hash_fields_mask; /* Use enum mlx4_ib_rx_hash_fields */ + __aligned_u64 rx_hash_fields_mask; /* Use enum mlx4_ib_rx_hash_fields */ __u8 rx_hash_function; /* Use enum mlx4_ib_rx_hash_function_flags */ __u8 reserved[7]; __u8 rx_hash_key[40]; @@ -110,8 +110,8 @@ struct mlx4_ib_create_qp_rss { }; struct mlx4_ib_create_qp { - __u64 buf_addr; - __u64 db_addr; + __aligned_u64 buf_addr; + __aligned_u64 db_addr; __u8 log_sq_bb_count; __u8 log_sq_stride; __u8 sq_no_prefetch; @@ -120,8 +120,8 @@ struct mlx4_ib_create_qp { }; struct mlx4_ib_create_wq { - __u64 buf_addr; - __u64 db_addr; + __aligned_u64 buf_addr; + __aligned_u64 db_addr; __u8 log_range_size; __u8 reserved[3]; __u32 comp_mask; @@ -161,7 +161,7 @@ enum mlx4_ib_rx_hash_fields { }; struct mlx4_ib_rss_caps { - __u64 rx_hash_fields_mask; /* enum mlx4_ib_rx_hash_fields */ + __aligned_u64 rx_hash_fields_mask; /* enum mlx4_ib_rx_hash_fields */ __u8 rx_hash_function; /* enum mlx4_ib_rx_hash_function_flags */ __u8 reserved[7]; }; @@ -181,7 +181,7 @@ struct mlx4_ib_tso_caps { struct mlx4_uverbs_ex_query_device_resp { __u32 comp_mask; __u32 response_length; - __u64 hca_core_clock_offset; + __aligned_u64 hca_core_clock_offset; __u32 max_inl_recv_sz; __u32 reserved; struct mlx4_ib_rss_caps rss_caps; diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h index d2e0d234704fc5..09c50f390a3c86 100644 --- a/include/uapi/rdma/mlx5-abi.h +++ b/include/uapi/rdma/mlx5-abi.h @@ -84,7 +84,7 @@ struct mlx5_ib_alloc_ucontext_req_v2 { __u8 reserved0; __u16 reserved1; __u32 reserved2; - __u64 lib_caps; + __aligned_u64 lib_caps; }; enum mlx5_ib_alloc_ucontext_resp_mask { @@ -125,7 +125,7 @@ struct mlx5_ib_alloc_ucontext_resp { __u8 cmds_supp_uhw; __u8 eth_min_inline; __u8 clock_info_versions; - __u64 hca_core_clock_offset; + __aligned_u64 hca_core_clock_offset; __u32 log_uar_size; __u32 num_uars_per_page; __u32 num_dyn_bfregs; @@ -147,7 +147,7 @@ struct mlx5_ib_tso_caps { }; struct mlx5_ib_rss_caps { - __u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */ + __aligned_u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */ __u8 rx_hash_function; /* enum mlx5_rx_hash_function_flags */ __u8 reserved[7]; }; @@ -248,8 +248,8 @@ enum mlx5_ib_create_cq_flags { }; struct mlx5_ib_create_cq { - 
__u64 buf_addr; - __u64 db_addr; + __aligned_u64 buf_addr; + __aligned_u64 db_addr; __u32 cqe_size; __u8 cqe_comp_en; __u8 cqe_comp_res_format; @@ -262,15 +262,15 @@ struct mlx5_ib_create_cq_resp { }; struct mlx5_ib_resize_cq { - __u64 buf_addr; + __aligned_u64 buf_addr; __u16 cqe_size; __u16 reserved0; __u32 reserved1; }; struct mlx5_ib_create_srq { - __u64 buf_addr; - __u64 db_addr; + __aligned_u64 buf_addr; + __aligned_u64 db_addr; __u32 flags; __u32 reserved0; /* explicit padding (optional on i386) */ __u32 uidx; @@ -283,8 +283,8 @@ struct mlx5_ib_create_srq_resp { }; struct mlx5_ib_create_qp { - __u64 buf_addr; - __u64 db_addr; + __aligned_u64 buf_addr; + __aligned_u64 db_addr; __u32 sq_wqe_count; __u32 rq_wqe_count; __u32 rq_wqe_shift; @@ -292,8 +292,8 @@ struct mlx5_ib_create_qp { __u32 uidx; __u32 bfreg_index; union { - __u64 sq_buf_addr; - __u64 access_key; + __aligned_u64 sq_buf_addr; + __aligned_u64 access_key; }; }; @@ -324,7 +324,7 @@ enum mlx5_rx_hash_fields { }; struct mlx5_ib_create_qp_rss { - __u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */ + __aligned_u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */ __u8 rx_hash_function; /* enum mlx5_rx_hash_function_flags */ __u8 rx_key_len; /* valid only for Toeplitz */ __u8 reserved[6]; @@ -349,8 +349,8 @@ enum mlx5_ib_create_wq_mask { }; struct mlx5_ib_create_wq { - __u64 buf_addr; - __u64 db_addr; + __aligned_u64 buf_addr; + __aligned_u64 db_addr; __u32 rq_wqe_count; __u32 rq_wqe_shift; __u32 user_index; @@ -402,13 +402,13 @@ struct mlx5_ib_modify_wq { struct mlx5_ib_clock_info { __u32 sign; __u32 resv; - __u64 nsec; - __u64 cycles; - __u64 frac; + __aligned_u64 nsec; + __aligned_u64 cycles; + __aligned_u64 frac; __u32 mult; __u32 shift; - __u64 mask; - __u64 overflow_period; + __aligned_u64 mask; + __aligned_u64 overflow_period; }; enum mlx5_ib_mmap_cmd { diff --git a/include/uapi/rdma/mthca-abi.h b/include/uapi/rdma/mthca-abi.h index 3020d8a907a74f..ac756cd9e80772 100644 --- a/include/uapi/rdma/mthca-abi.h +++ b/include/uapi/rdma/mthca-abi.h @@ -74,8 +74,8 @@ struct mthca_reg_mr { struct mthca_create_cq { __u32 lkey; __u32 pdn; - __u64 arm_db_page; - __u64 set_db_page; + __aligned_u64 arm_db_page; + __aligned_u64 set_db_page; __u32 arm_db_index; __u32 set_db_index; }; @@ -93,7 +93,7 @@ struct mthca_resize_cq { struct mthca_create_srq { __u32 lkey; __u32 db_index; - __u64 db_page; + __aligned_u64 db_page; }; struct mthca_create_srq_resp { @@ -104,8 +104,8 @@ struct mthca_create_srq_resp { struct mthca_create_qp { __u32 lkey; __u32 reserved; - __u64 sq_db_page; - __u64 rq_db_page; + __aligned_u64 sq_db_page; + __aligned_u64 rq_db_page; __u32 sq_db_index; __u32 rq_db_index; }; diff --git a/include/uapi/rdma/nes-abi.h b/include/uapi/rdma/nes-abi.h index f5b2437aab2884..35bfd4015d0705 100644 --- a/include/uapi/rdma/nes-abi.h +++ b/include/uapi/rdma/nes-abi.h @@ -72,14 +72,14 @@ struct nes_alloc_pd_resp { }; struct nes_create_cq_req { - __u64 user_cq_buffer; + __aligned_u64 user_cq_buffer; __u32 mcrqf; __u8 reserved[4]; }; struct nes_create_qp_req { - __u64 user_wqe_buffers; - __u64 user_qp_buffer; + __aligned_u64 user_wqe_buffers; + __aligned_u64 user_qp_buffer; }; enum iwnes_memreg_type { diff --git a/include/uapi/rdma/ocrdma-abi.h b/include/uapi/rdma/ocrdma-abi.h index 32ef8670583a32..284d47b41f6e56 100644 --- a/include/uapi/rdma/ocrdma-abi.h +++ b/include/uapi/rdma/ocrdma-abi.h @@ -55,13 +55,13 @@ struct ocrdma_alloc_ucontext_resp { __u32 wqe_size; __u32 max_inline_data; __u32 dpp_wqe_size; - __u64 ah_tbl_page; + 
__aligned_u64 ah_tbl_page; __u32 ah_tbl_len; __u32 rqe_size; __u8 fw_ver[32]; /* for future use/new features in progress */ - __u64 rsvd1; - __u64 rsvd2; + __aligned_u64 rsvd1; + __aligned_u64 rsvd2; }; struct ocrdma_alloc_pd_ureq { @@ -87,13 +87,13 @@ struct ocrdma_create_cq_uresp { __u32 page_size; __u32 num_pages; __u32 max_hw_cqe; - __u64 page_addr[MAX_CQ_PAGES]; - __u64 db_page_addr; + __aligned_u64 page_addr[MAX_CQ_PAGES]; + __aligned_u64 db_page_addr; __u32 db_page_size; __u32 phase_change; /* for future use/new features in progress */ - __u64 rsvd1; - __u64 rsvd2; + __aligned_u64 rsvd1; + __aligned_u64 rsvd2; }; #define MAX_QP_PAGES 8 @@ -115,9 +115,9 @@ struct ocrdma_create_qp_uresp { __u32 rq_page_size; __u32 num_sq_pages; __u32 num_rq_pages; - __u64 sq_page_addr[MAX_QP_PAGES]; - __u64 rq_page_addr[MAX_QP_PAGES]; - __u64 db_page_addr; + __aligned_u64 sq_page_addr[MAX_QP_PAGES]; + __aligned_u64 rq_page_addr[MAX_QP_PAGES]; + __aligned_u64 db_page_addr; __u32 db_page_size; __u32 dpp_credit; __u32 dpp_offset; @@ -126,7 +126,7 @@ struct ocrdma_create_qp_uresp { __u32 db_sq_offset; __u32 db_rq_offset; __u32 db_shift; - __u64 rsvd[11]; + __aligned_u64 rsvd[11]; }; struct ocrdma_create_srq_uresp { @@ -137,16 +137,16 @@ struct ocrdma_create_srq_uresp { __u32 rq_page_size; __u32 num_rq_pages; - __u64 rq_page_addr[MAX_QP_PAGES]; - __u64 db_page_addr; + __aligned_u64 rq_page_addr[MAX_QP_PAGES]; + __aligned_u64 db_page_addr; __u32 db_page_size; __u32 num_rqe_allocated; __u32 db_rq_offset; __u32 db_shift; - __u64 rsvd2; - __u64 rsvd3; + __aligned_u64 rsvd2; + __aligned_u64 rsvd3; }; #endif /* OCRDMA_ABI_USER_H */ diff --git a/include/uapi/rdma/qedr-abi.h b/include/uapi/rdma/qedr-abi.h index 39665606293197..8ba098900e9aac 100644 --- a/include/uapi/rdma/qedr-abi.h +++ b/include/uapi/rdma/qedr-abi.h @@ -40,7 +40,7 @@ /* user kernel communication data structures. 
*/ struct qedr_alloc_ucontext_resp { - __u64 db_pa; + __aligned_u64 db_pa; __u32 db_size; __u32 max_send_wr; @@ -57,7 +57,7 @@ struct qedr_alloc_ucontext_resp { }; struct qedr_alloc_pd_ureq { - __u64 rsvd1; + __aligned_u64 rsvd1; }; struct qedr_alloc_pd_uresp { @@ -66,8 +66,8 @@ struct qedr_alloc_pd_uresp { }; struct qedr_create_cq_ureq { - __u64 addr; - __u64 len; + __aligned_u64 addr; + __aligned_u64 len; }; struct qedr_create_cq_uresp { @@ -82,17 +82,17 @@ struct qedr_create_qp_ureq { /* SQ */ /* user space virtual address of SQ buffer */ - __u64 sq_addr; + __aligned_u64 sq_addr; /* length of SQ buffer */ - __u64 sq_len; + __aligned_u64 sq_len; /* RQ */ /* user space virtual address of RQ buffer */ - __u64 rq_addr; + __aligned_u64 rq_addr; /* length of RQ buffer */ - __u64 rq_len; + __aligned_u64 rq_len; }; struct qedr_create_qp_uresp { diff --git a/include/uapi/rdma/rdma_user_cm.h b/include/uapi/rdma/rdma_user_cm.h index 65399c837762f5..c4f28cb92214ff 100644 --- a/include/uapi/rdma/rdma_user_cm.h +++ b/include/uapi/rdma/rdma_user_cm.h @@ -80,8 +80,8 @@ struct rdma_ucm_cmd_hdr { }; struct rdma_ucm_create_id { - __u64 uid; - __u64 response; + __aligned_u64 uid; + __aligned_u64 response; __u16 ps; __u8 qp_type; __u8 reserved[5]; @@ -92,7 +92,7 @@ struct rdma_ucm_create_id_resp { }; struct rdma_ucm_destroy_id { - __u64 response; + __aligned_u64 response; __u32 id; __u32 reserved; }; @@ -102,7 +102,7 @@ struct rdma_ucm_destroy_id_resp { }; struct rdma_ucm_bind_ip { - __u64 response; + __aligned_u64 response; struct sockaddr_in6 addr; __u32 id; }; @@ -143,13 +143,13 @@ enum { }; struct rdma_ucm_query { - __u64 response; + __aligned_u64 response; __u32 id; __u32 option; }; struct rdma_ucm_query_route_resp { - __u64 node_guid; + __aligned_u64 node_guid; struct ib_user_path_rec ib_route[2]; struct sockaddr_in6 src_addr; struct sockaddr_in6 dst_addr; @@ -159,7 +159,7 @@ struct rdma_ucm_query_route_resp { }; struct rdma_ucm_query_addr_resp { - __u64 node_guid; + __aligned_u64 node_guid; __u8 port_num; __u8 reserved; __u16 pkey; @@ -210,7 +210,7 @@ struct rdma_ucm_listen { }; struct rdma_ucm_accept { - __u64 uid; + __aligned_u64 uid; struct rdma_ucm_conn_param conn_param; __u32 id; __u32 reserved; @@ -228,7 +228,7 @@ struct rdma_ucm_disconnect { }; struct rdma_ucm_init_qp_attr { - __u64 response; + __aligned_u64 response; __u32 id; __u32 qp_state; }; @@ -239,8 +239,8 @@ struct rdma_ucm_notify { }; struct rdma_ucm_join_ip_mcast { - __u64 response; /* rdma_ucm_create_id_resp */ - __u64 uid; + __aligned_u64 response; /* rdma_ucm_create_id_resp */ + __aligned_u64 uid; struct sockaddr_in6 addr; __u32 id; }; @@ -253,8 +253,8 @@ enum { }; struct rdma_ucm_join_mcast { - __u64 response; /* rdma_ucma_create_id_resp */ - __u64 uid; + __aligned_u64 response; /* rdma_ucma_create_id_resp */ + __aligned_u64 uid; __u32 id; __u16 addr_size; __u16 join_flags; @@ -262,11 +262,11 @@ struct rdma_ucm_join_mcast { }; struct rdma_ucm_get_event { - __u64 response; + __aligned_u64 response; }; struct rdma_ucm_event_resp { - __u64 uid; + __aligned_u64 uid; __u32 id; __u32 event; __u32 status; @@ -296,7 +296,7 @@ enum { }; struct rdma_ucm_set_option { - __u64 optval; + __aligned_u64 optval; __u32 id; __u32 level; __u32 optname; @@ -304,7 +304,7 @@ struct rdma_ucm_set_option { }; struct rdma_ucm_migrate_id { - __u64 response; + __aligned_u64 response; __u32 id; __u32 fd; }; diff --git a/include/uapi/rdma/rdma_user_rxe.h b/include/uapi/rdma/rdma_user_rxe.h index af8f8218aed57a..1f8a9e7daea43e 100644 --- 
a/include/uapi/rdma/rdma_user_rxe.h +++ b/include/uapi/rdma/rdma_user_rxe.h @@ -68,7 +68,7 @@ struct rxe_av { }; struct rxe_send_wr { - __u64 wr_id; + __aligned_u64 wr_id; __u32 num_sge; __u32 opcode; __u32 send_flags; @@ -78,14 +78,14 @@ struct rxe_send_wr { } ex; union { struct { - __u64 remote_addr; + __aligned_u64 remote_addr; __u32 rkey; __u32 reserved; } rdma; struct { - __u64 remote_addr; - __u64 compare_add; - __u64 swap; + __aligned_u64 remote_addr; + __aligned_u64 compare_add; + __aligned_u64 swap; __u32 rkey; __u32 reserved; } atomic; @@ -98,7 +98,7 @@ struct rxe_send_wr { struct { union { struct ib_mr *mr; - __u64 reserved; + __aligned_u64 reserved; }; __u32 key; __u32 access; @@ -107,13 +107,13 @@ struct rxe_send_wr { }; struct rxe_sge { - __u64 addr; + __aligned_u64 addr; __u32 length; __u32 lkey; }; struct mminfo { - __u64 offset; + __aligned_u64 offset; __u32 size; __u32 pad; }; @@ -136,7 +136,7 @@ struct rxe_send_wqe { struct rxe_av av; __u32 status; __u32 state; - __u64 iova; + __aligned_u64 iova; __u32 mask; __u32 first_psn; __u32 last_psn; @@ -147,7 +147,7 @@ struct rxe_send_wqe { }; struct rxe_recv_wqe { - __u64 wr_id; + __aligned_u64 wr_id; __u32 num_sge; __u32 padding; struct rxe_dma_info dma; @@ -173,7 +173,7 @@ struct rxe_create_srq_resp { }; struct rxe_modify_srq_cmd { - __u64 mmap_info_addr; + __aligned_u64 mmap_info_addr; }; #endif /* RDMA_USER_RXE_H */ diff --git a/include/uapi/rdma/vmw_pvrdma-abi.h b/include/uapi/rdma/vmw_pvrdma-abi.h index edf5c722490189..d13fd490b66da2 100644 --- a/include/uapi/rdma/vmw_pvrdma-abi.h +++ b/include/uapi/rdma/vmw_pvrdma-abi.h @@ -143,7 +143,7 @@ struct pvrdma_alloc_pd_resp { }; struct pvrdma_create_cq { - __u64 buf_addr; + __aligned_u64 buf_addr; __u32 buf_size; __u32 reserved; }; @@ -154,13 +154,13 @@ struct pvrdma_create_cq_resp { }; struct pvrdma_resize_cq { - __u64 buf_addr; + __aligned_u64 buf_addr; __u32 buf_size; __u32 reserved; }; struct pvrdma_create_srq { - __u64 buf_addr; + __aligned_u64 buf_addr; __u32 buf_size; __u32 reserved; }; @@ -171,25 +171,25 @@ struct pvrdma_create_srq_resp { }; struct pvrdma_create_qp { - __u64 rbuf_addr; - __u64 sbuf_addr; + __aligned_u64 rbuf_addr; + __aligned_u64 sbuf_addr; __u32 rbuf_size; __u32 sbuf_size; - __u64 qp_addr; + __aligned_u64 qp_addr; }; /* PVRDMA masked atomic compare and swap */ struct pvrdma_ex_cmp_swap { - __u64 swap_val; - __u64 compare_val; - __u64 swap_mask; - __u64 compare_mask; + __aligned_u64 swap_val; + __aligned_u64 compare_val; + __aligned_u64 swap_mask; + __aligned_u64 compare_mask; }; /* PVRDMA masked atomic fetch and add */ struct pvrdma_ex_fetch_add { - __u64 add_val; - __u64 field_boundary; + __aligned_u64 add_val; + __aligned_u64 field_boundary; }; /* PVRDMA address vector. 
*/ @@ -207,14 +207,14 @@ struct pvrdma_av { /* PVRDMA scatter/gather entry */ struct pvrdma_sge { - __u64 addr; + __aligned_u64 addr; __u32 length; __u32 lkey; }; /* PVRDMA receive queue work request */ struct pvrdma_rq_wqe_hdr { - __u64 wr_id; /* wr id */ + __aligned_u64 wr_id; /* wr id */ __u32 num_sge; /* size of s/g array */ __u32 total_len; /* reserved */ }; @@ -222,7 +222,7 @@ struct pvrdma_rq_wqe_hdr { /* PVRDMA send queue work request */ struct pvrdma_sq_wqe_hdr { - __u64 wr_id; /* wr id */ + __aligned_u64 wr_id; /* wr id */ __u32 num_sge; /* size of s/g array */ __u32 total_len; /* reserved */ __u32 opcode; /* operation type */ @@ -234,19 +234,19 @@ struct pvrdma_sq_wqe_hdr { __u32 reserved; union { struct { - __u64 remote_addr; + __aligned_u64 remote_addr; __u32 rkey; __u8 reserved[4]; } rdma; struct { - __u64 remote_addr; - __u64 compare_add; - __u64 swap; + __aligned_u64 remote_addr; + __aligned_u64 compare_add; + __aligned_u64 swap; __u32 rkey; __u32 reserved; } atomic; struct { - __u64 remote_addr; + __aligned_u64 remote_addr; __u32 log_arg_sz; __u32 rkey; union { @@ -255,8 +255,8 @@ struct pvrdma_sq_wqe_hdr { } wr_data; } masked_atomics; struct { - __u64 iova_start; - __u64 pl_pdir_dma; + __aligned_u64 iova_start; + __aligned_u64 pl_pdir_dma; __u32 page_shift; __u32 page_list_len; __u32 length; @@ -275,8 +275,8 @@ struct pvrdma_sq_wqe_hdr { /* Completion queue element. */ struct pvrdma_cqe { - __u64 wr_id; - __u64 qp; + __aligned_u64 wr_id; + __aligned_u64 qp; __u32 opcode; __u32 status; __u32 byte_len; -- 2.16.1
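
A note on what __aligned_u64 buys us, for anyone new to this: on i386 a bare
__u64 member only needs 4-byte alignment, while on x86_64 it needs 8-byte
alignment, so the same uapi struct can end up with different padding, member
offsets and sizeof for 32-bit userspace talking to a 64-bit kernel.
__aligned_u64 (include/uapi/linux/types.h) is simply __u64 with an explicit
aligned(8) attribute, which pins the layout on every ABI. The snippet below
is purely illustrative and is not part of the patch; demo_resp_unaligned and
demo_resp_aligned are made-up structs, and the _Static_assert checks only
mirror, at compile time, the kind of layout comparison that was done with
pahole. Build it with both -m32 and -m64 (assuming installed kernel headers)
to see the divergence.

/* Illustrative only, not part of this patch.
 * Requires the sanitized kernel headers (linux/types.h) to be installed.
 * Build twice, e.g. "gcc -m32 demo.c" and "gcc -m64 demo.c", and compare.
 */
#include <stdio.h>
#include <stddef.h>
#include <linux/types.h>	/* __u32, __u64, __aligned_u64 */

/* A made-up response struct written the old way. */
struct demo_resp_unaligned {
	__u32 handle;
	__u64 addr;		/* 4-byte aligned on i386, 8-byte on x86_64 */
};

/* The same made-up struct written the way this series standardizes. */
struct demo_resp_aligned {
	__u32 handle;
	__aligned_u64 addr;	/* 8-byte aligned on every ABI */
};

/* Compile-time layout checks in the spirit of the pahole comparison:
 * both of these hold for -m32 and -m64 builds of the aligned variant.
 */
_Static_assert(offsetof(struct demo_resp_aligned, addr) == 8,
	       "addr offset differs between ABIs");
_Static_assert(sizeof(struct demo_resp_aligned) == 16,
	       "struct size differs between ABIs");

int main(void)
{
	/* x86_64 prints 8/16 for both structs; i386 prints 4/12 for the
	 * unaligned variant, i.e. a different uapi layout.
	 */
	printf("unaligned: offsetof(addr)=%zu sizeof=%zu\n",
	       offsetof(struct demo_resp_unaligned, addr),
	       sizeof(struct demo_resp_unaligned));
	printf("aligned:   offsetof(addr)=%zu sizeof=%zu\n",
	       offsetof(struct demo_resp_aligned, addr),
	       sizeof(struct demo_resp_aligned));
	return 0;
}

The explicit reserved/padding fields some of the headers above already carry
(e.g. the "explicit padding (optional for i386)" comments) address the same
problem for 32-bit members; __aligned_u64 makes the 64-bit members themselves
immune to the i386 alignment quirk.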