Subject: Re: [PATCH V3 04/25] smartpqi: add support for raid5 and raid6 writes

> struct pqi_raid_path_request {
> 	struct pqi_iu_header header;

> @@ -312,6 +313,39 @@ struct pqi_aio_path_request {
> 		sg_descriptors[PQI_MAX_EMBEDDED_SG_DESCRIPTORS];
> };
>
> +#define PQI_RAID56_XFER_LIMIT_4K	0x1000 /* 4Kib */
> +#define PQI_RAID56_XFER_LIMIT_8K	0x2000 /* 8Kib */

You don't seem to use these, and you'll remove them again in patch 06/25.

Don: Removed these definitions.

> diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
> index 6bcb037ae9d7..c813cec10003 100644
> --- a/drivers/scsi/smartpqi/smartpqi_init.c
> +++ b/drivers/scsi/smartpqi/smartpqi_init.c
> @@ -2245,13 +2250,14 @@ static bool pqi_aio_raid_level_supported(struct pqi_scsi_dev_raid_map_data *rmd)
> 	case SA_RAID_0:
> 		break;
> 	case SA_RAID_1:
> -		if (rmd->is_write)
> -			is_supported = false;
> +		is_supported = false;

You disable RAID1 READs with this patch. I can see you fix it again in 05/25, but it still looks wrong.

Don: Corrected.

> 		break;
> 	case SA_RAID_5:
> -		fallthrough;
> +		if (rmd->is_write && !ctrl_info->enable_r5_writes)
> +			is_supported = false;
> +		break;
> 	case SA_RAID_6:
> -		if (rmd->is_write)
> +		if (rmd->is_write && !ctrl_info->enable_r6_writes)
> 			is_supported = false;
> 		break;
> 	case SA_RAID_ADM:

> @@ -2526,6 +2532,26 @@ static int pqi_calc_aio_r5_or_r6(struct pqi_scsi_dev_raid_map_data *rmd,
> 		rmd->total_disks_per_row)) +
> 		(rmd->map_row * rmd->total_disks_per_row) + rmd->first_column;
>
> +	if (rmd->is_write) {
> +		rmd->p_index = (rmd->map_row * rmd->total_disks_per_row) + rmd->data_disks_per_row;
> +		rmd->p_parity_it_nexus = raid_map->disk_data[rmd->p_index].aio_handle;

I suppose you have made sure rmd->p_index can't be larger than the size of raid_map->disk_data. A comment explaining that would be helpful for the reader, though.

Don: Added a comment for p_index.

> +		if (rmd->raid_level == SA_RAID_6) {
> +			rmd->q_index = (rmd->map_row * rmd->total_disks_per_row) +
> +				(rmd->data_disks_per_row + 1);
> +			rmd->q_parity_it_nexus = raid_map->disk_data[rmd->q_index].aio_handle;
> +			rmd->xor_mult = raid_map->disk_data[rmd->map_index].xor_mult[1];

See above.

Don: Comment updated to include q_index.

> +		}
> +		if (rmd->blocks_per_row == 0)
> +			return PQI_RAID_BYPASS_INELIGIBLE;
> +#if BITS_PER_LONG == 32
> +		tmpdiv = rmd->first_block;
> +		do_div(tmpdiv, rmd->blocks_per_row);
> +		rmd->row = tmpdiv;
> +#else
> +		rmd->row = rmd->first_block / rmd->blocks_per_row;
> +#endif

Why not always use do_div()?

Don: I had removed the BITS_PER_LONG check in an attempt to clean up the code, but forgot that we still need to support 32-bit, so I re-added the BITS_PER_LONG hunks. These hunks were there before I refactored the code, so they predate me. Any chance I can leave this in? It's been through a lot of regression testing already...
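For reference, the pattern boils down to the sketch below (illustrative names only, not the driver's exact code). On a 32-bit kernel, a plain division of a u64 would make the compiler emit a call to libgcc's __udivdi3, which the kernel does not provide, so do_div() has to be used there; on 64-bit, native division works and reads more clearly.

	#include <asm/bitsperlong.h>	/* BITS_PER_LONG */
	#include <asm/div64.h>		/* do_div() */
	#include <linux/types.h>	/* u64, u32 */

	static u64 blocks_to_row(u64 first_block, u32 blocks_per_row)
	{
	#if BITS_PER_LONG == 32
		u64 tmp = first_block;

		/* do_div() divides in place and returns the remainder. */
		do_div(tmp, blocks_per_row);
		return tmp;
	#else
		return first_block / blocks_per_row;
	#endif
	}

div_u64() from <linux/math64.h> wraps exactly this #if, so a later cleanup could switch to it and drop the open-coded BITS_PER_LONG hunks.
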
> @@ -4844,6 +4889,12 @@ static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
> 		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
> 		sizeof(struct pqi_sg_descriptor)) +
> 		PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
> +
> +	ctrl_info->max_sg_per_r56_iu =
> +		((ctrl_info->max_inbound_iu_length -
> +		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
> +		sizeof(struct pqi_sg_descriptor)) +
> +		PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS;
> }
>
> static inline void pqi_set_sg_descriptor(

> @@ -4931,6 +4982,44 @@ static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
> 	return 0;
> }
>
> +static int pqi_build_aio_r56_sg_list(struct pqi_ctrl_info *ctrl_info,
> +	struct pqi_aio_r56_path_request *request, struct scsi_cmnd *scmd,
> +	struct pqi_io_request *io_request)
> +{
> +	u16 iu_length;
> +	int sg_count;
> +	bool chained;
> +	unsigned int num_sg_in_iu;
> +	struct scatterlist *sg;
> +	struct pqi_sg_descriptor *sg_descriptor;
> +
> +	sg_count = scsi_dma_map(scmd);
> +	if (sg_count < 0)
> +		return sg_count;
> +
> +	iu_length = offsetof(struct pqi_aio_r56_path_request, sg_descriptors) -
> +		PQI_REQUEST_HEADER_LENGTH;
> +	num_sg_in_iu = 0;
> +
> +	if (sg_count == 0)
> +		goto out;

An if {} block would be more readable here.

Don: Done.

> }
>
> +static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
> +	struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
> +	struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
> +	struct pqi_scsi_dev_raid_map_data *rmd)
> +{
> +
> +	switch (scmd->sc_data_direction) {
> +	case DMA_TO_DEVICE:
> +		r56_request->data_direction = SOP_READ_FLAG;
> +		break;

I wonder how it would be possible that sc_data_direction is anything else but DMA_TO_DEVICE here. AFAICS we will only reach this code for WRITE commands. Add a comment, please.

Don: Great observation. Removed the switch block and added a comment; set the direction to write.

> +static ssize_t pqi_host_enable_r5_writes_show(struct device *dev,
> +	struct device_attribute *attr, char *buffer)
> +{
> +	struct Scsi_Host *shost = class_to_shost(dev);
> +	struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
> +
> +	return scnprintf(buffer, 10, "%hhx\n", ctrl_info->enable_r5_writes);

"%hhx" is deprecated, see
https://lore.kernel.org/lkml/20190914015858.7c76e036@xxxxxxx/T/

Don: Done.

> +static ssize_t pqi_host_enable_r6_writes_show(struct device *dev,
> +	struct device_attribute *attr, char *buffer)
> +{
> +	struct Scsi_Host *shost = class_to_shost(dev);
> +	struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
> +
> +	return scnprintf(buffer, 10, "%hhx\n", ctrl_info->enable_r6_writes);

See above.

Don: Done.

Don: Thanks for all of your great effort on this patch. I'll upload a V4 with updates to this patch and the rest of your reviews soon.

Thanks,
Don Brace
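
P.S. For anyone following the "%hhx" sub-thread, the fix amounts to something like the sketch below (assumed shape of the V4 hunk, with PAGE_SIZE as the conventional sysfs buffer bound rather than the original hard-coded 10). Since the u8 flag is promoted to int through the varargs call anyway, a plain "%x" is both safe and sufficient.

	/* Sketch only -- not the exact V4 code. */
	static ssize_t pqi_host_enable_r5_writes_show(struct device *dev,
		struct device_attribute *attr, char *buffer)
	{
		struct Scsi_Host *shost = class_to_shost(dev);
		struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);

		return scnprintf(buffer, PAGE_SIZE, "%x\n",
			ctrl_info->enable_r5_writes);
	}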