Re: [PATCH 08/11] crypto: qat - add rate limiting feature to qat_4xxx

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



On 2023-10-16 at 13:53:14 +0300, Tero Kristo wrote:
> Hello,
> 
> On 11/10/2023 15:15, Muszynski, Damian wrote:
> > The Rate Limiting (RL) feature allows to control the rate of requests
> > that can be submitted on a ring pair (RP). This allows sharing a QAT
> > device among multiple users while ensuring a guaranteed throughput.

...

> > +static int validate_user_input(struct adf_accel_dev *accel_dev,
> > +			       struct adf_rl_sla_input_data *sla_in,
> > +			       bool is_update)
> > +{
> > +	const unsigned long rp_mask = sla_in->rp_mask;
> > +	size_t rp_mask_size;
> > +	int i, cnt;
> > +
> > +	if (sla_in->sla_id < RL_SLA_EMPTY_ID || sla_in->sla_id >= RL_NODES_CNT_MAX) {
> 
> Should this be <= RL_SLA_EMPTY_ID? Additionally this check seems unnecessary
> as similar check is done in the validate_sla_id() which gets always called
> in the same path where the validate_user_input() is called.

You're right, this check can be safely eliminated.

> 
> > +		dev_notice(&GET_DEV(accel_dev), "Wrong SLA ID\n");
> > +		goto ret_inval;
> > +	}
> > +
> > +	if (sla_in->pir < sla_in->cir) {
> > +		dev_notice(&GET_DEV(accel_dev),
> > +			   "PIR must be >= CIR, setting PIR to CIR\n");
> > +		sla_in->pir = sla_in->cir;
> > +	}
> > +
> > +	if (!is_update) {
> > +		cnt = 0;
> > +		rp_mask_size = sizeof(sla_in->rp_mask) * BITS_PER_BYTE;
> > +		for_each_set_bit(i, &rp_mask, rp_mask_size) {
> > +			if (++cnt > RL_RP_CNT_PER_LEAF_MAX) {
> > +				dev_notice(&GET_DEV(accel_dev),
> > +					   "Too many ring pairs selected for this SLA\n");
> > +				goto ret_inval;
> 
> The error gotos seem useless in this function, maybe just directly return
> -EINVAL.

Ok, will change that.

> 
> > +			}
> > +		}
> > +
> > +		if (sla_in->srv >= ADF_SVC_NONE) {
> > +			dev_notice(&GET_DEV(accel_dev),
> > +				   "Wrong service type\n");
> > +			goto ret_inval;
> > +		}
> > +
> > +		if (sla_in->type > RL_LEAF) {
> > +			dev_notice(&GET_DEV(accel_dev),
> > +				   "Wrong node type\n");
> > +			goto ret_inval;
> > +		}
> > +
> > +		if (sla_in->parent_id < RL_PARENT_DEFAULT_ID ||
> > +		    sla_in->parent_id >= RL_NODES_CNT_MAX) {
> > +			dev_notice(&GET_DEV(accel_dev),
> > +				   "Wrong parent ID\n");
> > +			goto ret_inval;
> > +		}
> > +	}
> > +
> > +	return 0;
> > +
> > +ret_inval:
> > +	return -EINVAL;
> > +}
> > +
> > +static int validate_sla_id(struct adf_accel_dev *accel_dev, int sla_id)
> > +{
> > +	struct rl_sla *sla;
> > +
> > +	if (sla_id < 0 || sla_id >= RL_NODES_CNT_MAX)
> > +		goto ret_not_exists;
> > +
> > +	sla = accel_dev->rate_limiting->sla[sla_id];
> > +
> > +	if (!sla)
> > +		goto ret_not_exists;
> > +
> > +	if (sla->type != RL_LEAF) {
> > +		dev_notice(&GET_DEV(accel_dev),
> > +			   "This ID is reserved for internal use\n");
> > +		goto ret_inval;
> 
> Maybe just direct return here.

Done.

...

> > +static void update_budget(struct rl_sla *sla, u32 old_cir, bool is_update)
> > +{
> > +	u32 new_rem;
> > +
> > +	switch (sla->type) {
> > +	case RL_LEAF:
> > +		if (is_update)
> > +			sla->parent->rem_cir += old_cir;
> > +
> > +		sla->parent->rem_cir -= sla->cir;
> > +		sla->rem_cir = 0;
> > +		break;
> > +	case RL_CLUSTER:
> > +		if (is_update) {
> > +			sla->parent->rem_cir += old_cir;
> > +			new_rem = sla->cir - (old_cir - sla->rem_cir);
> > +			sla->rem_cir = new_rem;
> 
> Get rid of new_rem and directly assign.

Ok.

> 
> > +		} else {
> > +			sla->rem_cir = sla->cir;
> > +		}
> > +
> > +		sla->parent->rem_cir -= sla->cir;
> > +		break;
> > +	case RL_ROOT:
> > +		if (is_update) {
> > +			new_rem = sla->cir - (old_cir - sla->rem_cir);
> > +			sla->rem_cir = new_rem;
> 
> Same here. You avoid one local variable that way.

Ok.

...

> > +static int add_new_sla_entry(struct adf_accel_dev *accel_dev,
> > +			     struct adf_rl_sla_input_data *sla_in,
> > +			     struct rl_sla **sla_out)
> > +{
> > +	struct adf_rl *rl_data = accel_dev->rate_limiting;
> > +	struct rl_sla *sla;
> > +	int ret = 0;
> > +
> > +	sla = kzalloc(sizeof(*sla), GFP_KERNEL);
> 
> Why not use devm_kzalloc()? You have access to the device handle.

Our driver allows the user to put a device 'down' and 'up'. During that procedure 
many components need to be unloaded and free their memory. This component also 
belongs to that group. So devm_* is pointless here.

...

> > +	if (sla->type == RL_LEAF) {
> > +		ret = prepare_rp_ids(accel_dev, sla, sla_in->rp_mask);
> > +		if (!sla->ring_pairs_cnt || ret) {
> > +			dev_notice(&GET_DEV(accel_dev),
> > +				   "Unable to find ring pairs to assign to the leaf");
> > +			if (!ret)
> > +				ret = -EINVAL;
> > +
> > +			goto ret_err;
> > +		}
> > +	}
> > +
> > +	ret = 0;
> > +
> > +ret_err:
> > +	/* Allocated sla will be freed at the bottom of calling function */
> 
> I would rather recommend you release the sla here if the function fails.

Ok, will add.

...

> > +
> > +		ret = adf_rl_add_sla(accel_dev, &sla_in);
> > +		if (ret)
> > +			goto err_ret;
> > +	}
> > +
> > +	return 0;
> > +
> > +err_ret:
> > +	dev_notice(&GET_DEV(accel_dev),
> > +		   "Initialization of default nodes failed\n");
> 
> dev_notice() here and you also do dev_err() for the exact same reason in the
> callee (adf_rl_start().) You can make this function cleaner by removing the
> dev_notice here, and directly returning in error cases without goto.

Thanks. Will fix that.

...

> > +int adf_rl_remove_sla(struct adf_accel_dev *accel_dev, u32 sla_id)
> > +{
> > +	struct adf_rl *rl_data = accel_dev->rate_limiting;
> > +	struct rl_sla *sla;
> > +	int ret;
> > +
> > +	ret = validate_sla_id(accel_dev, sla_id);
> > +	if (ret)
> > +		return ret;
> > +
> > +	sla = rl_data->sla[sla_id];
> > +
> > +	if (sla->type < RL_LEAF && sla->rem_cir != sla->cir) {
> > +		dev_notice(&GET_DEV(accel_dev),
> > +			   "To remove parent SLA all its children must be removed first");
> > +		return -EINVAL;
> > +	}
> 
> Mutex is not protecting the above portion of code, which means you can call
> clear_sla() with identical pointer value multiple times.

Thanks. Will fix that.

> > +
> > +	mutex_lock(&rl_data->rl_lock);
> > +	clear_sla(rl_data, sla);
> > +	mutex_unlock(&rl_data->rl_lock);
> > +
> > +	return 0;
> > +}

...

> > +	rl = kzalloc(sizeof(*rl), GFP_KERNEL);
> 
> devm_kzalloc()?

No, explanation above.

...


> > +int adf_rl_send_admin_init_msg(struct adf_accel_dev *accel_dev,
> > +			       struct rl_slice_cnt *slices_int)
> > +{
> > +	struct icp_qat_fw_init_admin_slice_cnt slices_resp = { };
> > +	int ret;
> > +
> > +	ret = adf_send_admin_rl_init(accel_dev, &slices_resp);
> > +	if (ret) > +		goto err_ret;
> 
> Just return ret.

Ok.


---
Damian Muszynski



[Index of Archives]     [Kernel]     [Gnu Classpath]     [Gnu Crypto]     [DM Crypt]     [Netfilter]     [Bugtraq]
  Powered by Linux