Re: [PATCH] crypto: skcipher - make chunksize and walksize accessors internal

On Mon, 3 Jun 2019 at 07:46, Eric Biggers <ebiggers@xxxxxxxxxx> wrote:
>
> From: Eric Biggers <ebiggers@xxxxxxxxxx>
>
> The 'chunksize' and 'walksize' properties of skcipher algorithms are
> implementation details that users of the skcipher API should not be
> looking at.  So move their accessor functions from <crypto/skcipher.h>
> to <crypto/internal/skcipher.h>.
>
> Signed-off-by: Eric Biggers <ebiggers@xxxxxxxxxx>

Acked-by: Ard Biesheuvel <ard.biesheuvel@xxxxxxxxxx>

> ---
>  include/crypto/internal/skcipher.h | 60 ++++++++++++++++++++++++++++++
>  include/crypto/skcipher.h          | 60 ------------------------------
>  2 files changed, 60 insertions(+), 60 deletions(-)
>
> diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
> index 9de6032209cb1..abb1096495c2f 100644
> --- a/include/crypto/internal/skcipher.h
> +++ b/include/crypto/internal/skcipher.h
> @@ -205,6 +205,66 @@ static inline unsigned int crypto_skcipher_alg_max_keysize(
>         return alg->max_keysize;
>  }
>
> +static inline unsigned int crypto_skcipher_alg_chunksize(
> +       struct skcipher_alg *alg)
> +{
> +       if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
> +           CRYPTO_ALG_TYPE_BLKCIPHER)
> +               return alg->base.cra_blocksize;
> +
> +       if (alg->base.cra_ablkcipher.encrypt)
> +               return alg->base.cra_blocksize;
> +
> +       return alg->chunksize;
> +}
> +
> +static inline unsigned int crypto_skcipher_alg_walksize(
> +       struct skcipher_alg *alg)
> +{
> +       if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
> +           CRYPTO_ALG_TYPE_BLKCIPHER)
> +               return alg->base.cra_blocksize;
> +
> +       if (alg->base.cra_ablkcipher.encrypt)
> +               return alg->base.cra_blocksize;
> +
> +       return alg->walksize;
> +}
> +
> +/**
> + * crypto_skcipher_chunksize() - obtain chunk size
> + * @tfm: cipher handle
> + *
> + * The block size is set to one for ciphers such as CTR.  However,
> + * you still need to provide incremental updates in multiples of
> + * the underlying block size as the IV does not have sub-block
> + * granularity.  This is known in this API as the chunk size.
> + *
> + * Return: chunk size in bytes
> + */
> +static inline unsigned int crypto_skcipher_chunksize(
> +       struct crypto_skcipher *tfm)
> +{
> +       return crypto_skcipher_alg_chunksize(crypto_skcipher_alg(tfm));
> +}
> +
> +/**
> + * crypto_skcipher_walksize() - obtain walk size
> + * @tfm: cipher handle
> + *
> + * In some cases, algorithms can only perform optimally when operating on
> + * multiple blocks in parallel. This is reflected by the walksize, which
> + * must be a multiple of the chunksize (or equal if the concern does not
> + * apply)
> + *
> + * Return: walk size in bytes
> + */
> +static inline unsigned int crypto_skcipher_walksize(
> +       struct crypto_skcipher *tfm)
> +{
> +       return crypto_skcipher_alg_walksize(crypto_skcipher_alg(tfm));
> +}
> +
>  /* Helpers for simple block cipher modes of operation */
>  struct skcipher_ctx_simple {
>         struct crypto_cipher *cipher;   /* underlying block cipher */
> diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
> index 98547d1f18c53..694397fb0faab 100644
> --- a/include/crypto/skcipher.h
> +++ b/include/crypto/skcipher.h
> @@ -293,66 +293,6 @@ static inline unsigned int crypto_sync_skcipher_ivsize(
>         return crypto_skcipher_ivsize(&tfm->base);
>  }
>
> -static inline unsigned int crypto_skcipher_alg_chunksize(
> -       struct skcipher_alg *alg)
> -{
> -       if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
> -           CRYPTO_ALG_TYPE_BLKCIPHER)
> -               return alg->base.cra_blocksize;
> -
> -       if (alg->base.cra_ablkcipher.encrypt)
> -               return alg->base.cra_blocksize;
> -
> -       return alg->chunksize;
> -}
> -
> -static inline unsigned int crypto_skcipher_alg_walksize(
> -       struct skcipher_alg *alg)
> -{
> -       if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
> -           CRYPTO_ALG_TYPE_BLKCIPHER)
> -               return alg->base.cra_blocksize;
> -
> -       if (alg->base.cra_ablkcipher.encrypt)
> -               return alg->base.cra_blocksize;
> -
> -       return alg->walksize;
> -}
> -
> -/**
> - * crypto_skcipher_chunksize() - obtain chunk size
> - * @tfm: cipher handle
> - *
> - * The block size is set to one for ciphers such as CTR.  However,
> - * you still need to provide incremental updates in multiples of
> - * the underlying block size as the IV does not have sub-block
> - * granularity.  This is known in this API as the chunk size.
> - *
> - * Return: chunk size in bytes
> - */
> -static inline unsigned int crypto_skcipher_chunksize(
> -       struct crypto_skcipher *tfm)
> -{
> -       return crypto_skcipher_alg_chunksize(crypto_skcipher_alg(tfm));
> -}
> -
> -/**
> - * crypto_skcipher_walksize() - obtain walk size
> - * @tfm: cipher handle
> - *
> - * In some cases, algorithms can only perform optimally when operating on
> - * multiple blocks in parallel. This is reflected by the walksize, which
> - * must be a multiple of the chunksize (or equal if the concern does not
> - * apply)
> - *
> - * Return: walk size in bytes
> - */
> -static inline unsigned int crypto_skcipher_walksize(
> -       struct crypto_skcipher *tfm)
> -{
> -       return crypto_skcipher_alg_walksize(crypto_skcipher_alg(tfm));
> -}
> -
>  /**
>   * crypto_skcipher_blocksize() - obtain block size of cipher
>   * @tfm: cipher handle
> --
> 2.21.0
>
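As a quick illustration of what this split means in practice (not part of the
patch, and purely hypothetical): after the move, only code that includes
<crypto/internal/skcipher.h> -- i.e. algorithm implementations and templates,
not ordinary users of the skcipher API -- can query these properties. A
made-up mode "foo" might, for example, use the chunk size to reject
misaligned partial updates:

  #include <linux/errno.h>
  #include <crypto/internal/skcipher.h>

  /* Sketch only: "foo" is a hypothetical skcipher template. */
  static int foo_check_partial_update(struct skcipher_request *req)
  {
          struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);

          /*
           * CTR-like modes advertise a block size of 1, but the IV only
           * advances in whole chunks, so partial updates must still be
           * chunk-aligned.
           */
          if (req->cryptlen % crypto_skcipher_chunksize(tfm))
                  return -EINVAL;

          return 0;
  }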


