While at it, conform to kernel spacing (i.e. no space) after cast.

No functional changes.

Reviewed-by: Joonas Lahtinen <joonas.lahtinen@xxxxxxxxxxxxxxx>
Signed-off-by: Jani Nikula <jani.nikula@xxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_fixed.h | 61 +++++++++++++++++++--------------------
 1 file changed, 29 insertions(+), 32 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_fixed.h b/drivers/gpu/drm/i915/i915_fixed.h
index c974e51c6d8b..6c914940b4a9 100644
--- a/drivers/gpu/drm/i915/i915_fixed.h
+++ b/drivers/gpu/drm/i915/i915_fixed.h
@@ -7,7 +7,7 @@
 #define _I915_FIXED_H_
 
 typedef struct {
-	uint32_t val;
+	u32 val;
 } uint_fixed_16_16_t;
 
 #define FP_16_16_MAX ({ \
@@ -23,7 +23,7 @@ static inline bool is_fixed16_zero(uint_fixed_16_16_t val)
 	return false;
 }
 
-static inline uint_fixed_16_16_t u32_to_fixed16(uint32_t val)
+static inline uint_fixed_16_16_t u32_to_fixed16(u32 val)
 {
 	uint_fixed_16_16_t fp;
 
@@ -33,12 +33,12 @@ static inline uint_fixed_16_16_t u32_to_fixed16(uint32_t val)
 	return fp;
 }
 
-static inline uint32_t fixed16_to_u32_round_up(uint_fixed_16_16_t fp)
+static inline u32 fixed16_to_u32_round_up(uint_fixed_16_16_t fp)
 {
 	return DIV_ROUND_UP(fp.val, 1 << 16);
 }
 
-static inline uint32_t fixed16_to_u32(uint_fixed_16_16_t fp)
+static inline u32 fixed16_to_u32(uint_fixed_16_16_t fp)
 {
 	return fp.val >> 16;
 }
@@ -61,86 +61,83 @@ static inline uint_fixed_16_16_t max_fixed16(uint_fixed_16_16_t max1,
 	return max;
 }
 
-static inline uint_fixed_16_16_t clamp_u64_to_fixed16(uint64_t val)
+static inline uint_fixed_16_16_t clamp_u64_to_fixed16(u64 val)
 {
 	uint_fixed_16_16_t fp;
 
 	WARN_ON(val > U32_MAX);
-	fp.val = (uint32_t) val;
+	fp.val = (u32)val;
 	return fp;
 }
 
-static inline uint32_t div_round_up_fixed16(uint_fixed_16_16_t val,
-					    uint_fixed_16_16_t d)
+static inline u32 div_round_up_fixed16(uint_fixed_16_16_t val,
+				       uint_fixed_16_16_t d)
 {
 	return DIV_ROUND_UP(val.val, d.val);
 }
 
-static inline uint32_t mul_round_up_u32_fixed16(uint32_t val,
-						uint_fixed_16_16_t mul)
+static inline u32 mul_round_up_u32_fixed16(u32 val, uint_fixed_16_16_t mul)
 {
-	uint64_t intermediate_val;
+	u64 intermediate_val;
 
-	intermediate_val = (uint64_t) val * mul.val;
+	intermediate_val = (u64)val * mul.val;
 	intermediate_val = DIV_ROUND_UP_ULL(intermediate_val, 1 << 16);
 	WARN_ON(intermediate_val > U32_MAX);
-	return (uint32_t) intermediate_val;
+	return (u32)intermediate_val;
 }
 
 static inline uint_fixed_16_16_t mul_fixed16(uint_fixed_16_16_t val,
 					     uint_fixed_16_16_t mul)
 {
-	uint64_t intermediate_val;
+	u64 intermediate_val;
 
-	intermediate_val = (uint64_t) val.val * mul.val;
+	intermediate_val = (u64)val.val * mul.val;
 	intermediate_val = intermediate_val >> 16;
 	return clamp_u64_to_fixed16(intermediate_val);
 }
 
-static inline uint_fixed_16_16_t div_fixed16(uint32_t val, uint32_t d)
+static inline uint_fixed_16_16_t div_fixed16(u32 val, u32 d)
 {
-	uint64_t interm_val;
+	u64 interm_val;
 
-	interm_val = (uint64_t)val << 16;
+	interm_val = (u64)val << 16;
 	interm_val = DIV_ROUND_UP_ULL(interm_val, d);
 	return clamp_u64_to_fixed16(interm_val);
 }
 
-static inline uint32_t div_round_up_u32_fixed16(uint32_t val,
-						uint_fixed_16_16_t d)
+static inline u32 div_round_up_u32_fixed16(u32 val, uint_fixed_16_16_t d)
 {
-	uint64_t interm_val;
+	u64 interm_val;
 
-	interm_val = (uint64_t)val << 16;
+	interm_val = (u64)val << 16;
 	interm_val = DIV_ROUND_UP_ULL(interm_val, d.val);
 	WARN_ON(interm_val > U32_MAX);
-	return (uint32_t) interm_val;
+	return (u32)interm_val;
 }
 
-static inline uint_fixed_16_16_t mul_u32_fixed16(uint32_t val,
-						 uint_fixed_16_16_t mul)
+static inline uint_fixed_16_16_t mul_u32_fixed16(u32 val, uint_fixed_16_16_t mul)
 {
-	uint64_t intermediate_val;
+	u64 intermediate_val;
 
-	intermediate_val = (uint64_t) val * mul.val;
+	intermediate_val = (u64)val * mul.val;
 	return clamp_u64_to_fixed16(intermediate_val);
 }
 
 static inline uint_fixed_16_16_t add_fixed16(uint_fixed_16_16_t add1,
 					     uint_fixed_16_16_t add2)
 {
-	uint64_t interm_sum;
+	u64 interm_sum;
 
-	interm_sum = (uint64_t) add1.val + add2.val;
+	interm_sum = (u64)add1.val + add2.val;
 	return clamp_u64_to_fixed16(interm_sum);
 }
 
 static inline uint_fixed_16_16_t add_fixed16_u32(uint_fixed_16_16_t add1,
-						 uint32_t add2)
+						 u32 add2)
 {
-	uint64_t interm_sum;
+	u64 interm_sum;
 	uint_fixed_16_16_t interm_add2 = u32_to_fixed16(add2);
 
-	interm_sum = (uint64_t) add1.val + interm_add2.val;
+	interm_sum = (u64)add1.val + interm_add2.val;
 	return clamp_u64_to_fixed16(interm_sum);
 }
-- 
2.11.0

_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
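[Editor's note: for readers less familiar with the 16.16 fixed-point format these helpers implement, here is a minimal userspace sketch of the same idea. The type and function names (fixed_16_16, u32_to_fp, fp_mul, fp_to_u32) are made up for illustration, and plain assert() stands in for the kernel's WARN_ON(); this is an approximation of the helpers in the patch, not the kernel code itself.]

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative 16.16 fixed point: high 16 bits integer, low 16 bits fraction. */
typedef struct {
	uint32_t val;
} fixed_16_16;

static fixed_16_16 u32_to_fp(uint32_t v)
{
	assert(v <= UINT16_MAX);	/* must fit in the integer part */
	return (fixed_16_16){ .val = v << 16 };
}

static uint32_t fp_to_u32(fixed_16_16 fp)
{
	return fp.val >> 16;		/* truncate the fractional part */
}

static fixed_16_16 fp_mul(fixed_16_16 a, fixed_16_16 b)
{
	/* 32x32 -> 64 bit multiply, then shift back down to 16.16. */
	uint64_t tmp = (uint64_t)a.val * b.val;

	assert((tmp >> 16) <= UINT32_MAX);
	return (fixed_16_16){ .val = (uint32_t)(tmp >> 16) };
}

int main(void)
{
	fixed_16_16 three = u32_to_fp(3);
	fixed_16_16 half = { .val = 1 << 15 };	/* 0.5 in 16.16 */

	/* 3 * 0.5 = 1.5, which truncates to 1. */
	printf("%u\n", fp_to_u32(fp_mul(three, half)));
	return 0;
}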