diff --git a/Documentation/arch/arm64/silicon-errata.rst b/Documentation/arch/arm64/silicon-errata.rst index 77db10e944f0..b42fea07c5ce 100644 --- a/Documentation/arch/arm64/silicon-errata.rst +++ b/Documentation/arch/arm64/silicon-errata.rst @@ -255,8 +255,9 @@ stable kernels. +----------------+-----------------+-----------------+-----------------------------+ | Hisilicon | Hip08 SMMU PMCG | #162001800 | N/A | +----------------+-----------------+-----------------+-----------------------------+ -| Hisilicon | Hip{08,09,10,10C| #162001900 | N/A | -| | ,11} SMMU PMCG | | | +| Hisilicon | Hip{08,09,09A,10| #162001900 | N/A | +| | ,10C,11} | | | +| | SMMU PMCG | | | +----------------+-----------------+-----------------+-----------------------------+ | Hisilicon | Hip09 | #162100801 | HISILICON_ERRATUM_162100801 | +----------------+-----------------+-----------------+-----------------------------+ diff --git a/Documentation/devicetree/bindings/sound/realtek,rt5645.yaml b/Documentation/devicetree/bindings/sound/realtek,rt5645.yaml index 13f09f1bc800..0a698798c22b 100644 --- a/Documentation/devicetree/bindings/sound/realtek,rt5645.yaml +++ b/Documentation/devicetree/bindings/sound/realtek,rt5645.yaml @@ -51,7 +51,7 @@ properties: description: Power supply for AVDD, providing 1.8V. cpvdd-supply: - description: Power supply for CPVDD, providing 3.5V. + description: Power supply for CPVDD, providing 1.8V. hp-detect-gpios: description: diff --git a/Makefile b/Makefile index 685a57f6c8d2..8a10105c2539 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 6 PATCHLEVEL = 12 -SUBLEVEL = 7 +SUBLEVEL = 8 EXTRAVERSION = NAME = Baby Opossum Posse diff --git a/arch/arm64/boot/dts/broadcom/bcm2712.dtsi b/arch/arm64/boot/dts/broadcom/bcm2712.dtsi index 6e5a984c1d4e..26a29e5e5078 100644 --- a/arch/arm64/boot/dts/broadcom/bcm2712.dtsi +++ b/arch/arm64/boot/dts/broadcom/bcm2712.dtsi @@ -67,7 +67,7 @@ cpu0: cpu@0 { l2_cache_l0: l2-cache-l0 { compatible = "cache"; cache-size = <0x80000>; - cache-line-size = <128>; + cache-line-size = <64>; cache-sets = <1024>; //512KiB(size)/64(line-size)=8192ways/8-way set cache-level = <2>; cache-unified; @@ -91,7 +91,7 @@ cpu1: cpu@1 { l2_cache_l1: l2-cache-l1 { compatible = "cache"; cache-size = <0x80000>; - cache-line-size = <128>; + cache-line-size = <64>; cache-sets = <1024>; //512KiB(size)/64(line-size)=8192ways/8-way set cache-level = <2>; cache-unified; @@ -115,7 +115,7 @@ cpu2: cpu@2 { l2_cache_l2: l2-cache-l2 { compatible = "cache"; cache-size = <0x80000>; - cache-line-size = <128>; + cache-line-size = <64>; cache-sets = <1024>; //512KiB(size)/64(line-size)=8192ways/8-way set cache-level = <2>; cache-unified; @@ -139,7 +139,7 @@ cpu3: cpu@3 { l2_cache_l3: l2-cache-l3 { compatible = "cache"; cache-size = <0x80000>; - cache-line-size = <128>; + cache-line-size = <64>; cache-sets = <1024>; //512KiB(size)/64(line-size)=8192ways/8-way set cache-level = <2>; cache-unified; diff --git a/arch/loongarch/include/asm/inst.h b/arch/loongarch/include/asm/inst.h index 944482063f14..3089785ca97e 100644 --- a/arch/loongarch/include/asm/inst.h +++ b/arch/loongarch/include/asm/inst.h @@ -683,7 +683,17 @@ DEF_EMIT_REG2I16_FORMAT(blt, blt_op) DEF_EMIT_REG2I16_FORMAT(bge, bge_op) DEF_EMIT_REG2I16_FORMAT(bltu, bltu_op) DEF_EMIT_REG2I16_FORMAT(bgeu, bgeu_op) -DEF_EMIT_REG2I16_FORMAT(jirl, jirl_op) + +static inline void emit_jirl(union loongarch_instruction *insn, + enum loongarch_gpr rd, + enum loongarch_gpr rj, + int offset) +{ + 
insn->reg2i16_format.opcode = jirl_op; + insn->reg2i16_format.immediate = offset; + insn->reg2i16_format.rd = rd; + insn->reg2i16_format.rj = rj; +} #define DEF_EMIT_REG2BSTRD_FORMAT(NAME, OP) \ static inline void emit_##NAME(union loongarch_instruction *insn, \ diff --git a/arch/loongarch/kernel/efi.c b/arch/loongarch/kernel/efi.c index 2bf86aeda874..de21e72759ee 100644 --- a/arch/loongarch/kernel/efi.c +++ b/arch/loongarch/kernel/efi.c @@ -95,7 +95,7 @@ static void __init init_screen_info(void) memset(si, 0, sizeof(*si)); early_memunmap(si, sizeof(*si)); - memblock_reserve(screen_info.lfb_base, screen_info.lfb_size); + memblock_reserve(__screen_info_lfb_base(&screen_info), screen_info.lfb_size); } void __init efi_init(void) diff --git a/arch/loongarch/kernel/inst.c b/arch/loongarch/kernel/inst.c index 3050329556d1..14d7d700bcb9 100644 --- a/arch/loongarch/kernel/inst.c +++ b/arch/loongarch/kernel/inst.c @@ -332,7 +332,7 @@ u32 larch_insn_gen_jirl(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm) return INSN_BREAK; } - emit_jirl(&insn, rj, rd, imm >> 2); + emit_jirl(&insn, rd, rj, imm >> 2); return insn.word; } diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c index dd350cba1252..ea357a3edc09 100644 --- a/arch/loongarch/net/bpf_jit.c +++ b/arch/loongarch/net/bpf_jit.c @@ -181,13 +181,13 @@ static void __build_epilogue(struct jit_ctx *ctx, bool is_tail_call) /* Set return value */ emit_insn(ctx, addiw, LOONGARCH_GPR_A0, regmap[BPF_REG_0], 0); /* Return to the caller */ - emit_insn(ctx, jirl, LOONGARCH_GPR_RA, LOONGARCH_GPR_ZERO, 0); + emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_RA, 0); } else { /* * Call the next bpf prog and skip the first instruction * of TCC initialization. */ - emit_insn(ctx, jirl, LOONGARCH_GPR_T3, LOONGARCH_GPR_ZERO, 1); + emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_T3, 1); } } @@ -904,7 +904,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext return ret; move_addr(ctx, t1, func_addr); - emit_insn(ctx, jirl, t1, LOONGARCH_GPR_RA, 0); + emit_insn(ctx, jirl, LOONGARCH_GPR_RA, t1, 0); move_reg(ctx, regmap[BPF_REG_0], LOONGARCH_GPR_A0); break; diff --git a/arch/powerpc/platforms/book3s/vas-api.c b/arch/powerpc/platforms/book3s/vas-api.c index f381b177ea06..0b6365d85d11 100644 --- a/arch/powerpc/platforms/book3s/vas-api.c +++ b/arch/powerpc/platforms/book3s/vas-api.c @@ -464,7 +464,43 @@ static vm_fault_t vas_mmap_fault(struct vm_fault *vmf) return VM_FAULT_SIGBUS; } +/* + * During mmap() paste address, mapping VMA is saved in VAS window + * struct which is used to unmap during migration if the window is + * still open. But the user space can remove this mapping with + * munmap() before closing the window and the VMA address will + * be invalid. Set VAS window VMA to NULL in this function which + * is called before VMA free. + */ +static void vas_mmap_close(struct vm_area_struct *vma) +{ + struct file *fp = vma->vm_file; + struct coproc_instance *cp_inst = fp->private_data; + struct vas_window *txwin; + + /* Should not happen */ + if (!cp_inst || !cp_inst->txwin) { + pr_err("No attached VAS window for the paste address mmap\n"); + return; + } + + txwin = cp_inst->txwin; + /* + * task_ref.vma is set in coproc_mmap() during mmap paste + * address. So it has to be the same VMA that is getting freed. 
+ */ + if (WARN_ON(txwin->task_ref.vma != vma)) { + pr_err("Invalid paste address mmaping\n"); + return; + } + + mutex_lock(&txwin->task_ref.mmap_mutex); + txwin->task_ref.vma = NULL; + mutex_unlock(&txwin->task_ref.mmap_mutex); +} + static const struct vm_operations_struct vas_vm_ops = { + .close = vas_mmap_close, .fault = vas_mmap_fault, }; diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index d879478db3f5..28b4312f2563 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -429,6 +429,16 @@ static struct event_constraint intel_lnc_event_constraints[] = { EVENT_CONSTRAINT_END }; +static struct extra_reg intel_lnc_extra_regs[] __read_mostly = { + INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0xfffffffffffull, RSP_0), + INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0xfffffffffffull, RSP_1), + INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), + INTEL_UEVENT_EXTRA_REG(0x02c6, MSR_PEBS_FRONTEND, 0x9, FE), + INTEL_UEVENT_EXTRA_REG(0x03c6, MSR_PEBS_FRONTEND, 0x7fff1f, FE), + INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0xf, FE), + INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE), + EVENT_EXTRA_END +}; EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3"); EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3"); @@ -6344,7 +6354,7 @@ static __always_inline void intel_pmu_init_lnc(struct pmu *pmu) intel_pmu_init_glc(pmu); hybrid(pmu, event_constraints) = intel_lnc_event_constraints; hybrid(pmu, pebs_constraints) = intel_lnc_pebs_event_constraints; - hybrid(pmu, extra_regs) = intel_rwc_extra_regs; + hybrid(pmu, extra_regs) = intel_lnc_extra_regs; } static __always_inline void intel_pmu_init_skt(struct pmu *pmu) diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 6188650707ab..19a9fd974e3e 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -2496,6 +2496,7 @@ void __init intel_ds_init(void) x86_pmu.large_pebs_flags |= PERF_SAMPLE_TIME; break; + case 6: case 5: x86_pmu.pebs_ept = 1; fallthrough; diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c index d98fac567684..e7aba7349231 100644 --- a/arch/x86/events/intel/uncore.c +++ b/arch/x86/events/intel/uncore.c @@ -1910,6 +1910,7 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = { X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, &adl_uncore_init), X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X, &gnr_uncore_init), X86_MATCH_VFM(INTEL_ATOM_CRESTMONT, &gnr_uncore_init), + X86_MATCH_VFM(INTEL_ATOM_DARKMONT_X, &gnr_uncore_init), {}, }; MODULE_DEVICE_TABLE(x86cpu, intel_uncore_match); diff --git a/arch/x86/kernel/cet.c b/arch/x86/kernel/cet.c index d2c732a34e5d..303bf74d175b 100644 --- a/arch/x86/kernel/cet.c +++ b/arch/x86/kernel/cet.c @@ -81,6 +81,34 @@ static void do_user_cp_fault(struct pt_regs *regs, unsigned long error_code) static __ro_after_init bool ibt_fatal = true; +/* + * By definition, all missing-ENDBRANCH #CPs are a result of WFE && !ENDBR. + * + * For the kernel IBT no ENDBR selftest where #CPs are deliberately triggered, + * the WFE state of the interrupted context needs to be cleared to let execution + * continue. Otherwise when the CPU resumes from the instruction that just + * caused the previous #CP, another missing-ENDBRANCH #CP is raised and the CPU + * enters a dead loop. + * + * This is not a problem with IDT because it doesn't preserve WFE and IRET doesn't + * set WFE. 
But FRED provides space on the entry stack (in an expanded CS area) + * to save and restore the WFE state, thus the WFE state is no longer clobbered, + * so software must clear it. + */ +static void ibt_clear_fred_wfe(struct pt_regs *regs) +{ + /* + * No need to do any FRED checks. + * + * For IDT event delivery, the high-order 48 bits of CS are pushed + * as 0s into the stack, and later IRET ignores these bits. + * + * For FRED, a test to check if fred_cs.wfe is set would be dropped + * by compilers. + */ + regs->fred_cs.wfe = 0; +} + static void do_kernel_cp_fault(struct pt_regs *regs, unsigned long error_code) { if ((error_code & CP_EC) != CP_ENDBR) { @@ -90,6 +118,7 @@ static void do_kernel_cp_fault(struct pt_regs *regs, unsigned long error_code) if (unlikely(regs->ip == (unsigned long)&ibt_selftest_noendbr)) { regs->ax = 0; + ibt_clear_fred_wfe(regs); return; } @@ -97,6 +126,7 @@ static void do_kernel_cp_fault(struct pt_regs *regs, unsigned long error_code) if (!ibt_fatal) { printk(KERN_DEFAULT CUT_HERE); __warn(__FILE__, __LINE__, (void *)regs->ip, TAINT_WARN, regs, NULL); + ibt_clear_fred_wfe(regs); return; } BUG(); diff --git a/block/blk-mq.c b/block/blk-mq.c index d5995021815d..4e76651e786d 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -3903,16 +3903,11 @@ static int blk_mq_init_hctx(struct request_queue *q, { hctx->queue_num = hctx_idx; - if (!(hctx->flags & BLK_MQ_F_STACKING)) - cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE, - &hctx->cpuhp_online); - cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead); - hctx->tags = set->tags[hctx_idx]; if (set->ops->init_hctx && set->ops->init_hctx(hctx, set->driver_data, hctx_idx)) - goto unregister_cpu_notifier; + goto fail; if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, hctx->numa_node)) @@ -3921,6 +3916,11 @@ static int blk_mq_init_hctx(struct request_queue *q, if (xa_insert(&q->hctx_table, hctx_idx, hctx, GFP_KERNEL)) goto exit_flush_rq; + if (!(hctx->flags & BLK_MQ_F_STACKING)) + cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE, + &hctx->cpuhp_online); + cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead); + return 0; exit_flush_rq: @@ -3929,8 +3929,7 @@ static int blk_mq_init_hctx(struct request_queue *q, exit_hctx: if (set->ops->exit_hctx) set->ops->exit_hctx(hctx, hctx_idx); - unregister_cpu_notifier: - blk_mq_remove_cpuhp(hctx); + fail: return -1; } diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index 4c745a26226b..bf3be532e089 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -1703,6 +1703,8 @@ static struct acpi_platform_list pmcg_plat_info[] __initdata = { /* HiSilicon Hip09 Platform */ {"HISI ", "HIP09 ", 0, ACPI_SIG_IORT, greater_than_or_equal, "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09}, + {"HISI ", "HIP09A ", 0, ACPI_SIG_IORT, greater_than_or_equal, + "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09}, /* HiSilicon Hip10/11 Platform uses the same SMMU IP with Hip09 */ {"HISI ", "HIP10 ", 0, ACPI_SIG_IORT, greater_than_or_equal, "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09}, diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index e3e2afc2c83c..5962ea1230a1 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -1063,13 +1063,13 @@ struct regmap *__regmap_init(struct device *dev, /* Sanity check */ if (range_cfg->range_max < range_cfg->range_min) { - dev_err(map->dev, "Invalid range %d: %d < %d\n", i, + dev_err(map->dev, "Invalid range %d: %u < %u\n", 
i, range_cfg->range_max, range_cfg->range_min); goto err_range; } if (range_cfg->range_max > map->max_register) { - dev_err(map->dev, "Invalid range %d: %d > %d\n", i, + dev_err(map->dev, "Invalid range %d: %u > %u\n", i, range_cfg->range_max, map->max_register); goto err_range; } diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c index 90bc605ff6c2..458ac54e7b20 100644 --- a/drivers/block/ublk_drv.c +++ b/drivers/block/ublk_drv.c @@ -1599,6 +1599,21 @@ static void ublk_unquiesce_dev(struct ublk_device *ub) blk_mq_kick_requeue_list(ub->ub_disk->queue); } +static struct gendisk *ublk_detach_disk(struct ublk_device *ub) +{ + struct gendisk *disk; + + /* Sync with ublk_abort_queue() by holding the lock */ + spin_lock(&ub->lock); + disk = ub->ub_disk; + ub->dev_info.state = UBLK_S_DEV_DEAD; + ub->dev_info.ublksrv_pid = -1; + ub->ub_disk = NULL; + spin_unlock(&ub->lock); + + return disk; +} + static void ublk_stop_dev(struct ublk_device *ub) { struct gendisk *disk; @@ -1612,14 +1627,7 @@ static void ublk_stop_dev(struct ublk_device *ub) ublk_unquiesce_dev(ub); } del_gendisk(ub->ub_disk); - - /* Sync with ublk_abort_queue() by holding the lock */ - spin_lock(&ub->lock); - disk = ub->ub_disk; - ub->dev_info.state = UBLK_S_DEV_DEAD; - ub->dev_info.ublksrv_pid = -1; - ub->ub_disk = NULL; - spin_unlock(&ub->lock); + disk = ublk_detach_disk(ub); put_disk(disk); unlock: mutex_unlock(&ub->mutex); @@ -2295,7 +2303,7 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd) out_put_cdev: if (ret) { - ub->dev_info.state = UBLK_S_DEV_DEAD; + ublk_detach_disk(ub); ublk_put_device(ub); } if (ret) diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 43c96b73a711..0e50b65e1dbf 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -1587,9 +1587,12 @@ static void virtblk_remove(struct virtio_device *vdev) static int virtblk_freeze(struct virtio_device *vdev) { struct virtio_blk *vblk = vdev->priv; + struct request_queue *q = vblk->disk->queue; /* Ensure no requests in virtqueues before deleting vqs. 
*/ - blk_mq_freeze_queue(vblk->disk->queue); + blk_mq_freeze_queue(q); + blk_mq_quiesce_queue_nowait(q); + blk_mq_unfreeze_queue(q); /* Ensure we don't receive any more interrupts */ virtio_reset_device(vdev); @@ -1613,8 +1616,8 @@ static int virtblk_restore(struct virtio_device *vdev) return ret; virtio_device_ready(vdev); + blk_mq_unquiesce_queue(vblk->disk->queue); - blk_mq_unfreeze_queue(vblk->disk->queue); return 0; } #endif diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 11755cb1eb16..0c85c981a833 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -870,6 +870,7 @@ struct btusb_data { int (*suspend)(struct hci_dev *hdev); int (*resume)(struct hci_dev *hdev); + int (*disconnect)(struct hci_dev *hdev); int oob_wake_irq; /* irq for out-of-band wake-on-bt */ unsigned cmd_timeout_cnt; @@ -2643,11 +2644,11 @@ static void btusb_mtk_claim_iso_intf(struct btusb_data *data) init_usb_anchor(&btmtk_data->isopkt_anchor); } -static void btusb_mtk_release_iso_intf(struct btusb_data *data) +static void btusb_mtk_release_iso_intf(struct hci_dev *hdev) { - struct btmtk_data *btmtk_data = hci_get_priv(data->hdev); + struct btmtk_data *btmtk_data = hci_get_priv(hdev); - if (btmtk_data->isopkt_intf) { + if (test_bit(BTMTK_ISOPKT_OVER_INTR, &btmtk_data->flags)) { usb_kill_anchored_urbs(&btmtk_data->isopkt_anchor); clear_bit(BTMTK_ISOPKT_RUNNING, &btmtk_data->flags); @@ -2661,6 +2662,16 @@ static void btusb_mtk_release_iso_intf(struct btusb_data *data) clear_bit(BTMTK_ISOPKT_OVER_INTR, &btmtk_data->flags); } +static int btusb_mtk_disconnect(struct hci_dev *hdev) +{ + /* This function describes the specific additional steps taken by MediaTek + * when Bluetooth usb driver's resume function is called. + */ + btusb_mtk_release_iso_intf(hdev); + + return 0; +} + static int btusb_mtk_reset(struct hci_dev *hdev, void *rst_data) { struct btusb_data *data = hci_get_drvdata(hdev); @@ -2677,8 +2688,8 @@ static int btusb_mtk_reset(struct hci_dev *hdev, void *rst_data) if (err < 0) return err; - if (test_bit(BTMTK_ISOPKT_RUNNING, &btmtk_data->flags)) - btusb_mtk_release_iso_intf(data); + /* Release MediaTek ISO data interface */ + btusb_mtk_release_iso_intf(hdev); btusb_stop_traffic(data); usb_kill_anchored_urbs(&data->tx_anchor); @@ -2723,22 +2734,24 @@ static int btusb_mtk_setup(struct hci_dev *hdev) btmtk_data->reset_sync = btusb_mtk_reset; /* Claim ISO data interface and endpoint */ - btmtk_data->isopkt_intf = usb_ifnum_to_if(data->udev, MTK_ISO_IFNUM); - if (btmtk_data->isopkt_intf) + if (!test_bit(BTMTK_ISOPKT_OVER_INTR, &btmtk_data->flags)) { + btmtk_data->isopkt_intf = usb_ifnum_to_if(data->udev, MTK_ISO_IFNUM); btusb_mtk_claim_iso_intf(data); + } return btmtk_usb_setup(hdev); } static int btusb_mtk_shutdown(struct hci_dev *hdev) { - struct btusb_data *data = hci_get_drvdata(hdev); - struct btmtk_data *btmtk_data = hci_get_priv(hdev); + int ret; - if (test_bit(BTMTK_ISOPKT_RUNNING, &btmtk_data->flags)) - btusb_mtk_release_iso_intf(data); + ret = btmtk_usb_shutdown(hdev); - return btmtk_usb_shutdown(hdev); + /* Release MediaTek iso interface after shutdown */ + btusb_mtk_release_iso_intf(hdev); + + return ret; } #ifdef CONFIG_PM @@ -3850,6 +3863,7 @@ static int btusb_probe(struct usb_interface *intf, data->recv_acl = btmtk_usb_recv_acl; data->suspend = btmtk_usb_suspend; data->resume = btmtk_usb_resume; + data->disconnect = btusb_mtk_disconnect; } if (id->driver_info & BTUSB_SWAVE) { @@ -4040,6 +4054,9 @@ static void btusb_disconnect(struct usb_interface *intf) if 
(data->diag) usb_set_intfdata(data->diag, NULL); + if (data->disconnect) + data->disconnect(hdev); + hci_unregister_dev(hdev); if (intf == data->intf) { diff --git a/drivers/dma/amd/qdma/qdma.c b/drivers/dma/amd/qdma/qdma.c index b0a1f3ad851b..4761fa255015 100644 --- a/drivers/dma/amd/qdma/qdma.c +++ b/drivers/dma/amd/qdma/qdma.c @@ -7,9 +7,9 @@ #include <linux/bitfield.h> #include <linux/bitops.h> #include <linux/dmaengine.h> +#include <linux/dma-mapping.h> #include <linux/module.h> #include <linux/mod_devicetable.h> -#include <linux/dma-map-ops.h> #include <linux/platform_device.h> #include <linux/platform_data/amd_qdma.h> #include <linux/regmap.h> @@ -492,18 +492,9 @@ static int qdma_device_verify(struct qdma_device *qdev) static int qdma_device_setup(struct qdma_device *qdev) { - struct device *dev = &qdev->pdev->dev; u32 ring_sz = QDMA_DEFAULT_RING_SIZE; int ret = 0; - while (dev && get_dma_ops(dev)) - dev = dev->parent; - if (!dev) { - qdma_err(qdev, "dma device not found"); - return -EINVAL; - } - set_dma_ops(&qdev->pdev->dev, get_dma_ops(dev)); - ret = qdma_setup_fmap_context(qdev); if (ret) { qdma_err(qdev, "Failed setup fmap context"); @@ -548,11 +539,12 @@ static void qdma_free_queue_resources(struct dma_chan *chan) { struct qdma_queue *queue = to_qdma_queue(chan); struct qdma_device *qdev = queue->qdev; - struct device *dev = qdev->dma_dev.dev; + struct qdma_platdata *pdata; qdma_clear_queue_context(queue); vchan_free_chan_resources(&queue->vchan); - dma_free_coherent(dev, queue->ring_size * QDMA_MM_DESC_SIZE, + pdata = dev_get_platdata(&qdev->pdev->dev); + dma_free_coherent(pdata->dma_dev, queue->ring_size * QDMA_MM_DESC_SIZE, queue->desc_base, queue->dma_desc_base); } @@ -565,6 +557,7 @@ static int qdma_alloc_queue_resources(struct dma_chan *chan) struct qdma_queue *queue = to_qdma_queue(chan); struct qdma_device *qdev = queue->qdev; struct qdma_ctxt_sw_desc desc; + struct qdma_platdata *pdata; size_t size; int ret; @@ -572,8 +565,9 @@ static int qdma_alloc_queue_resources(struct dma_chan *chan) if (ret) return ret; + pdata = dev_get_platdata(&qdev->pdev->dev); size = queue->ring_size * QDMA_MM_DESC_SIZE; - queue->desc_base = dma_alloc_coherent(qdev->dma_dev.dev, size, + queue->desc_base = dma_alloc_coherent(pdata->dma_dev, size, &queue->dma_desc_base, GFP_KERNEL); if (!queue->desc_base) { @@ -588,7 +582,7 @@ static int qdma_alloc_queue_resources(struct dma_chan *chan) if (ret) { qdma_err(qdev, "Failed to setup SW desc ctxt for %s", chan->name); - dma_free_coherent(qdev->dma_dev.dev, size, queue->desc_base, + dma_free_coherent(pdata->dma_dev, size, queue->desc_base, queue->dma_desc_base); return ret; } @@ -948,8 +942,9 @@ static int qdma_init_error_irq(struct qdma_device *qdev) static int qdmam_alloc_qintr_rings(struct qdma_device *qdev) { - u32 ctxt[QDMA_CTXT_REGMAP_LEN]; + struct qdma_platdata *pdata = dev_get_platdata(&qdev->pdev->dev); struct device *dev = &qdev->pdev->dev; + u32 ctxt[QDMA_CTXT_REGMAP_LEN]; struct qdma_intr_ring *ring; struct qdma_ctxt_intr intr_ctxt; u32 vector; @@ -969,7 +964,8 @@ static int qdmam_alloc_qintr_rings(struct qdma_device *qdev) ring->msix_id = qdev->err_irq_idx + i + 1; ring->ridx = i; ring->color = 1; - ring->base = dmam_alloc_coherent(dev, QDMA_INTR_RING_SIZE, + ring->base = dmam_alloc_coherent(pdata->dma_dev, + QDMA_INTR_RING_SIZE, &ring->dev_base, GFP_KERNEL); if (!ring->base) { qdma_err(qdev, "Failed to alloc intr ring %d", i); diff --git a/drivers/dma/apple-admac.c b/drivers/dma/apple-admac.c index 9588773dd2eb..037ec38730cf 100644 
--- a/drivers/dma/apple-admac.c +++ b/drivers/dma/apple-admac.c @@ -153,6 +153,8 @@ static int admac_alloc_sram_carveout(struct admac_data *ad, { struct admac_sram *sram; int i, ret = 0, nblocks; + ad->txcache.size = readl_relaxed(ad->base + REG_TX_SRAM_SIZE); + ad->rxcache.size = readl_relaxed(ad->base + REG_RX_SRAM_SIZE); if (dir == DMA_MEM_TO_DEV) sram = &ad->txcache; @@ -912,12 +914,7 @@ static int admac_probe(struct platform_device *pdev) goto free_irq; } - ad->txcache.size = readl_relaxed(ad->base + REG_TX_SRAM_SIZE); - ad->rxcache.size = readl_relaxed(ad->base + REG_RX_SRAM_SIZE); - dev_info(&pdev->dev, "Audio DMA Controller\n"); - dev_info(&pdev->dev, "imprint %x TX cache %u RX cache %u\n", - readl_relaxed(ad->base + REG_IMPRINT), ad->txcache.size, ad->rxcache.size); return 0; diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index 299396121e6d..e847ad66dc0b 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -1363,6 +1363,8 @@ at_xdmac_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value, return NULL; desc = at_xdmac_memset_create_desc(chan, atchan, dest, len, value); + if (!desc) + return NULL; list_add_tail(&desc->desc_node, &desc->descs_list); desc->tx_dma_desc.cookie = -EBUSY; diff --git a/drivers/dma/dw/acpi.c b/drivers/dma/dw/acpi.c index c510c109d2c3..b6452fffa657 100644 --- a/drivers/dma/dw/acpi.c +++ b/drivers/dma/dw/acpi.c @@ -8,13 +8,15 @@ static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param) { + struct dw_dma *dw = to_dw_dma(chan->device); + struct dw_dma_chip_pdata *data = dev_get_drvdata(dw->dma.dev); struct acpi_dma_spec *dma_spec = param; struct dw_dma_slave slave = { .dma_dev = dma_spec->dev, .src_id = dma_spec->slave_id, .dst_id = dma_spec->slave_id, - .m_master = 0, - .p_master = 1, + .m_master = data->m_master, + .p_master = data->p_master, }; return dw_dma_filter(chan, &slave); diff --git a/drivers/dma/dw/internal.h b/drivers/dma/dw/internal.h index 563ce73488db..f1bd06a20cd6 100644 --- a/drivers/dma/dw/internal.h +++ b/drivers/dma/dw/internal.h @@ -51,11 +51,15 @@ struct dw_dma_chip_pdata { int (*probe)(struct dw_dma_chip *chip); int (*remove)(struct dw_dma_chip *chip); struct dw_dma_chip *chip; + u8 m_master; + u8 p_master; }; static __maybe_unused const struct dw_dma_chip_pdata dw_dma_chip_pdata = { .probe = dw_dma_probe, .remove = dw_dma_remove, + .m_master = 0, + .p_master = 1, }; static const struct dw_dma_platform_data idma32_pdata = { @@ -72,6 +76,8 @@ static __maybe_unused const struct dw_dma_chip_pdata idma32_chip_pdata = { .pdata = &idma32_pdata, .probe = idma32_dma_probe, .remove = idma32_dma_remove, + .m_master = 0, + .p_master = 0, }; static const struct dw_dma_platform_data xbar_pdata = { @@ -88,6 +94,8 @@ static __maybe_unused const struct dw_dma_chip_pdata xbar_chip_pdata = { .pdata = &xbar_pdata, .probe = idma32_dma_probe, .remove = idma32_dma_remove, + .m_master = 0, + .p_master = 0, }; #endif /* _DMA_DW_INTERNAL_H */ diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c index ad2d4d012cf7..e8a0eb81726a 100644 --- a/drivers/dma/dw/pci.c +++ b/drivers/dma/dw/pci.c @@ -56,10 +56,10 @@ static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) if (ret) return ret; - dw_dma_acpi_controller_register(chip->dw); - pci_set_drvdata(pdev, data); + dw_dma_acpi_controller_register(chip->dw); + return 0; } diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h index ce37e1ee9c46..fe8f103d4a63 100644 --- a/drivers/dma/fsl-edma-common.h +++ 
b/drivers/dma/fsl-edma-common.h @@ -166,6 +166,7 @@ struct fsl_edma_chan { struct work_struct issue_worker; struct platform_device *pdev; struct device *pd_dev; + struct device_link *pd_dev_link; u32 srcid; struct clk *clk; int priority; diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c index f9f1eda79254..70cb7fda757a 100644 --- a/drivers/dma/fsl-edma-main.c +++ b/drivers/dma/fsl-edma-main.c @@ -417,10 +417,33 @@ static const struct of_device_id fsl_edma_dt_ids[] = { }; MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids); +static void fsl_edma3_detach_pd(struct fsl_edma_engine *fsl_edma) +{ + struct fsl_edma_chan *fsl_chan; + int i; + + for (i = 0; i < fsl_edma->n_chans; i++) { + if (fsl_edma->chan_masked & BIT(i)) + continue; + fsl_chan = &fsl_edma->chans[i]; + if (fsl_chan->pd_dev_link) + device_link_del(fsl_chan->pd_dev_link); + if (fsl_chan->pd_dev) { + dev_pm_domain_detach(fsl_chan->pd_dev, false); + pm_runtime_dont_use_autosuspend(fsl_chan->pd_dev); + pm_runtime_set_suspended(fsl_chan->pd_dev); + } + } +} + +static void devm_fsl_edma3_detach_pd(void *data) +{ + fsl_edma3_detach_pd(data); +} + static int fsl_edma3_attach_pd(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma) { struct fsl_edma_chan *fsl_chan; - struct device_link *link; struct device *pd_chan; struct device *dev; int i; @@ -436,15 +459,16 @@ static int fsl_edma3_attach_pd(struct platform_device *pdev, struct fsl_edma_eng pd_chan = dev_pm_domain_attach_by_id(dev, i); if (IS_ERR_OR_NULL(pd_chan)) { dev_err(dev, "Failed attach pd %d\n", i); - return -EINVAL; + goto detach; } - link = device_link_add(dev, pd_chan, DL_FLAG_STATELESS | + fsl_chan->pd_dev_link = device_link_add(dev, pd_chan, DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE); - if (!link) { + if (!fsl_chan->pd_dev_link) { dev_err(dev, "Failed to add device_link to %d\n", i); - return -EINVAL; + dev_pm_domain_detach(pd_chan, false); + goto detach; } fsl_chan->pd_dev = pd_chan; @@ -455,6 +479,10 @@ static int fsl_edma3_attach_pd(struct platform_device *pdev, struct fsl_edma_eng } return 0; + +detach: + fsl_edma3_detach_pd(fsl_edma); + return -EINVAL; } static int fsl_edma_probe(struct platform_device *pdev) @@ -544,6 +572,9 @@ static int fsl_edma_probe(struct platform_device *pdev) ret = fsl_edma3_attach_pd(pdev, fsl_edma); if (ret) return ret; + ret = devm_add_action_or_reset(&pdev->dev, devm_fsl_edma3_detach_pd, fsl_edma); + if (ret) + return ret; } if (drvdata->flags & FSL_EDMA_DRV_TCD64) diff --git a/drivers/dma/ls2x-apb-dma.c b/drivers/dma/ls2x-apb-dma.c index 9652e8666722..b4f18be62945 100644 --- a/drivers/dma/ls2x-apb-dma.c +++ b/drivers/dma/ls2x-apb-dma.c @@ -31,7 +31,7 @@ #define LDMA_ASK_VALID BIT(2) #define LDMA_START BIT(3) /* DMA start operation */ #define LDMA_STOP BIT(4) /* DMA stop operation */ -#define LDMA_CONFIG_MASK GENMASK(4, 0) /* DMA controller config bits mask */ +#define LDMA_CONFIG_MASK GENMASK_ULL(4, 0) /* DMA controller config bits mask */ /* Bitfields in ndesc_addr field of HW descriptor */ #define LDMA_DESC_EN BIT(0) /*1: The next descriptor is valid */ diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 43efce77bb57..40b76b40bc30 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -1388,6 +1388,7 @@ static int mv_xor_probe(struct platform_device *pdev) irq = irq_of_parse_and_map(np, 0); if (!irq) { ret = -ENODEV; + of_node_put(np); goto err_channel_add; } @@ -1396,6 +1397,7 @@ static int mv_xor_probe(struct platform_device *pdev) if (IS_ERR(chan)) { ret = PTR_ERR(chan); 
irq_dispose_mapping(irq); + of_node_put(np); goto err_channel_add; } diff --git a/drivers/dma/tegra186-gpc-dma.c b/drivers/dma/tegra186-gpc-dma.c index 3642508e88bb..adca05ee98c9 100644 --- a/drivers/dma/tegra186-gpc-dma.c +++ b/drivers/dma/tegra186-gpc-dma.c @@ -231,6 +231,7 @@ struct tegra_dma_channel { bool config_init; char name[30]; enum dma_transfer_direction sid_dir; + enum dma_status status; int id; int irq; int slave_id; @@ -393,6 +394,8 @@ static int tegra_dma_pause(struct tegra_dma_channel *tdc) tegra_dma_dump_chan_regs(tdc); } + tdc->status = DMA_PAUSED; + return ret; } @@ -419,6 +422,8 @@ static void tegra_dma_resume(struct tegra_dma_channel *tdc) val = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSRE); val &= ~TEGRA_GPCDMA_CHAN_CSRE_PAUSE; tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSRE, val); + + tdc->status = DMA_IN_PROGRESS; } static int tegra_dma_device_resume(struct dma_chan *dc) @@ -544,6 +549,7 @@ static void tegra_dma_xfer_complete(struct tegra_dma_channel *tdc) tegra_dma_sid_free(tdc); tdc->dma_desc = NULL; + tdc->status = DMA_COMPLETE; } static void tegra_dma_chan_decode_error(struct tegra_dma_channel *tdc, @@ -716,6 +722,7 @@ static int tegra_dma_terminate_all(struct dma_chan *dc) tdc->dma_desc = NULL; } + tdc->status = DMA_COMPLETE; tegra_dma_sid_free(tdc); vchan_get_all_descriptors(&tdc->vc, &head); spin_unlock_irqrestore(&tdc->vc.lock, flags); @@ -769,6 +776,9 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc, if (ret == DMA_COMPLETE) return ret; + if (tdc->status == DMA_PAUSED) + ret = DMA_PAUSED; + spin_lock_irqsave(&tdc->vc.lock, flags); vd = vchan_find_desc(&tdc->vc, cookie); if (vd) { diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c index bcf3a33123be..f0c6d50d8c33 100644 --- a/drivers/gpu/drm/display/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c @@ -4108,9 +4108,10 @@ static void drm_dp_mst_up_req_work(struct work_struct *work) static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) { struct drm_dp_pending_up_req *up_req; + struct drm_dp_mst_branch *mst_primary; if (!drm_dp_get_one_sb_msg(mgr, true, NULL)) - goto out; + goto out_clear_reply; if (!mgr->up_req_recv.have_eomt) return 0; @@ -4128,10 +4129,19 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) drm_dbg_kms(mgr->dev, "Received unknown up req type, ignoring: %x\n", up_req->msg.req_type); kfree(up_req); - goto out; + goto out_clear_reply; + } + + mutex_lock(&mgr->lock); + mst_primary = mgr->mst_primary; + if (!mst_primary || !drm_dp_mst_topology_try_get_mstb(mst_primary)) { + mutex_unlock(&mgr->lock); + kfree(up_req); + goto out_clear_reply; } + mutex_unlock(&mgr->lock); - drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, up_req->msg.req_type, + drm_dp_send_up_ack_reply(mgr, mst_primary, up_req->msg.req_type, false); if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) { @@ -4148,13 +4158,13 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) conn_stat->peer_device_type); mutex_lock(&mgr->probe_lock); - handle_csn = mgr->mst_primary->link_address_sent; + handle_csn = mst_primary->link_address_sent; mutex_unlock(&mgr->probe_lock); if (!handle_csn) { drm_dbg_kms(mgr->dev, "Got CSN before finish topology probing. 
Skip it."); kfree(up_req); - goto out; + goto out_put_primary; } } else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) { const struct drm_dp_resource_status_notify *res_stat = @@ -4171,7 +4181,9 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) mutex_unlock(&mgr->up_req_lock); queue_work(system_long_wq, &mgr->up_req_work); -out: +out_put_primary: + drm_dp_mst_topology_put_mstb(mst_primary); +out_clear_reply: memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); return 0; } diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c index 5221ee3f1214..c18e463092af 100644 --- a/drivers/gpu/drm/xe/xe_devcoredump.c +++ b/drivers/gpu/drm/xe/xe_devcoredump.c @@ -20,6 +20,7 @@ #include "xe_guc_ct.h" #include "xe_guc_submit.h" #include "xe_hw_engine.h" +#include "xe_pm.h" #include "xe_sched_job.h" #include "xe_vm.h" @@ -143,31 +144,6 @@ static void xe_devcoredump_snapshot_free(struct xe_devcoredump_snapshot *ss) ss->vm = NULL; } -static void xe_devcoredump_deferred_snap_work(struct work_struct *work) -{ - struct xe_devcoredump_snapshot *ss = container_of(work, typeof(*ss), work); - struct xe_devcoredump *coredump = container_of(ss, typeof(*coredump), snapshot); - unsigned int fw_ref; - - /* keep going if fw fails as we still want to save the memory and SW data */ - fw_ref = xe_force_wake_get(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL); - if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) - xe_gt_info(ss->gt, "failed to get forcewake for coredump capture\n"); - xe_vm_snapshot_capture_delayed(ss->vm); - xe_guc_exec_queue_snapshot_capture_delayed(ss->ge); - xe_force_wake_put(gt_to_fw(ss->gt), fw_ref); - - /* Calculate devcoredump size */ - ss->read.size = __xe_devcoredump_read(NULL, INT_MAX, coredump); - - ss->read.buffer = kvmalloc(ss->read.size, GFP_USER); - if (!ss->read.buffer) - return; - - __xe_devcoredump_read(ss->read.buffer, ss->read.size, coredump); - xe_devcoredump_snapshot_free(ss); -} - static ssize_t xe_devcoredump_read(char *buffer, loff_t offset, size_t count, void *data, size_t datalen) { @@ -216,6 +192,45 @@ static void xe_devcoredump_free(void *data) "Xe device coredump has been deleted.\n"); } +static void xe_devcoredump_deferred_snap_work(struct work_struct *work) +{ + struct xe_devcoredump_snapshot *ss = container_of(work, typeof(*ss), work); + struct xe_devcoredump *coredump = container_of(ss, typeof(*coredump), snapshot); + struct xe_device *xe = coredump_to_xe(coredump); + unsigned int fw_ref; + + /* + * NB: Despite passing a GFP_ flags parameter here, more allocations are done + * internally using GFP_KERNEL expliictly. Hence this call must be in the worker + * thread and not in the initial capture call. 
+ */ + dev_coredumpm_timeout(gt_to_xe(ss->gt)->drm.dev, THIS_MODULE, coredump, 0, GFP_KERNEL, + xe_devcoredump_read, xe_devcoredump_free, + XE_COREDUMP_TIMEOUT_JIFFIES); + + xe_pm_runtime_get(xe); + + /* keep going if fw fails as we still want to save the memory and SW data */ + fw_ref = xe_force_wake_get(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) + xe_gt_info(ss->gt, "failed to get forcewake for coredump capture\n"); + xe_vm_snapshot_capture_delayed(ss->vm); + xe_guc_exec_queue_snapshot_capture_delayed(ss->ge); + xe_force_wake_put(gt_to_fw(ss->gt), fw_ref); + + xe_pm_runtime_put(xe); + + /* Calculate devcoredump size */ + ss->read.size = __xe_devcoredump_read(NULL, INT_MAX, coredump); + + ss->read.buffer = kvmalloc(ss->read.size, GFP_USER); + if (!ss->read.buffer) + return; + + __xe_devcoredump_read(ss->read.buffer, ss->read.size, coredump); + xe_devcoredump_snapshot_free(ss); +} + static void devcoredump_snapshot(struct xe_devcoredump *coredump, struct xe_sched_job *job) { @@ -299,10 +314,6 @@ void xe_devcoredump(struct xe_sched_job *job) drm_info(&xe->drm, "Xe device coredump has been created\n"); drm_info(&xe->drm, "Check your /sys/class/drm/card%d/device/devcoredump/data\n", xe->drm.primary->index); - - dev_coredumpm_timeout(xe->drm.dev, THIS_MODULE, coredump, 0, GFP_KERNEL, - xe_devcoredump_read, xe_devcoredump_free, - XE_COREDUMP_TIMEOUT_JIFFIES); } static void xe_driver_devcoredump_fini(void *arg) diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index 98539313cbc9..c5224d43eea4 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c @@ -282,6 +282,7 @@ static const struct of_device_id i2c_imx_dt_ids[] = { { .compatible = "fsl,imx6sll-i2c", .data = &imx6_i2c_hwdata, }, { .compatible = "fsl,imx6sx-i2c", .data = &imx6_i2c_hwdata, }, { .compatible = "fsl,imx6ul-i2c", .data = &imx6_i2c_hwdata, }, + { .compatible = "fsl,imx7d-i2c", .data = &imx6_i2c_hwdata, }, { .compatible = "fsl,imx7s-i2c", .data = &imx6_i2c_hwdata, }, { .compatible = "fsl,imx8mm-i2c", .data = &imx6_i2c_hwdata, }, { .compatible = "fsl,imx8mn-i2c", .data = &imx6_i2c_hwdata, }, diff --git a/drivers/i2c/busses/i2c-microchip-corei2c.c b/drivers/i2c/busses/i2c-microchip-corei2c.c index 0b0a1c4d17ca..b0a51695138a 100644 --- a/drivers/i2c/busses/i2c-microchip-corei2c.c +++ b/drivers/i2c/busses/i2c-microchip-corei2c.c @@ -93,27 +93,35 @@ * @base: pointer to register struct * @dev: device reference * @i2c_clk: clock reference for i2c input clock + * @msg_queue: pointer to the messages requiring sending * @buf: pointer to msg buffer for easier use * @msg_complete: xfer completion object * @adapter: core i2c abstraction * @msg_err: error code for completed message * @bus_clk_rate: current i2c bus clock rate * @isr_status: cached copy of local ISR status + * @total_num: total number of messages to be sent/received + * @current_num: index of the current message being sent/received * @msg_len: number of bytes transferred in msg * @addr: address of the current slave + * @restart_needed: whether or not a repeated start is required after current message */ struct mchp_corei2c_dev { void __iomem *base; struct device *dev; struct clk *i2c_clk; + struct i2c_msg *msg_queue; u8 *buf; struct completion msg_complete; struct i2c_adapter adapter; int msg_err; + int total_num; + int current_num; u32 bus_clk_rate; u32 isr_status; u16 msg_len; u8 addr; + bool restart_needed; }; static void mchp_corei2c_core_disable(struct mchp_corei2c_dev *idev) 
@@ -222,6 +230,47 @@ static int mchp_corei2c_fill_tx(struct mchp_corei2c_dev *idev) return 0; } +static void mchp_corei2c_next_msg(struct mchp_corei2c_dev *idev) +{ + struct i2c_msg *this_msg; + u8 ctrl; + + if (idev->current_num >= idev->total_num) { + complete(&idev->msg_complete); + return; + } + + /* + * If there's been an error, the isr needs to return control + * to the "main" part of the driver, so as not to keep sending + * messages once it completes and clears the SI bit. + */ + if (idev->msg_err) { + complete(&idev->msg_complete); + return; + } + + this_msg = idev->msg_queue++; + + if (idev->current_num < (idev->total_num - 1)) { + struct i2c_msg *next_msg = idev->msg_queue; + + idev->restart_needed = next_msg->flags & I2C_M_RD; + } else { + idev->restart_needed = false; + } + + idev->addr = i2c_8bit_addr_from_msg(this_msg); + idev->msg_len = this_msg->len; + idev->buf = this_msg->buf; + + ctrl = readb(idev->base + CORE_I2C_CTRL); + ctrl |= CTRL_STA; + writeb(ctrl, idev->base + CORE_I2C_CTRL); + + idev->current_num++; +} + static irqreturn_t mchp_corei2c_handle_isr(struct mchp_corei2c_dev *idev) { u32 status = idev->isr_status; @@ -238,8 +287,6 @@ static irqreturn_t mchp_corei2c_handle_isr(struct mchp_corei2c_dev *idev) ctrl &= ~CTRL_STA; writeb(idev->addr, idev->base + CORE_I2C_DATA); writeb(ctrl, idev->base + CORE_I2C_CTRL); - if (idev->msg_len == 0) - finished = true; break; case STATUS_M_ARB_LOST: idev->msg_err = -EAGAIN; @@ -247,10 +294,14 @@ static irqreturn_t mchp_corei2c_handle_isr(struct mchp_corei2c_dev *idev) break; case STATUS_M_SLAW_ACK: case STATUS_M_TX_DATA_ACK: - if (idev->msg_len > 0) + if (idev->msg_len > 0) { mchp_corei2c_fill_tx(idev); - else - last_byte = true; + } else { + if (idev->restart_needed) + finished = true; + else + last_byte = true; + } break; case STATUS_M_TX_DATA_NACK: case STATUS_M_SLAR_NACK: @@ -287,7 +338,7 @@ static irqreturn_t mchp_corei2c_handle_isr(struct mchp_corei2c_dev *idev) mchp_corei2c_stop(idev); if (last_byte || finished) - complete(&idev->msg_complete); + mchp_corei2c_next_msg(idev); return IRQ_HANDLED; } @@ -311,21 +362,48 @@ static irqreturn_t mchp_corei2c_isr(int irq, void *_dev) return ret; } -static int mchp_corei2c_xfer_msg(struct mchp_corei2c_dev *idev, - struct i2c_msg *msg) +static int mchp_corei2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, + int num) { - u8 ctrl; + struct mchp_corei2c_dev *idev = i2c_get_adapdata(adap); + struct i2c_msg *this_msg = msgs; unsigned long time_left; + u8 ctrl; + + mchp_corei2c_core_enable(idev); + + /* + * The isr controls the flow of a transfer, this info needs to be saved + * to a location that it can access the queue information from. + */ + idev->restart_needed = false; + idev->msg_queue = msgs; + idev->total_num = num; + idev->current_num = 0; - idev->addr = i2c_8bit_addr_from_msg(msg); - idev->msg_len = msg->len; - idev->buf = msg->buf; + /* + * But the first entry to the isr is triggered by the start in this + * function, so the first message needs to be "dequeued". 
+ */ + idev->addr = i2c_8bit_addr_from_msg(this_msg); + idev->msg_len = this_msg->len; + idev->buf = this_msg->buf; idev->msg_err = 0; - reinit_completion(&idev->msg_complete); + if (idev->total_num > 1) { + struct i2c_msg *next_msg = msgs + 1; - mchp_corei2c_core_enable(idev); + idev->restart_needed = next_msg->flags & I2C_M_RD; + } + idev->current_num++; + idev->msg_queue++; + + reinit_completion(&idev->msg_complete); + + /* + * Send the first start to pass control to the isr + */ ctrl = readb(idev->base + CORE_I2C_CTRL); ctrl |= CTRL_STA; writeb(ctrl, idev->base + CORE_I2C_CTRL); @@ -335,20 +413,8 @@ static int mchp_corei2c_xfer_msg(struct mchp_corei2c_dev *idev, if (!time_left) return -ETIMEDOUT; - return idev->msg_err; -} - -static int mchp_corei2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, - int num) -{ - struct mchp_corei2c_dev *idev = i2c_get_adapdata(adap); - int i, ret; - - for (i = 0; i < num; i++) { - ret = mchp_corei2c_xfer_msg(idev, msgs++); - if (ret) - return ret; - } + if (idev->msg_err) + return idev->msg_err; return num; } diff --git a/drivers/media/dvb-frontends/dib3000mb.c b/drivers/media/dvb-frontends/dib3000mb.c index c598b2a63325..7c452ddd9e40 100644 --- a/drivers/media/dvb-frontends/dib3000mb.c +++ b/drivers/media/dvb-frontends/dib3000mb.c @@ -51,7 +51,7 @@ MODULE_PARM_DESC(debug, "set debugging level (1=info,2=xfer,4=setfe,8=getfe (|-a static int dib3000_read_reg(struct dib3000_state *state, u16 reg) { u8 wb[] = { ((reg >> 8) | 0x80) & 0xff, reg & 0xff }; - u8 rb[2]; + u8 rb[2] = {}; struct i2c_msg msg[] = { { .addr = state->config.demod_address, .flags = 0, .buf = wb, .len = 2 }, { .addr = state->config.demod_address, .flags = I2C_M_RD, .buf = rb, .len = 2 }, diff --git a/drivers/mtd/nand/raw/arasan-nand-controller.c b/drivers/mtd/nand/raw/arasan-nand-controller.c index 5436ec4a8fde..a52a9f5a75e0 100644 --- a/drivers/mtd/nand/raw/arasan-nand-controller.c +++ b/drivers/mtd/nand/raw/arasan-nand-controller.c @@ -1409,8 +1409,8 @@ static int anfc_parse_cs(struct arasan_nfc *nfc) * case, the "not" chosen CS is assigned to nfc->spare_cs and selected * whenever a GPIO CS must be asserted. 
*/ - if (nfc->cs_array && nfc->ncs > 2) { - if (!nfc->cs_array[0] && !nfc->cs_array[1]) { + if (nfc->cs_array) { + if (nfc->ncs > 2 && !nfc->cs_array[0] && !nfc->cs_array[1]) { dev_err(nfc->dev, "Assign a single native CS when using GPIOs\n"); return -EINVAL; @@ -1478,8 +1478,15 @@ static int anfc_probe(struct platform_device *pdev) static void anfc_remove(struct platform_device *pdev) { + int i; struct arasan_nfc *nfc = platform_get_drvdata(pdev); + for (i = 0; i < nfc->ncs; i++) { + if (nfc->cs_array[i]) { + gpiod_put(nfc->cs_array[i]); + } + } + anfc_chips_cleanup(nfc); } diff --git a/drivers/mtd/nand/raw/atmel/pmecc.c b/drivers/mtd/nand/raw/atmel/pmecc.c index a22aab4ed4e8..3c7dee1be21d 100644 --- a/drivers/mtd/nand/raw/atmel/pmecc.c +++ b/drivers/mtd/nand/raw/atmel/pmecc.c @@ -380,10 +380,8 @@ atmel_pmecc_create_user(struct atmel_pmecc *pmecc, user->delta = user->dmu + req->ecc.strength + 1; gf_tables = atmel_pmecc_get_gf_tables(req); - if (IS_ERR(gf_tables)) { - kfree(user); + if (IS_ERR(gf_tables)) return ERR_CAST(gf_tables); - } user->gf_tables = gf_tables; diff --git a/drivers/mtd/nand/raw/diskonchip.c b/drivers/mtd/nand/raw/diskonchip.c index 8db7fc424571..70d6c2250f32 100644 --- a/drivers/mtd/nand/raw/diskonchip.c +++ b/drivers/mtd/nand/raw/diskonchip.c @@ -1098,7 +1098,7 @@ static inline int __init inftl_partscan(struct mtd_info *mtd, struct mtd_partiti (i == 0) && (ip->firstUnit > 0)) { parts[0].name = " DiskOnChip IPL / Media Header partition"; parts[0].offset = 0; - parts[0].size = mtd->erasesize * ip->firstUnit; + parts[0].size = (uint64_t)mtd->erasesize * ip->firstUnit; numparts = 1; } diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index e95ffe303547..c70da7281551 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -1074,12 +1074,13 @@ int iwl_trans_read_config32(struct iwl_trans *trans, u32 ofs, void iwl_trans_debugfs_cleanup(struct iwl_trans *trans); #endif -#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize) \ - do { \ - if (__builtin_constant_p(bufsize)) \ - BUILD_BUG_ON((bufsize) % sizeof(u32)); \ - iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\ - } while (0) +#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize) \ + ({ \ + if (__builtin_constant_p(bufsize)) \ + BUILD_BUG_ON((bufsize) % sizeof(u32)); \ + iwl_trans_read_mem(trans, addr, buf, \ + (bufsize) / sizeof(u32)); \ + }) int iwl_trans_write_imr_mem(struct iwl_trans *trans, u32 dst_addr, u64 src_addr, u32 byte_cnt); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index 244ca8cab9d1..1a814eb6743e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -3032,13 +3032,18 @@ static bool iwl_mvm_rt_status(struct iwl_trans *trans, u32 base, u32 *err_id) /* cf. 
struct iwl_error_event_table */ u32 valid; __le32 err_id; - } err_info; + } err_info = {}; + int ret; if (!base) return false; - iwl_trans_read_mem_bytes(trans, base, - &err_info, sizeof(err_info)); + ret = iwl_trans_read_mem_bytes(trans, base, + &err_info, sizeof(err_info)); + + if (ret) + return true; + if (err_info.valid && err_id) *err_id = le32_to_cpu(err_info.err_id); @@ -3635,22 +3640,31 @@ int iwl_mvm_fast_resume(struct iwl_mvm *mvm) iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt); if (iwl_mvm_check_rt_status(mvm, NULL)) { + IWL_ERR(mvm, + "iwl_mvm_check_rt_status failed, device is gone during suspend\n"); set_bit(STATUS_FW_ERROR, &mvm->trans->status); iwl_mvm_dump_nic_error_log(mvm); iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_FW_ASSERT, NULL); iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert, false, 0); - return -ENODEV; + mvm->trans->state = IWL_TRANS_NO_FW; + ret = -ENODEV; + + goto out; } ret = iwl_mvm_d3_notif_wait(mvm, &d3_data); + + if (ret) { + IWL_ERR(mvm, "Couldn't get the d3 notif %d\n", ret); + mvm->trans->state = IWL_TRANS_NO_FW; + } + +out: clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status); mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; mvm->fast_resume = false; - if (ret) - IWL_ERR(mvm, "Couldn't get the d3 notif %d\n", ret); - return ret; } diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 3b9943eb6934..d19b3bd0866b 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -1643,6 +1643,8 @@ int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, out: if (*status == IWL_D3_STATUS_ALIVE) ret = iwl_pcie_d3_handshake(trans, false); + else + trans->state = IWL_TRANS_NO_FW; return ret; } diff --git a/drivers/pci/msi/irqdomain.c b/drivers/pci/msi/irqdomain.c index 569125726b3e..d7ba8795d60f 100644 --- a/drivers/pci/msi/irqdomain.c +++ b/drivers/pci/msi/irqdomain.c @@ -350,8 +350,11 @@ bool pci_msi_domain_supports(struct pci_dev *pdev, unsigned int feature_mask, domain = dev_get_msi_domain(&pdev->dev); - if (!domain || !irq_domain_is_hierarchy(domain)) - return mode == ALLOW_LEGACY; + if (!domain || !irq_domain_is_hierarchy(domain)) { + if (IS_ENABLED(CONFIG_PCI_MSI_ARCH_FALLBACKS)) + return mode == ALLOW_LEGACY; + return false; + } if (!irq_domain_is_msi_parent(domain)) { /* diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c index 3a45879d85db..2f647cac4cae 100644 --- a/drivers/pci/msi/msi.c +++ b/drivers/pci/msi/msi.c @@ -433,6 +433,10 @@ int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec, if (WARN_ON_ONCE(dev->msi_enabled)) return -EINVAL; + /* Test for the availability of MSI support */ + if (!pci_msi_domain_supports(dev, 0, ALLOW_LEGACY)) + return -ENOTSUPP; + nvec = pci_msi_vec_count(dev); if (nvec < 0) return nvec; diff --git a/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c b/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c index 950b7ae1d1a8..dc452610934a 100644 --- a/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c +++ b/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c @@ -325,6 +325,12 @@ static void usb_init_common_7216(struct brcm_usb_init_params *params) void __iomem *ctrl = params->regs[BRCM_REGS_CTRL]; USB_CTRL_UNSET(ctrl, USB_PM, XHC_S2_CLK_SWITCH_EN); + + /* + * The PHY might be in a bad state if it is already powered + * up. Toggle the power just in case. 
+ */ + USB_CTRL_SET(ctrl, USB_PM, USB_PWRDN); USB_CTRL_UNSET(ctrl, USB_PM, USB_PWRDN); /* 1 millisecond - for USB clocks to settle down */ diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c index f053b525ccff..413f76e2d174 100644 --- a/drivers/phy/phy-core.c +++ b/drivers/phy/phy-core.c @@ -145,8 +145,10 @@ static struct phy_provider *of_phy_provider_lookup(struct device_node *node) return phy_provider; for_each_child_of_node(phy_provider->children, child) - if (child == node) + if (child == node) { + of_node_put(child); return phy_provider; + } } return ERR_PTR(-EPROBE_DEFER); @@ -629,8 +631,10 @@ static struct phy *_of_phy_get(struct device_node *np, int index) return ERR_PTR(-ENODEV); /* This phy type handled by the usb-phy subsystem for now */ - if (of_device_is_compatible(args.np, "usb-nop-xceiv")) - return ERR_PTR(-ENODEV); + if (of_device_is_compatible(args.np, "usb-nop-xceiv")) { + phy = ERR_PTR(-ENODEV); + goto out_put_node; + } mutex_lock(&phy_provider_mutex); phy_provider = of_phy_provider_lookup(args.np); @@ -652,6 +656,7 @@ static struct phy *_of_phy_get(struct device_node *np, int index) out_unlock: mutex_unlock(&phy_provider_mutex); +out_put_node: of_node_put(args.np); return phy; @@ -737,7 +742,7 @@ void devm_phy_put(struct device *dev, struct phy *phy) if (!phy) return; - r = devres_destroy(dev, devm_phy_release, devm_phy_match, phy); + r = devres_release(dev, devm_phy_release, devm_phy_match, phy); dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n"); } EXPORT_SYMBOL_GPL(devm_phy_put); @@ -1121,7 +1126,7 @@ void devm_phy_destroy(struct device *dev, struct phy *phy) { int r; - r = devres_destroy(dev, devm_phy_consume, devm_phy_match, phy); + r = devres_release(dev, devm_phy_consume, devm_phy_match, phy); dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n"); } EXPORT_SYMBOL_GPL(devm_phy_destroy); @@ -1259,12 +1264,12 @@ EXPORT_SYMBOL_GPL(of_phy_provider_unregister); * of_phy_provider_unregister to unregister the phy provider. 
*/ void devm_of_phy_provider_unregister(struct device *dev, - struct phy_provider *phy_provider) + struct phy_provider *phy_provider) { int r; - r = devres_destroy(dev, devm_phy_provider_release, devm_phy_match, - phy_provider); + r = devres_release(dev, devm_phy_provider_release, devm_phy_match, + phy_provider); dev_WARN_ONCE(dev, r, "couldn't find PHY provider device resource\n"); } EXPORT_SYMBOL_GPL(devm_of_phy_provider_unregister); diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c index 1246d3bc8b92..8e2cd2c178d6 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c @@ -1008,7 +1008,7 @@ static const struct qmp_phy_init_tbl sc8280xp_usb3_uniphy_rx_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f), QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff), QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f), - QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_GAIN, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FO_GAIN, 0x0a), QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL1, 0x54), QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL2, 0x0f), QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f), diff --git a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c index 0a9989e41237..2eb3329ca23f 100644 --- a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c +++ b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c @@ -309,7 +309,7 @@ static int rockchip_combphy_parse_dt(struct device *dev, struct rockchip_combphy priv->ext_refclk = device_property_present(dev, "rockchip,ext-refclk"); - priv->phy_rst = devm_reset_control_array_get_exclusive(dev); + priv->phy_rst = devm_reset_control_get(dev, "phy"); if (IS_ERR(priv->phy_rst)) return dev_err_probe(dev, PTR_ERR(priv->phy_rst), "failed to get phy reset\n"); diff --git a/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c b/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c index 9f084697dd05..69c3ec0938f7 100644 --- a/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c +++ b/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c @@ -1116,6 +1116,8 @@ static int rk_hdptx_phy_probe(struct platform_device *pdev) return dev_err_probe(dev, PTR_ERR(hdptx->grf), "Could not get GRF syscon\n"); + platform_set_drvdata(pdev, hdptx); + ret = devm_pm_runtime_enable(dev); if (ret) return dev_err_probe(dev, ret, "Failed to enable runtime PM\n"); @@ -1125,7 +1127,6 @@ static int rk_hdptx_phy_probe(struct platform_device *pdev) return dev_err_probe(dev, PTR_ERR(hdptx->phy), "Failed to create HDMI PHY\n"); - platform_set_drvdata(pdev, hdptx); phy_set_drvdata(hdptx->phy, hdptx); phy_set_bus_width(hdptx->phy, 8); diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c index c784119ab5dc..626e2635e3da 100644 --- a/drivers/platform/chrome/cros_ec_lpc.c +++ b/drivers/platform/chrome/cros_ec_lpc.c @@ -707,7 +707,7 @@ static const struct dmi_system_id cros_ec_lpc_dmi_table[] __initconst = { /* Framework Laptop (12th Gen Intel Core) */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Framework"), - DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "12th Gen Intel Core"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Laptop (12th Gen Intel Core)"), }, .driver_data = (void *)&framework_laptop_mec_lpc_driver_data, }, @@ -715,7 +715,7 @@ static const struct dmi_system_id cros_ec_lpc_dmi_table[] __initconst = { /* Framework Laptop (13th Gen Intel Core) */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Framework"), - 
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "13th Gen Intel Core"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Laptop (13th Gen Intel Core)"), }, .driver_data = (void *)&framework_laptop_mec_lpc_driver_data, }, diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c index ef04d396f61c..a5933980ade3 100644 --- a/drivers/platform/x86/asus-nb-wmi.c +++ b/drivers/platform/x86/asus-nb-wmi.c @@ -623,6 +623,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = { { KE_KEY, 0xC4, { KEY_KBDILLUMUP } }, { KE_KEY, 0xC5, { KEY_KBDILLUMDOWN } }, { KE_IGNORE, 0xC6, }, /* Ambient Light Sensor notification */ + { KE_IGNORE, 0xCF, }, /* AC mode */ { KE_KEY, 0xFA, { KEY_PROG2 } }, /* Lid flip action */ { KE_KEY, 0xBD, { KEY_PROG2 } }, /* Lid flip action on ROG xflow laptops */ { KE_END, 0}, diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c index 2b393eb5c282..c47f32f152e6 100644 --- a/drivers/power/supply/bq24190_charger.c +++ b/drivers/power/supply/bq24190_charger.c @@ -567,6 +567,7 @@ static int bq24190_set_otg_vbus(struct bq24190_dev_info *bdi, bool enable) static int bq24296_set_otg_vbus(struct bq24190_dev_info *bdi, bool enable) { + union power_supply_propval val = { .intval = bdi->charge_type }; int ret; ret = pm_runtime_resume_and_get(bdi->dev); @@ -587,13 +588,18 @@ static int bq24296_set_otg_vbus(struct bq24190_dev_info *bdi, bool enable) ret = bq24190_write_mask(bdi, BQ24190_REG_POC, BQ24296_REG_POC_OTG_CONFIG_MASK, - BQ24296_REG_POC_CHG_CONFIG_SHIFT, + BQ24296_REG_POC_OTG_CONFIG_SHIFT, BQ24296_REG_POC_OTG_CONFIG_OTG); - } else + } else { ret = bq24190_write_mask(bdi, BQ24190_REG_POC, BQ24296_REG_POC_OTG_CONFIG_MASK, - BQ24296_REG_POC_CHG_CONFIG_SHIFT, + BQ24296_REG_POC_OTG_CONFIG_SHIFT, BQ24296_REG_POC_OTG_CONFIG_DISABLE); + if (ret < 0) + goto out; + + ret = bq24190_charger_set_charge_type(bdi, &val); + } out: pm_runtime_mark_last_busy(bdi->dev); diff --git a/drivers/power/supply/cros_charge-control.c b/drivers/power/supply/cros_charge-control.c index 17c53591ce19..9b0a7500296b 100644 --- a/drivers/power/supply/cros_charge-control.c +++ b/drivers/power/supply/cros_charge-control.c @@ -7,8 +7,10 @@ #include <acpi/battery.h> #include <linux/container_of.h> #include <linux/dmi.h> +#include <linux/lockdep.h> #include <linux/mod_devicetable.h> #include <linux/module.h> +#include <linux/mutex.h> #include <linux/platform_data/cros_ec_commands.h> #include <linux/platform_data/cros_ec_proto.h> #include <linux/platform_device.h> @@ -49,6 +51,7 @@ struct cros_chctl_priv { struct attribute *attributes[_CROS_CHCTL_ATTR_COUNT]; struct attribute_group group; + struct mutex lock; /* protects fields below and cros_ec */ enum power_supply_charge_behaviour current_behaviour; u8 current_start_threshold, current_end_threshold; }; @@ -85,6 +88,8 @@ static int cros_chctl_configure_ec(struct cros_chctl_priv *priv) { struct ec_params_charge_control req = {}; + lockdep_assert_held(&priv->lock); + req.cmd = EC_CHARGE_CONTROL_CMD_SET; switch (priv->current_behaviour) { @@ -134,11 +139,15 @@ static ssize_t cros_chctl_store_threshold(struct device *dev, struct cros_chctl_ return -EINVAL; if (is_end_threshold) { - if (val <= priv->current_start_threshold) + /* Start threshold is not exposed, use fixed value */ + if (priv->cmd_version == 2) + priv->current_start_threshold = val == 100 ? 
0 : val; + + if (val < priv->current_start_threshold) return -EINVAL; priv->current_end_threshold = val; } else { - if (val >= priv->current_end_threshold) + if (val > priv->current_end_threshold) return -EINVAL; priv->current_start_threshold = val; } @@ -159,6 +168,7 @@ static ssize_t charge_control_start_threshold_show(struct device *dev, struct cros_chctl_priv *priv = cros_chctl_attr_to_priv(&attr->attr, CROS_CHCTL_ATTR_START_THRESHOLD); + guard(mutex)(&priv->lock); return sysfs_emit(buf, "%u\n", (unsigned int)priv->current_start_threshold); } @@ -169,6 +179,7 @@ static ssize_t charge_control_start_threshold_store(struct device *dev, struct cros_chctl_priv *priv = cros_chctl_attr_to_priv(&attr->attr, CROS_CHCTL_ATTR_START_THRESHOLD); + guard(mutex)(&priv->lock); return cros_chctl_store_threshold(dev, priv, 0, buf, count); } @@ -178,6 +189,7 @@ static ssize_t charge_control_end_threshold_show(struct device *dev, struct devi struct cros_chctl_priv *priv = cros_chctl_attr_to_priv(&attr->attr, CROS_CHCTL_ATTR_END_THRESHOLD); + guard(mutex)(&priv->lock); return sysfs_emit(buf, "%u\n", (unsigned int)priv->current_end_threshold); } @@ -187,6 +199,7 @@ static ssize_t charge_control_end_threshold_store(struct device *dev, struct dev struct cros_chctl_priv *priv = cros_chctl_attr_to_priv(&attr->attr, CROS_CHCTL_ATTR_END_THRESHOLD); + guard(mutex)(&priv->lock); return cros_chctl_store_threshold(dev, priv, 1, buf, count); } @@ -195,6 +208,7 @@ static ssize_t charge_behaviour_show(struct device *dev, struct device_attribute struct cros_chctl_priv *priv = cros_chctl_attr_to_priv(&attr->attr, CROS_CHCTL_ATTR_CHARGE_BEHAVIOUR); + guard(mutex)(&priv->lock); return power_supply_charge_behaviour_show(dev, EC_CHARGE_CONTROL_BEHAVIOURS, priv->current_behaviour, buf); } @@ -210,6 +224,7 @@ static ssize_t charge_behaviour_store(struct device *dev, struct device_attribut if (ret < 0) return ret; + guard(mutex)(&priv->lock); priv->current_behaviour = ret; ret = cros_chctl_configure_ec(priv); @@ -223,12 +238,10 @@ static umode_t cros_chtl_attr_is_visible(struct kobject *kobj, struct attribute { struct cros_chctl_priv *priv = cros_chctl_attr_to_priv(attr, n); - if (priv->cmd_version < 2) { - if (n == CROS_CHCTL_ATTR_START_THRESHOLD) - return 0; - if (n == CROS_CHCTL_ATTR_END_THRESHOLD) - return 0; - } + if (n == CROS_CHCTL_ATTR_START_THRESHOLD && priv->cmd_version < 3) + return 0; + else if (n == CROS_CHCTL_ATTR_END_THRESHOLD && priv->cmd_version < 2) + return 0; return attr->mode; } @@ -290,6 +303,10 @@ static int cros_chctl_probe(struct platform_device *pdev) if (!priv) return -ENOMEM; + ret = devm_mutex_init(dev, &priv->lock); + if (ret) + return ret; + ret = cros_ec_get_cmd_versions(cros_ec, EC_CMD_CHARGE_CONTROL); if (ret < 0) return ret; @@ -327,7 +344,8 @@ static int cros_chctl_probe(struct platform_device *pdev) priv->current_end_threshold = 100; /* Bring EC into well-known state */ - ret = cros_chctl_configure_ec(priv); + scoped_guard(mutex, &priv->lock) + ret = cros_chctl_configure_ec(priv); if (ret < 0) return ret; diff --git a/drivers/power/supply/gpio-charger.c b/drivers/power/supply/gpio-charger.c index 68212b39785b..6139f736ecbe 100644 --- a/drivers/power/supply/gpio-charger.c +++ b/drivers/power/supply/gpio-charger.c @@ -67,6 +67,14 @@ static int set_charge_current_limit(struct gpio_charger *gpio_charger, int val) if (gpio_charger->current_limit_map[i].limit_ua <= val) break; } + + /* + * If a valid charge current limit isn't found, default to smallest + * current limitation for safety reasons. 
+ */ + if (i >= gpio_charger->current_limit_map_size) + i = gpio_charger->current_limit_map_size - 1; + mapping = gpio_charger->current_limit_map[i]; for (i = 0; i < ndescs; i++) { diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 8e75e2e279a4..50f1dcb6d584 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -8907,8 +8907,11 @@ megasas_aen_polling(struct work_struct *work) (ld_target_id / MEGASAS_MAX_DEV_PER_CHANNEL), (ld_target_id % MEGASAS_MAX_DEV_PER_CHANNEL), 0); - if (sdev1) + if (sdev1) { + mutex_unlock(&instance->reset_mutex); megasas_remove_scsi_device(sdev1); + mutex_lock(&instance->reset_mutex); + } event_type = SCAN_VD_CHANNEL; break; diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h index 81bb408ce56d..1e715fd65a7d 100644 --- a/drivers/scsi/mpi3mr/mpi3mr.h +++ b/drivers/scsi/mpi3mr/mpi3mr.h @@ -134,8 +134,6 @@ extern atomic64_t event_counter; #define MPI3MR_WATCHDOG_INTERVAL 1000 /* in milli seconds */ -#define MPI3MR_DEFAULT_CFG_PAGE_SZ 1024 /* in bytes */ - #define MPI3MR_RESET_TOPOLOGY_SETTLE_TIME 10 #define MPI3MR_SCMD_TIMEOUT (60 * HZ) @@ -1133,9 +1131,6 @@ struct scmd_priv { * @io_throttle_low: I/O size to stop throttle in 512b blocks * @num_io_throttle_group: Maximum number of throttle groups * @throttle_groups: Pointer to throttle group info structures - * @cfg_page: Default memory for configuration pages - * @cfg_page_dma: Configuration page DMA address - * @cfg_page_sz: Default configuration page memory size * @sas_transport_enabled: SAS transport enabled or not * @scsi_device_channel: Channel ID for SCSI devices * @transport_cmds: Command tracker for SAS transport commands @@ -1332,10 +1327,6 @@ struct mpi3mr_ioc { u16 num_io_throttle_group; struct mpi3mr_throttle_group_info *throttle_groups; - void *cfg_page; - dma_addr_t cfg_page_dma; - u16 cfg_page_sz; - u8 sas_transport_enabled; u8 scsi_device_channel; struct mpi3mr_drv_cmd transport_cmds; diff --git a/drivers/scsi/mpi3mr/mpi3mr_app.c b/drivers/scsi/mpi3mr/mpi3mr_app.c index 01f035f9330e..10b8e4dc64f8 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_app.c +++ b/drivers/scsi/mpi3mr/mpi3mr_app.c @@ -2329,6 +2329,15 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job) if (!mrioc) return -ENODEV; + if (mutex_lock_interruptible(&mrioc->bsg_cmds.mutex)) + return -ERESTARTSYS; + + if (mrioc->bsg_cmds.state & MPI3MR_CMD_PENDING) { + dprint_bsg_err(mrioc, "%s: command is in use\n", __func__); + mutex_unlock(&mrioc->bsg_cmds.mutex); + return -EAGAIN; + } + if (!mrioc->ioctl_sges_allocated) { dprint_bsg_err(mrioc, "%s: DMA memory was not allocated\n", __func__); @@ -2339,13 +2348,16 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job) karg->timeout = MPI3MR_APP_DEFAULT_TIMEOUT; mpi_req = kzalloc(MPI3MR_ADMIN_REQ_FRAME_SZ, GFP_KERNEL); - if (!mpi_req) + if (!mpi_req) { + mutex_unlock(&mrioc->bsg_cmds.mutex); return -ENOMEM; + } mpi_header = (struct mpi3_request_header *)mpi_req; bufcnt = karg->buf_entry_list.num_of_entries; drv_bufs = kzalloc((sizeof(*drv_bufs) * bufcnt), GFP_KERNEL); if (!drv_bufs) { + mutex_unlock(&mrioc->bsg_cmds.mutex); rval = -ENOMEM; goto out; } @@ -2353,6 +2365,7 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job) dout_buf = kzalloc(job->request_payload.payload_len, GFP_KERNEL); if (!dout_buf) { + mutex_unlock(&mrioc->bsg_cmds.mutex); rval = -ENOMEM; goto out; } @@ -2360,6 +2373,7 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job) 
din_buf = kzalloc(job->reply_payload.payload_len, GFP_KERNEL); if (!din_buf) { + mutex_unlock(&mrioc->bsg_cmds.mutex); rval = -ENOMEM; goto out; } @@ -2435,6 +2449,7 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job) (mpi_msg_size > MPI3MR_ADMIN_REQ_FRAME_SZ)) { dprint_bsg_err(mrioc, "%s: invalid MPI message size\n", __func__); + mutex_unlock(&mrioc->bsg_cmds.mutex); rval = -EINVAL; goto out; } @@ -2447,6 +2462,7 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job) if (invalid_be) { dprint_bsg_err(mrioc, "%s: invalid buffer entries passed\n", __func__); + mutex_unlock(&mrioc->bsg_cmds.mutex); rval = -EINVAL; goto out; } @@ -2454,12 +2470,14 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job) if (sgl_dout_iter > (dout_buf + job->request_payload.payload_len)) { dprint_bsg_err(mrioc, "%s: data_out buffer length mismatch\n", __func__); + mutex_unlock(&mrioc->bsg_cmds.mutex); rval = -EINVAL; goto out; } if (sgl_din_iter > (din_buf + job->reply_payload.payload_len)) { dprint_bsg_err(mrioc, "%s: data_in buffer length mismatch\n", __func__); + mutex_unlock(&mrioc->bsg_cmds.mutex); rval = -EINVAL; goto out; } @@ -2472,6 +2490,7 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job) dprint_bsg_err(mrioc, "%s:%d: invalid data transfer size passed for function 0x%x din_size = %d, dout_size = %d\n", __func__, __LINE__, mpi_header->function, din_size, dout_size); + mutex_unlock(&mrioc->bsg_cmds.mutex); rval = -EINVAL; goto out; } @@ -2480,6 +2499,7 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job) dprint_bsg_err(mrioc, "%s:%d: invalid data transfer size passed for function 0x%x din_size=%d\n", __func__, __LINE__, mpi_header->function, din_size); + mutex_unlock(&mrioc->bsg_cmds.mutex); rval = -EINVAL; goto out; } @@ -2487,6 +2507,7 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job) dprint_bsg_err(mrioc, "%s:%d: invalid data transfer size passed for function 0x%x dout_size = %d\n", __func__, __LINE__, mpi_header->function, dout_size); + mutex_unlock(&mrioc->bsg_cmds.mutex); rval = -EINVAL; goto out; } @@ -2497,6 +2518,7 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job) dprint_bsg_err(mrioc, "%s:%d: invalid message size passed:%d:%d:%d:%d\n", __func__, __LINE__, din_cnt, dout_cnt, din_size, dout_size); + mutex_unlock(&mrioc->bsg_cmds.mutex); rval = -EINVAL; goto out; } @@ -2544,6 +2566,7 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job) continue; if (mpi3mr_map_data_buffer_dma(mrioc, drv_buf_iter, desc_count)) { rval = -ENOMEM; + mutex_unlock(&mrioc->bsg_cmds.mutex); dprint_bsg_err(mrioc, "%s:%d: mapping data buffers failed\n", __func__, __LINE__); goto out; @@ -2556,20 +2579,11 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job) sense_buff_k = kzalloc(erbsz, GFP_KERNEL); if (!sense_buff_k) { rval = -ENOMEM; + mutex_unlock(&mrioc->bsg_cmds.mutex); goto out; } } - if (mutex_lock_interruptible(&mrioc->bsg_cmds.mutex)) { - rval = -ERESTARTSYS; - goto out; - } - if (mrioc->bsg_cmds.state & MPI3MR_CMD_PENDING) { - rval = -EAGAIN; - dprint_bsg_err(mrioc, "%s: command is in use\n", __func__); - mutex_unlock(&mrioc->bsg_cmds.mutex); - goto out; - } if (mrioc->unrecoverable) { dprint_bsg_err(mrioc, "%s: unrecoverable controller\n", __func__); diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c index f1ab76351bd8..5ed31fe57474 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_fw.c +++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c @@ -1035,6 +1035,36 @@ static const char *mpi3mr_reset_type_name(u16 
reset_type) return name; } +/** + * mpi3mr_is_fault_recoverable - Read fault code and decide + * whether the controller can be recoverable + * @mrioc: Adapter instance reference + * Return: true if fault is recoverable, false otherwise. + */ +static inline bool mpi3mr_is_fault_recoverable(struct mpi3mr_ioc *mrioc) +{ + u32 fault; + + fault = (readl(&mrioc->sysif_regs->fault) & + MPI3_SYSIF_FAULT_CODE_MASK); + + switch (fault) { + case MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED: + case MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED: + ioc_warn(mrioc, + "controller requires system power cycle, marking controller as unrecoverable\n"); + return false; + case MPI3_SYSIF_FAULT_CODE_INSUFFICIENT_PCI_SLOT_POWER: + ioc_warn(mrioc, + "controller faulted due to insufficient power,\n" + " try by connecting it to a different slot\n"); + return false; + default: + break; + } + return true; +} + /** * mpi3mr_print_fault_info - Display fault information * @mrioc: Adapter instance reference @@ -1373,6 +1403,11 @@ static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc) ioc_info(mrioc, "ioc_status(0x%08x), ioc_config(0x%08x), ioc_info(0x%016llx) at the bringup\n", ioc_status, ioc_config, base_info); + if (!mpi3mr_is_fault_recoverable(mrioc)) { + mrioc->unrecoverable = 1; + goto out_device_not_present; + } + /*The timeout value is in 2sec unit, changing it to seconds*/ mrioc->ready_timeout = ((base_info & MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK) >> @@ -2734,6 +2769,11 @@ static void mpi3mr_watchdog_work(struct work_struct *work) mpi3mr_print_fault_info(mrioc); mrioc->diagsave_timeout = 0; + if (!mpi3mr_is_fault_recoverable(mrioc)) { + mrioc->unrecoverable = 1; + goto schedule_work; + } + switch (trigger_data.fault) { case MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED: case MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED: @@ -4186,17 +4226,6 @@ int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc) mpi3mr_read_tsu_interval(mrioc); mpi3mr_print_ioc_info(mrioc); - if (!mrioc->cfg_page) { - dprint_init(mrioc, "allocating config page buffers\n"); - mrioc->cfg_page_sz = MPI3MR_DEFAULT_CFG_PAGE_SZ; - mrioc->cfg_page = dma_alloc_coherent(&mrioc->pdev->dev, - mrioc->cfg_page_sz, &mrioc->cfg_page_dma, GFP_KERNEL); - if (!mrioc->cfg_page) { - retval = -1; - goto out_failed_noretry; - } - } - dprint_init(mrioc, "allocating host diag buffers\n"); mpi3mr_alloc_diag_bufs(mrioc); @@ -4768,11 +4797,7 @@ void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc) mrioc->admin_req_base, mrioc->admin_req_dma); mrioc->admin_req_base = NULL; } - if (mrioc->cfg_page) { - dma_free_coherent(&mrioc->pdev->dev, mrioc->cfg_page_sz, - mrioc->cfg_page, mrioc->cfg_page_dma); - mrioc->cfg_page = NULL; - } + if (mrioc->pel_seqnum_virt) { dma_free_coherent(&mrioc->pdev->dev, mrioc->pel_seqnum_sz, mrioc->pel_seqnum_virt, mrioc->pel_seqnum_dma); @@ -5392,55 +5417,6 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc, return retval; } - -/** - * mpi3mr_free_config_dma_memory - free memory for config page - * @mrioc: Adapter instance reference - * @mem_desc: memory descriptor structure - * - * Check whether the size of the buffer specified by the memory - * descriptor is greater than the default page size if so then - * free the memory pointed by the descriptor. - * - * Return: Nothing. 
- */ -static void mpi3mr_free_config_dma_memory(struct mpi3mr_ioc *mrioc, - struct dma_memory_desc *mem_desc) -{ - if ((mem_desc->size > mrioc->cfg_page_sz) && mem_desc->addr) { - dma_free_coherent(&mrioc->pdev->dev, mem_desc->size, - mem_desc->addr, mem_desc->dma_addr); - mem_desc->addr = NULL; - } -} - -/** - * mpi3mr_alloc_config_dma_memory - Alloc memory for config page - * @mrioc: Adapter instance reference - * @mem_desc: Memory descriptor to hold dma memory info - * - * This function allocates new dmaable memory or provides the - * default config page dmaable memory based on the memory size - * described by the descriptor. - * - * Return: 0 on success, non-zero on failure. - */ -static int mpi3mr_alloc_config_dma_memory(struct mpi3mr_ioc *mrioc, - struct dma_memory_desc *mem_desc) -{ - if (mem_desc->size > mrioc->cfg_page_sz) { - mem_desc->addr = dma_alloc_coherent(&mrioc->pdev->dev, - mem_desc->size, &mem_desc->dma_addr, GFP_KERNEL); - if (!mem_desc->addr) - return -ENOMEM; - } else { - mem_desc->addr = mrioc->cfg_page; - mem_desc->dma_addr = mrioc->cfg_page_dma; - memset(mem_desc->addr, 0, mrioc->cfg_page_sz); - } - return 0; -} - /** * mpi3mr_post_cfg_req - Issue config requests and wait * @mrioc: Adapter instance reference @@ -5596,8 +5572,12 @@ static int mpi3mr_process_cfg_req(struct mpi3mr_ioc *mrioc, cfg_req->page_length = cfg_hdr->page_length; cfg_req->page_version = cfg_hdr->page_version; } - if (mpi3mr_alloc_config_dma_memory(mrioc, &mem_desc)) - goto out; + + mem_desc.addr = dma_alloc_coherent(&mrioc->pdev->dev, + mem_desc.size, &mem_desc.dma_addr, GFP_KERNEL); + + if (!mem_desc.addr) + return retval; mpi3mr_add_sg_single(&cfg_req->sgl, sgl_flags, mem_desc.size, mem_desc.dma_addr); @@ -5626,7 +5606,12 @@ static int mpi3mr_process_cfg_req(struct mpi3mr_ioc *mrioc, } out: - mpi3mr_free_config_dma_memory(mrioc, &mem_desc); + if (mem_desc.addr) { + dma_free_coherent(&mrioc->pdev->dev, mem_desc.size, + mem_desc.addr, mem_desc.dma_addr); + mem_desc.addr = NULL; + } + return retval; } diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c index 5f2f67acf8bf..1bef88130d0c 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_os.c +++ b/drivers/scsi/mpi3mr/mpi3mr_os.c @@ -5215,7 +5215,7 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id) } mrioc = shost_priv(shost); - retval = ida_alloc_range(&mrioc_ida, 1, U8_MAX, GFP_KERNEL); + retval = ida_alloc_range(&mrioc_ida, 0, U8_MAX, GFP_KERNEL); if (retval < 0) goto id_alloc_failed; mrioc->id = (u8)retval; diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index ed5046593fda..16ac2267c71e 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -7041,11 +7041,12 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes, int i; u8 failed; __le32 *mfp; + int ret_val; /* make sure doorbell is not in use */ if ((ioc->base_readl_ext_retry(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) { ioc_err(ioc, "doorbell is in use (line=%d)\n", __LINE__); - return -EFAULT; + goto doorbell_diag_reset; } /* clear pending doorbell interrupts from previous state changes */ @@ -7135,6 +7136,10 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes, le32_to_cpu(mfp[i])); } return 0; + +doorbell_diag_reset: + ret_val = _base_diag_reset(ioc); + return ret_val; } /** diff --git a/drivers/scsi/qla1280.h b/drivers/scsi/qla1280.h index d309e2ca14de..dea2290b37d4 100644 --- a/drivers/scsi/qla1280.h +++ 
b/drivers/scsi/qla1280.h @@ -116,12 +116,12 @@ struct device_reg { uint16_t id_h; /* ID high */ uint16_t cfg_0; /* Configuration 0 */ #define ISP_CFG0_HWMSK 0x000f /* Hardware revision mask */ -#define ISP_CFG0_1020 BIT_0 /* ISP1020 */ -#define ISP_CFG0_1020A BIT_1 /* ISP1020A */ -#define ISP_CFG0_1040 BIT_2 /* ISP1040 */ -#define ISP_CFG0_1040A BIT_3 /* ISP1040A */ -#define ISP_CFG0_1040B BIT_4 /* ISP1040B */ -#define ISP_CFG0_1040C BIT_5 /* ISP1040C */ +#define ISP_CFG0_1020 1 /* ISP1020 */ +#define ISP_CFG0_1020A 2 /* ISP1020A */ +#define ISP_CFG0_1040 3 /* ISP1040 */ +#define ISP_CFG0_1040A 4 /* ISP1040A */ +#define ISP_CFG0_1040B 5 /* ISP1040B */ +#define ISP_CFG0_1040C 6 /* ISP1040C */ uint16_t cfg_1; /* Configuration 1 */ #define ISP_CFG1_F128 BIT_6 /* 128-byte FIFO threshold */ #define ISP_CFG1_F64 BIT_4|BIT_5 /* 128-byte FIFO threshold */ diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 7ceb982040a5..d0b55c1fa908 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -149,6 +149,8 @@ struct hv_fc_wwn_packet { */ static int vmstor_proto_version; +static bool hv_dev_is_fc(struct hv_device *hv_dev); + #define STORVSC_LOGGING_NONE 0 #define STORVSC_LOGGING_ERROR 1 #define STORVSC_LOGGING_WARN 2 @@ -1138,6 +1140,7 @@ static void storvsc_on_io_completion(struct storvsc_device *stor_device, * not correctly handle: * INQUIRY command with page code parameter set to 0x80 * MODE_SENSE command with cmd[2] == 0x1c + * MAINTENANCE_IN is not supported by HyperV FC passthrough * * Setup srb and scsi status so this won't be fatal. * We do this so we can distinguish truly fatal failues @@ -1145,7 +1148,9 @@ static void storvsc_on_io_completion(struct storvsc_device *stor_device, */ if ((stor_pkt->vm_srb.cdb[0] == INQUIRY) || - (stor_pkt->vm_srb.cdb[0] == MODE_SENSE)) { + (stor_pkt->vm_srb.cdb[0] == MODE_SENSE) || + (stor_pkt->vm_srb.cdb[0] == MAINTENANCE_IN && + hv_dev_is_fc(device))) { vstor_packet->vm_srb.scsi_status = 0; vstor_packet->vm_srb.srb_status = SRB_STATUS_SUCCESS; } diff --git a/drivers/spi/spi-intel-pci.c b/drivers/spi/spi-intel-pci.c index 4337ca51d7aa..5c0dec90eec1 100644 --- a/drivers/spi/spi-intel-pci.c +++ b/drivers/spi/spi-intel-pci.c @@ -86,6 +86,8 @@ static const struct pci_device_id intel_spi_pci_ids[] = { { PCI_VDEVICE(INTEL, 0xa324), (unsigned long)&cnl_info }, { PCI_VDEVICE(INTEL, 0xa3a4), (unsigned long)&cnl_info }, { PCI_VDEVICE(INTEL, 0xa823), (unsigned long)&cnl_info }, + { PCI_VDEVICE(INTEL, 0xe323), (unsigned long)&cnl_info }, + { PCI_VDEVICE(INTEL, 0xe423), (unsigned long)&cnl_info }, { }, }; MODULE_DEVICE_TABLE(pci, intel_spi_pci_ids); diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c index 2c043817c66a..4a2f84c4d22e 100644 --- a/drivers/spi/spi-omap2-mcspi.c +++ b/drivers/spi/spi-omap2-mcspi.c @@ -1561,10 +1561,10 @@ static int omap2_mcspi_probe(struct platform_device *pdev) } mcspi->ref_clk = devm_clk_get_optional_enabled(&pdev->dev, NULL); - if (mcspi->ref_clk) - mcspi->ref_clk_hz = clk_get_rate(mcspi->ref_clk); - else + if (IS_ERR(mcspi->ref_clk)) mcspi->ref_clk_hz = OMAP2_MCSPI_MAX_FREQ; + else + mcspi->ref_clk_hz = clk_get_rate(mcspi->ref_clk); ctlr->max_speed_hz = mcspi->ref_clk_hz; ctlr->min_speed_hz = mcspi->ref_clk_hz >> 15; diff --git a/drivers/virt/coco/tdx-guest/tdx-guest.c b/drivers/virt/coco/tdx-guest/tdx-guest.c index d7db6c824e13..224e7dde9cde 100644 --- a/drivers/virt/coco/tdx-guest/tdx-guest.c +++ b/drivers/virt/coco/tdx-guest/tdx-guest.c @@ -124,10 +124,8 @@ static void 
*alloc_quote_buf(void) if (!addr) return NULL; - if (set_memory_decrypted((unsigned long)addr, count)) { - free_pages_exact(addr, len); + if (set_memory_decrypted((unsigned long)addr, count)) return NULL; - } return addr; } diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 94c96bcfefe3..0b59c669c26d 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -549,6 +549,7 @@ config S3C2410_WATCHDOG tristate "S3C6410/S5Pv210/Exynos Watchdog" depends on ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST select WATCHDOG_CORE + select MFD_SYSCON if ARCH_EXYNOS help Watchdog timer block in the Samsung S3C64xx, S5Pv210 and Exynos SoCs. This will reboot the system when the timer expires with diff --git a/drivers/watchdog/it87_wdt.c b/drivers/watchdog/it87_wdt.c index 3e8c15138edd..1a5a0a2c3f2e 100644 --- a/drivers/watchdog/it87_wdt.c +++ b/drivers/watchdog/it87_wdt.c @@ -20,6 +20,8 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/bits.h> +#include <linux/dmi.h> #include <linux/init.h> #include <linux/io.h> #include <linux/kernel.h> @@ -40,6 +42,7 @@ #define VAL 0x2f /* Logical device Numbers LDN */ +#define EC 0x04 #define GPIO 0x07 /* Configuration Registers and Functions */ @@ -73,6 +76,12 @@ #define IT8784_ID 0x8784 #define IT8786_ID 0x8786 +/* Environment Controller Configuration Registers LDN=0x04 */ +#define SCR1 0xfa + +/* Environment Controller Bits SCR1 */ +#define WDT_PWRGD 0x20 + /* GPIO Configuration Registers LDN=0x07 */ #define WDTCTRL 0x71 #define WDTCFG 0x72 @@ -240,6 +249,21 @@ static int wdt_set_timeout(struct watchdog_device *wdd, unsigned int t) return ret; } +enum { + IT87_WDT_OUTPUT_THROUGH_PWRGD = BIT(0), +}; + +static const struct dmi_system_id it87_quirks[] = { + { + /* Qotom Q30900P (IT8786) */ + .matches = { + DMI_EXACT_MATCH(DMI_BOARD_NAME, "QCML04"), + }, + .driver_data = (void *)IT87_WDT_OUTPUT_THROUGH_PWRGD, + }, + {} +}; + static const struct watchdog_info ident = { .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING, .firmware_version = 1, @@ -261,8 +285,10 @@ static struct watchdog_device wdt_dev = { static int __init it87_wdt_init(void) { + const struct dmi_system_id *dmi_id; u8 chip_rev; u8 ctrl; + int quirks = 0; int rc; rc = superio_enter(); @@ -273,6 +299,10 @@ static int __init it87_wdt_init(void) chip_rev = superio_inb(CHIPREV) & 0x0f; superio_exit(); + dmi_id = dmi_first_match(it87_quirks); + if (dmi_id) + quirks = (long)dmi_id->driver_data; + switch (chip_type) { case IT8702_ID: max_units = 255; @@ -333,6 +363,15 @@ static int __init it87_wdt_init(void) superio_outb(0x00, WDTCTRL); } + if (quirks & IT87_WDT_OUTPUT_THROUGH_PWRGD) { + superio_select(EC); + ctrl = superio_inb(SCR1); + if (!(ctrl & WDT_PWRGD)) { + ctrl |= WDT_PWRGD; + superio_outb(ctrl, SCR1); + } + } + superio_exit(); if (timeout < 1 || timeout > max_units * 60) { diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c index e2d7a57d6ea2..91d110646e16 100644 --- a/drivers/watchdog/mtk_wdt.c +++ b/drivers/watchdog/mtk_wdt.c @@ -10,6 +10,7 @@ */ #include <dt-bindings/reset/mt2712-resets.h> +#include <dt-bindings/reset/mediatek,mt6735-wdt.h> #include <dt-bindings/reset/mediatek,mt6795-resets.h> #include <dt-bindings/reset/mt7986-resets.h> #include <dt-bindings/reset/mt8183-resets.h> @@ -87,6 +88,10 @@ static const struct mtk_wdt_data mt2712_data = { .toprgu_sw_rst_num = MT2712_TOPRGU_SW_RST_NUM, }; +static const struct mtk_wdt_data mt6735_data = { + .toprgu_sw_rst_num = MT6735_TOPRGU_RST_NUM, +}; + static 
const struct mtk_wdt_data mt6795_data = { .toprgu_sw_rst_num = MT6795_TOPRGU_SW_RST_NUM, }; @@ -489,6 +494,7 @@ static int mtk_wdt_resume(struct device *dev) static const struct of_device_id mtk_wdt_dt_ids[] = { { .compatible = "mediatek,mt2712-wdt", .data = &mt2712_data }, { .compatible = "mediatek,mt6589-wdt" }, + { .compatible = "mediatek,mt6735-wdt", .data = &mt6735_data }, { .compatible = "mediatek,mt6795-wdt", .data = &mt6795_data }, { .compatible = "mediatek,mt7986-wdt", .data = &mt7986_data }, { .compatible = "mediatek,mt7988-wdt", .data = &mt7988_data }, diff --git a/drivers/watchdog/rzg2l_wdt.c b/drivers/watchdog/rzg2l_wdt.c index 2a35f890a288..11bbe48160ec 100644 --- a/drivers/watchdog/rzg2l_wdt.c +++ b/drivers/watchdog/rzg2l_wdt.c @@ -12,6 +12,7 @@ #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> +#include <linux/pm_domain.h> #include <linux/pm_runtime.h> #include <linux/reset.h> #include <linux/units.h> @@ -166,8 +167,22 @@ static int rzg2l_wdt_restart(struct watchdog_device *wdev, struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev); int ret; - clk_prepare_enable(priv->pclk); - clk_prepare_enable(priv->osc_clk); + /* + * In case of RZ/G3S the watchdog device may be part of an IRQ safe power + * domain that is currently powered off. In this case we need to power + * it on before accessing registers. Along with this the clocks will be + * enabled. We don't undo the pm_runtime_resume_and_get() as the device + * need to be on for the reboot to happen. + * + * For the rest of SoCs not registering a watchdog IRQ safe power + * domain it is safe to call pm_runtime_resume_and_get() as the + * irq_safe_dev_in_sleep_domain() call in genpd_runtime_resume() + * returns non zero value and the genpd_lock() is avoided, thus, there + * will be no invalid wait context reported by lockdep. 
+ */ + ret = pm_runtime_resume_and_get(wdev->parent); + if (ret) + return ret; if (priv->devtype == WDT_RZG2L) { ret = reset_control_deassert(priv->rstc); @@ -275,6 +290,7 @@ static int rzg2l_wdt_probe(struct platform_device *pdev) priv->devtype = (uintptr_t)of_device_get_match_data(dev); + pm_runtime_irq_safe(&pdev->dev); pm_runtime_enable(&pdev->dev); priv->wdev.info = &rzg2l_wdt_ident; diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c index 686cf544d0ae..349d30462c8c 100644 --- a/drivers/watchdog/s3c2410_wdt.c +++ b/drivers/watchdog/s3c2410_wdt.c @@ -24,9 +24,9 @@ #include <linux/slab.h> #include <linux/err.h> #include <linux/of.h> +#include <linux/mfd/syscon.h> #include <linux/regmap.h> #include <linux/delay.h> -#include <linux/soc/samsung/exynos-pmu.h> #define S3C2410_WTCON 0x00 #define S3C2410_WTDAT 0x04 @@ -699,11 +699,11 @@ static int s3c2410wdt_probe(struct platform_device *pdev) return ret; if (wdt->drv_data->quirks & QUIRKS_HAVE_PMUREG) { - wdt->pmureg = exynos_get_pmu_regmap_by_phandle(dev->of_node, - "samsung,syscon-phandle"); + wdt->pmureg = syscon_regmap_lookup_by_phandle(dev->of_node, + "samsung,syscon-phandle"); if (IS_ERR(wdt->pmureg)) return dev_err_probe(dev, PTR_ERR(wdt->pmureg), - "PMU regmap lookup failed.\n"); + "syscon regmap lookup failed.\n"); } wdt_irq = platform_get_irq(pdev, 0); diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 9c05cab473f5..29c164597401 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -654,6 +654,8 @@ int btrfs_force_cow_block(struct btrfs_trans_handle *trans, goto error_unlock_cow; } } + + trace_btrfs_cow_block(root, buf, cow); if (unlock_orig) btrfs_tree_unlock(buf); free_extent_buffer_stale(buf); @@ -710,7 +712,6 @@ int btrfs_cow_block(struct btrfs_trans_handle *trans, { struct btrfs_fs_info *fs_info = root->fs_info; u64 search_start; - int ret; if (unlikely(test_bit(BTRFS_ROOT_DELETING, &root->state))) { btrfs_abort_transaction(trans, -EUCLEAN); @@ -751,12 +752,8 @@ int btrfs_cow_block(struct btrfs_trans_handle *trans, * Also We don't care about the error, as it's handled internally. 
*/ btrfs_qgroup_trace_subtree_after_cow(trans, root, buf); - ret = btrfs_force_cow_block(trans, root, buf, parent, parent_slot, - cow_ret, search_start, 0, nest); - - trace_btrfs_cow_block(root, buf, *cow_ret); - - return ret; + return btrfs_force_cow_block(trans, root, buf, parent, parent_slot, + cow_ret, search_start, 0, nest); } ALLOW_ERROR_INJECTION(btrfs_cow_block, ERRNO); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 58ffe78132d9..4b3e256e0d0b 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -7117,6 +7117,8 @@ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len, ret = -EAGAIN; goto out; } + + cond_resched(); } if (file_extent) @@ -9780,15 +9782,25 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file, struct btrfs_fs_info *fs_info = root->fs_info; struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; struct extent_state *cached_state = NULL; - struct extent_map *em = NULL; struct btrfs_chunk_map *map = NULL; struct btrfs_device *device = NULL; struct btrfs_swap_info bsi = { .lowest_ppage = (sector_t)-1ULL, }; + struct btrfs_backref_share_check_ctx *backref_ctx = NULL; + struct btrfs_path *path = NULL; int ret = 0; u64 isize; - u64 start; + u64 prev_extent_end = 0; + + /* + * Acquire the inode's mmap lock to prevent races with memory mapped + * writes, as they could happen after we flush delalloc below and before + * we lock the extent range further below. The inode was already locked + * up in the call chain. + */ + btrfs_assert_inode_locked(BTRFS_I(inode)); + down_write(&BTRFS_I(inode)->i_mmap_lock); /* * If the swap file was just created, make sure delalloc is done. If the @@ -9797,22 +9809,32 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file, */ ret = btrfs_wait_ordered_range(BTRFS_I(inode), 0, (u64)-1); if (ret) - return ret; + goto out_unlock_mmap; /* * The inode is locked, so these flags won't change after we check them. 
*/ if (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS) { btrfs_warn(fs_info, "swapfile must not be compressed"); - return -EINVAL; + ret = -EINVAL; + goto out_unlock_mmap; } if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)) { btrfs_warn(fs_info, "swapfile must not be copy-on-write"); - return -EINVAL; + ret = -EINVAL; + goto out_unlock_mmap; } if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) { btrfs_warn(fs_info, "swapfile must not be checksummed"); - return -EINVAL; + ret = -EINVAL; + goto out_unlock_mmap; + } + + path = btrfs_alloc_path(); + backref_ctx = btrfs_alloc_backref_share_check_ctx(); + if (!path || !backref_ctx) { + ret = -ENOMEM; + goto out_unlock_mmap; } /* @@ -9827,7 +9849,8 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file, if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_SWAP_ACTIVATE)) { btrfs_warn(fs_info, "cannot activate swapfile while exclusive operation is running"); - return -EBUSY; + ret = -EBUSY; + goto out_unlock_mmap; } /* @@ -9841,7 +9864,8 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file, btrfs_exclop_finish(fs_info); btrfs_warn(fs_info, "cannot activate swapfile because snapshot creation is in progress"); - return -EINVAL; + ret = -EINVAL; + goto out_unlock_mmap; } /* * Snapshots can create extents which require COW even if NODATACOW is @@ -9862,7 +9886,8 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file, btrfs_warn(fs_info, "cannot activate swapfile because subvolume %llu is being deleted", btrfs_root_id(root)); - return -EPERM; + ret = -EPERM; + goto out_unlock_mmap; } atomic_inc(&root->nr_swapfiles); spin_unlock(&root->root_item_lock); @@ -9870,24 +9895,39 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file, isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize); lock_extent(io_tree, 0, isize - 1, &cached_state); - start = 0; - while (start < isize) { - u64 logical_block_start, physical_block_start; + while (prev_extent_end < isize) { + struct btrfs_key key; + struct extent_buffer *leaf; + struct btrfs_file_extent_item *ei; struct btrfs_block_group *bg; - u64 len = isize - start; + u64 logical_block_start; + u64 physical_block_start; + u64 extent_gen; + u64 disk_bytenr; + u64 len; - em = btrfs_get_extent(BTRFS_I(inode), NULL, start, len); - if (IS_ERR(em)) { - ret = PTR_ERR(em); + key.objectid = btrfs_ino(BTRFS_I(inode)); + key.type = BTRFS_EXTENT_DATA_KEY; + key.offset = prev_extent_end; + + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); + if (ret < 0) goto out; - } - if (em->disk_bytenr == EXTENT_MAP_HOLE) { + /* + * If key not found it means we have an implicit hole (NO_HOLES + * is enabled). 
+ */ + if (ret > 0) { btrfs_warn(fs_info, "swapfile must not have holes"); ret = -EINVAL; goto out; } - if (em->disk_bytenr == EXTENT_MAP_INLINE) { + + leaf = path->nodes[0]; + ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); + + if (btrfs_file_extent_type(leaf, ei) == BTRFS_FILE_EXTENT_INLINE) { /* * It's unlikely we'll ever actually find ourselves * here, as a file small enough to fit inline won't be @@ -9899,23 +9939,45 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file, ret = -EINVAL; goto out; } - if (extent_map_is_compressed(em)) { + + if (btrfs_file_extent_compression(leaf, ei) != BTRFS_COMPRESS_NONE) { btrfs_warn(fs_info, "swapfile must not be compressed"); ret = -EINVAL; goto out; } - logical_block_start = extent_map_block_start(em) + (start - em->start); - len = min(len, em->len - (start - em->start)); - free_extent_map(em); - em = NULL; + disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei); + if (disk_bytenr == 0) { + btrfs_warn(fs_info, "swapfile must not have holes"); + ret = -EINVAL; + goto out; + } + + logical_block_start = disk_bytenr + btrfs_file_extent_offset(leaf, ei); + extent_gen = btrfs_file_extent_generation(leaf, ei); + prev_extent_end = btrfs_file_extent_end(path); + + if (prev_extent_end > isize) + len = isize - key.offset; + else + len = btrfs_file_extent_num_bytes(leaf, ei); + + backref_ctx->curr_leaf_bytenr = leaf->start; + + /* + * Don't need the path anymore, release to avoid deadlocks when + * calling btrfs_is_data_extent_shared() because when joining a + * transaction it can block waiting for the current one's commit + * which in turn may be trying to lock the same leaf to flush + * delayed items for example. + */ + btrfs_release_path(path); - ret = can_nocow_extent(inode, start, &len, NULL, false, true); + ret = btrfs_is_data_extent_shared(BTRFS_I(inode), disk_bytenr, + extent_gen, backref_ctx); if (ret < 0) { goto out; - } else if (ret) { - ret = 0; - } else { + } else if (ret > 0) { btrfs_warn(fs_info, "swapfile must not be copy-on-write"); ret = -EINVAL; @@ -9950,7 +10012,6 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file, physical_block_start = (map->stripes[0].physical + (logical_block_start - map->start)); - len = min(len, map->chunk_len - (logical_block_start - map->start)); btrfs_free_chunk_map(map); map = NULL; @@ -9991,20 +10052,16 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file, if (ret) goto out; } - bsi.start = start; + bsi.start = key.offset; bsi.block_start = physical_block_start; bsi.block_len = len; } - - start += len; } if (bsi.block_len) ret = btrfs_add_swap_extent(sis, &bsi); out: - if (!IS_ERR_OR_NULL(em)) - free_extent_map(em); if (!IS_ERR_OR_NULL(map)) btrfs_free_chunk_map(map); @@ -10017,6 +10074,10 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file, btrfs_exclop_finish(fs_info); +out_unlock_mmap: + up_write(&BTRFS_I(inode)->i_mmap_lock); + btrfs_free_backref_share_ctx(backref_ctx); + btrfs_free_path(path); if (ret) return ret; diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index a0e8deca87a7..e70ed857fc74 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -1122,6 +1122,7 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info, fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON; if (simple) { fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE; + btrfs_set_fs_incompat(fs_info, SIMPLE_QUOTA); btrfs_set_qgroup_status_enable_gen(leaf, ptr, trans->transid); } 
else { fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; @@ -1255,8 +1256,6 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info, spin_lock(&fs_info->qgroup_lock); fs_info->quota_root = quota_root; set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags); - if (simple) - btrfs_set_fs_incompat(fs_info, SIMPLE_QUOTA); spin_unlock(&fs_info->qgroup_lock); /* Skip rescan for simple qgroups. */ diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index f3834f8d26b4..adcbdc970f9e 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -2902,6 +2902,7 @@ static int relocate_one_folio(struct reloc_control *rc, const bool use_rst = btrfs_need_stripe_tree_update(fs_info, rc->block_group->flags); ASSERT(index <= last_index); +again: folio = filemap_lock_folio(inode->i_mapping, index); if (IS_ERR(folio)) { @@ -2937,6 +2938,11 @@ static int relocate_one_folio(struct reloc_control *rc, ret = -EIO; goto release_folio; } + if (folio->mapping != inode->i_mapping) { + folio_unlock(folio); + folio_put(folio); + goto again; + } } /* diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index 0cb11dcd10cd..b1015f383f75 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -5291,6 +5291,7 @@ static int put_file_data(struct send_ctx *sctx, u64 offset, u32 len) unsigned cur_len = min_t(unsigned, len, PAGE_SIZE - pg_offset); +again: folio = filemap_lock_folio(mapping, index); if (IS_ERR(folio)) { page_cache_sync_readahead(mapping, @@ -5323,6 +5324,11 @@ static int put_file_data(struct send_ctx *sctx, u64 offset, u32 len) ret = -EIO; break; } + if (folio->mapping != mapping) { + folio_unlock(folio); + folio_put(folio); + goto again; + } } memcpy_from_folio(sctx->send_buf + sctx->send_size, folio, diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index 03926ad467c9..5912d5057766 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -1118,7 +1118,7 @@ static ssize_t btrfs_nodesize_show(struct kobject *kobj, { struct btrfs_fs_info *fs_info = to_fs_info(kobj); - return sysfs_emit(buf, "%u\n", fs_info->super_copy->nodesize); + return sysfs_emit(buf, "%u\n", fs_info->nodesize); } BTRFS_ATTR(, nodesize, btrfs_nodesize_show); @@ -1128,7 +1128,7 @@ static ssize_t btrfs_sectorsize_show(struct kobject *kobj, { struct btrfs_fs_info *fs_info = to_fs_info(kobj); - return sysfs_emit(buf, "%u\n", fs_info->super_copy->sectorsize); + return sysfs_emit(buf, "%u\n", fs_info->sectorsize); } BTRFS_ATTR(, sectorsize, btrfs_sectorsize_show); @@ -1180,7 +1180,7 @@ static ssize_t btrfs_clone_alignment_show(struct kobject *kobj, { struct btrfs_fs_info *fs_info = to_fs_info(kobj); - return sysfs_emit(buf, "%u\n", fs_info->super_copy->sectorsize); + return sysfs_emit(buf, "%u\n", fs_info->sectorsize); } BTRFS_ATTR(, clone_alignment, btrfs_clone_alignment_show); diff --git a/fs/ceph/file.c b/fs/ceph/file.c index 67468d88f139..851d70200c6b 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -1552,7 +1552,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter, } op = &req->r_ops[0]; - if (sparse) { + if (!write && sparse) { extent_cnt = __ceph_sparse_read_ext_count(inode, size); ret = ceph_alloc_sparse_ext_map(op, extent_cnt); if (ret) { diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c index 6d0455973d64..49aede376d86 100644 --- a/fs/nfsd/export.c +++ b/fs/nfsd/export.c @@ -40,24 +40,15 @@ #define EXPKEY_HASHMAX (1 << EXPKEY_HASHBITS) #define EXPKEY_HASHMASK (EXPKEY_HASHMAX -1) -static void expkey_put_work(struct work_struct *work) +static void expkey_put(struct kref *ref) { - struct svc_expkey *key = - 
container_of(to_rcu_work(work), struct svc_expkey, ek_rcu_work); + struct svc_expkey *key = container_of(ref, struct svc_expkey, h.ref); if (test_bit(CACHE_VALID, &key->h.flags) && !test_bit(CACHE_NEGATIVE, &key->h.flags)) path_put(&key->ek_path); auth_domain_put(key->ek_client); - kfree(key); -} - -static void expkey_put(struct kref *ref) -{ - struct svc_expkey *key = container_of(ref, struct svc_expkey, h.ref); - - INIT_RCU_WORK(&key->ek_rcu_work, expkey_put_work); - queue_rcu_work(system_wq, &key->ek_rcu_work); + kfree_rcu(key, ek_rcu); } static int expkey_upcall(struct cache_detail *cd, struct cache_head *h) @@ -364,26 +355,16 @@ static void export_stats_destroy(struct export_stats *stats) EXP_STATS_COUNTERS_NUM); } -static void svc_export_put_work(struct work_struct *work) +static void svc_export_put(struct kref *ref) { - struct svc_export *exp = - container_of(to_rcu_work(work), struct svc_export, ex_rcu_work); - + struct svc_export *exp = container_of(ref, struct svc_export, h.ref); path_put(&exp->ex_path); auth_domain_put(exp->ex_client); nfsd4_fslocs_free(&exp->ex_fslocs); export_stats_destroy(exp->ex_stats); kfree(exp->ex_stats); kfree(exp->ex_uuid); - kfree(exp); -} - -static void svc_export_put(struct kref *ref) -{ - struct svc_export *exp = container_of(ref, struct svc_export, h.ref); - - INIT_RCU_WORK(&exp->ex_rcu_work, svc_export_put_work); - queue_rcu_work(system_wq, &exp->ex_rcu_work); + kfree_rcu(exp, ex_rcu); } static int svc_export_upcall(struct cache_detail *cd, struct cache_head *h) diff --git a/fs/nfsd/export.h b/fs/nfsd/export.h index 081afb68681e..3794ae253a70 100644 --- a/fs/nfsd/export.h +++ b/fs/nfsd/export.h @@ -75,7 +75,7 @@ struct svc_export { u32 ex_layout_types; struct nfsd4_deviceid_map *ex_devid_map; struct cache_detail *cd; - struct rcu_work ex_rcu_work; + struct rcu_head ex_rcu; unsigned long ex_xprtsec_modes; struct export_stats *ex_stats; }; @@ -92,7 +92,7 @@ struct svc_expkey { u32 ek_fsid[6]; struct path ek_path; - struct rcu_work ek_rcu_work; + struct rcu_head ek_rcu; }; #define EX_ISSYNC(exp) (!((exp)->ex_flags & NFSEXP_ASYNC)) diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index b8cbb1556004..de0763652549 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -1058,7 +1058,7 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c args.authflavor = clp->cl_cred.cr_flavor; clp->cl_cb_ident = conn->cb_ident; } else { - if (!conn->cb_xprt) + if (!conn->cb_xprt || !ses) return -EINVAL; clp->cl_cb_session = ses; args.bc_xprt = conn->cb_xprt; @@ -1461,8 +1461,6 @@ static void nfsd4_process_cb_update(struct nfsd4_callback *cb) ses = c->cn_session; } spin_unlock(&clp->cl_lock); - if (!c) - return; err = setup_callback_client(clp, &conn, ses); if (err) { diff --git a/fs/smb/client/Kconfig b/fs/smb/client/Kconfig index 2aff6d1395ce..9f05f94e265a 100644 --- a/fs/smb/client/Kconfig +++ b/fs/smb/client/Kconfig @@ -2,7 +2,6 @@ config CIFS tristate "SMB3 and CIFS support (advanced network filesystem)" depends on INET - select NETFS_SUPPORT select NLS select NLS_UCS2_UTILS select CRYPTO diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c index d1bd69cbfe09..4750505465ae 100644 --- a/fs/smb/client/smb2pdu.c +++ b/fs/smb/client/smb2pdu.c @@ -4855,6 +4855,8 @@ smb2_writev_callback(struct mid_q_entry *mid) if (written > wdata->subreq.len) written &= 0xFFFF; + cifs_stats_bytes_written(tcon, written); + if (written < wdata->subreq.len) wdata->result = -ENOSPC; else @@ -5171,6 +5173,7 @@ SMB2_write(const 
unsigned int xid, struct cifs_io_parms *io_parms, cifs_dbg(VFS, "Send error in write = %d\n", rc); } else { *nbytes = le32_to_cpu(rsp->DataLength); + cifs_stats_bytes_written(io_parms->tcon, *nbytes); trace_smb3_write_done(0, 0, xid, req->PersistentFileId, io_parms->tcon->tid, diff --git a/fs/smb/server/smb_common.c b/fs/smb/server/smb_common.c index 75b4eb856d32..af8e24163bf2 100644 --- a/fs/smb/server/smb_common.c +++ b/fs/smb/server/smb_common.c @@ -18,8 +18,8 @@ #include "mgmt/share_config.h" /*for shortname implementation */ -static const char basechars[43] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_-!@#$%"; -#define MANGLE_BASE (sizeof(basechars) / sizeof(char) - 1) +static const char *basechars = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_-!@#$%"; +#define MANGLE_BASE (strlen(basechars) - 1) #define MAGIC_CHAR '~' #define PERIOD '.' #define mangle(V) ((char)(basechars[(V) % MANGLE_BASE])) diff --git a/fs/udf/namei.c b/fs/udf/namei.c index 78a603129dd5..2cb49b6b0716 100644 --- a/fs/udf/namei.c +++ b/fs/udf/namei.c @@ -517,7 +517,11 @@ static int udf_rmdir(struct inode *dir, struct dentry *dentry) inode->i_nlink); clear_nlink(inode); inode->i_size = 0; - inode_dec_link_count(dir); + if (dir->i_nlink >= 3) + inode_dec_link_count(dir); + else + udf_warn(inode->i_sb, "parent dir link count too low (%u)\n", + dir->i_nlink); udf_add_fid_counter(dir->i_sb, true, -1); inode_set_mtime_to_ts(dir, inode_set_ctime_to_ts(dir, inode_set_ctime_current(inode))); @@ -787,8 +791,18 @@ static int udf_rename(struct mnt_idmap *idmap, struct inode *old_dir, retval = -ENOTEMPTY; if (!empty_dir(new_inode)) goto out_oiter; + retval = -EFSCORRUPTED; + if (new_inode->i_nlink != 2) + goto out_oiter; } + retval = -EFSCORRUPTED; + if (old_dir->i_nlink < 3) + goto out_oiter; is_dir = true; + } else if (new_inode) { + retval = -EFSCORRUPTED; + if (new_inode->i_nlink < 1) + goto out_oiter; } if (is_dir && old_dir != new_dir) { retval = udf_fiiter_find_entry(old_inode, &dotdot_name, diff --git a/include/linux/platform_data/amd_qdma.h b/include/linux/platform_data/amd_qdma.h index 576d952f97ed..967a6ef31cf9 100644 --- a/include/linux/platform_data/amd_qdma.h +++ b/include/linux/platform_data/amd_qdma.h @@ -26,11 +26,13 @@ struct dma_slave_map; * @max_mm_channels: Maximum number of MM DMA channels in each direction * @device_map: DMA slave map * @irq_index: The index of first IRQ + * @dma_dev: The device pointer for dma operations */ struct qdma_platdata { u32 max_mm_channels; u32 irq_index; struct dma_slave_map *device_map; + struct device *dma_dev; }; #endif /* _PLATDATA_AMD_QDMA_H */ diff --git a/include/linux/sched.h b/include/linux/sched.h index c14446c6164d..02eaf84c8626 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1633,8 +1633,9 @@ static inline unsigned int __task_state_index(unsigned int tsk_state, * We're lying here, but rather than expose a completely new task state * to userspace, we can make this appear as if the task has gone through * a regular rt_mutex_lock() call. + * Report frozen tasks as uninterruptible. 
*/ - if (tsk_state & TASK_RTLOCK_WAIT) + if ((tsk_state & TASK_RTLOCK_WAIT) || (tsk_state & TASK_FROZEN)) state = TASK_UNINTERRUPTIBLE; return fls(state); diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index d9b03e0746e7..2cbe0c22a32f 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -317,17 +317,22 @@ static inline void sock_drop(struct sock *sk, struct sk_buff *skb) kfree_skb(skb); } -static inline void sk_psock_queue_msg(struct sk_psock *psock, +static inline bool sk_psock_queue_msg(struct sk_psock *psock, struct sk_msg *msg) { + bool ret; + spin_lock_bh(&psock->ingress_lock); - if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) + if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) { list_add_tail(&msg->list, &psock->ingress_msg); - else { + ret = true; + } else { sk_msg_free(psock->sk, msg); kfree(msg); + ret = false; } spin_unlock_bh(&psock->ingress_lock); + return ret; } static inline struct sk_msg *sk_psock_dequeue_msg(struct sk_psock *psock) diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index 4df2ff81d3de..77769ff50544 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -379,7 +379,7 @@ struct trace_event_call { struct list_head list; struct trace_event_class *class; union { - char *name; + const char *name; /* Set TRACE_EVENT_FL_TRACEPOINT flag when using "tp" */ struct tracepoint *tp; }; diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index d2761bf8ff32..9f3a04345b86 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -515,7 +515,7 @@ static inline const char *node_stat_name(enum node_stat_item item) static inline const char *lru_list_name(enum lru_list lru) { - return node_stat_name(NR_LRU_BASE + lru) + 3; // skip "nr_" + return node_stat_name(NR_LRU_BASE + (enum node_stat_item)lru) + 3; // skip "nr_" } #if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG) diff --git a/include/net/sock.h b/include/net/sock.h index f29c14448938..fa055cf1785e 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1521,7 +1521,7 @@ static inline bool sk_wmem_schedule(struct sock *sk, int size) } static inline bool -sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size) +__sk_rmem_schedule(struct sock *sk, int size, bool pfmemalloc) { int delta; @@ -1529,7 +1529,13 @@ sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size) return true; delta = size - sk->sk_forward_alloc; return delta <= 0 || __sk_mem_schedule(sk, delta, SK_MEM_RECV) || - skb_pfmemalloc(skb); + pfmemalloc; +} + +static inline bool +sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size) +{ + return __sk_rmem_schedule(sk, size, skb_pfmemalloc(skb)); } static inline int sk_unused_reserved_mem(const struct sock *sk) diff --git a/include/uapi/linux/stddef.h b/include/uapi/linux/stddef.h index 58154117d9b0..a6fce46aeb37 100644 --- a/include/uapi/linux/stddef.h +++ b/include/uapi/linux/stddef.h @@ -8,6 +8,13 @@ #define __always_inline inline #endif +/* Not all C++ standards support type declarations inside an anonymous union */ +#ifndef __cplusplus +#define __struct_group_tag(TAG) TAG +#else +#define __struct_group_tag(TAG) +#endif + /** * __struct_group() - Create a mirrored named and anonyomous struct * @@ -20,13 +27,13 @@ * and size: one anonymous and one named. The former's members can be used * normally without sub-struct naming, and the latter can be used to * reason about the start, end, and size of the group of struct members. 
- * The named struct can also be explicitly tagged for layer reuse, as well - * as both having struct attributes appended. + * The named struct can also be explicitly tagged for layer reuse (C only), + * as well as both having struct attributes appended. */ #define __struct_group(TAG, NAME, ATTRS, MEMBERS...) \ union { \ struct { MEMBERS } ATTRS; \ - struct TAG { MEMBERS } ATTRS NAME; \ + struct __struct_group_tag(TAG) { MEMBERS } ATTRS NAME; \ } ATTRS #ifdef __cplusplus diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c index a26593979887..1cfcc735b8e3 100644 --- a/io_uring/sqpoll.c +++ b/io_uring/sqpoll.c @@ -412,6 +412,7 @@ void io_sqpoll_wait_sq(struct io_ring_ctx *ctx) __cold int io_sq_offload_create(struct io_ring_ctx *ctx, struct io_uring_params *p) { + struct task_struct *task_to_put = NULL; int ret; /* Retain compatibility with failing for an invalid attach attempt */ @@ -492,6 +493,7 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx, } sqd->thread = tsk; + task_to_put = get_task_struct(tsk); ret = io_uring_alloc_task_context(tsk, ctx); wake_up_new_task(tsk); if (ret) @@ -502,11 +504,15 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx, goto err; } + if (task_to_put) + put_task_struct(task_to_put); return 0; err_sqpoll: complete(&ctx->sq_data->exited); err: io_sq_thread_finish(ctx); + if (task_to_put) + put_task_struct(task_to_put); return ret; } diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 4c486a0bfcc4..767f1cb8c27e 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -7868,7 +7868,7 @@ static int process_dynptr_func(struct bpf_verifier_env *env, int regno, int insn if (reg->type != PTR_TO_STACK && reg->type != CONST_PTR_TO_DYNPTR) { verbose(env, "arg#%d expected pointer to stack or const struct bpf_dynptr\n", - regno); + regno - 1); return -EINVAL; } @@ -7922,7 +7922,7 @@ static int process_dynptr_func(struct bpf_verifier_env *env, int regno, int insn if (!is_dynptr_reg_valid_init(env, reg)) { verbose(env, "Expected an initialized dynptr as arg #%d\n", - regno); + regno - 1); return -EINVAL; } @@ -7930,7 +7930,7 @@ static int process_dynptr_func(struct bpf_verifier_env *env, int regno, int insn if (!is_dynptr_type_expected(env, reg, arg_type & ~MEM_RDONLY)) { verbose(env, "Expected a dynptr of type %s as arg #%d\n", - dynptr_type_str(arg_to_dynptr_type(arg_type)), regno); + dynptr_type_str(arg_to_dynptr_type(arg_type)), regno - 1); return -EINVAL; } @@ -7999,7 +7999,7 @@ static int process_iter_arg(struct bpf_verifier_env *env, int regno, int insn_id */ btf_id = btf_check_iter_arg(meta->btf, meta->func_proto, regno - 1); if (btf_id < 0) { - verbose(env, "expected valid iter pointer as arg #%d\n", regno); + verbose(env, "expected valid iter pointer as arg #%d\n", regno - 1); return -EINVAL; } t = btf_type_by_id(meta->btf, btf_id); @@ -8009,7 +8009,7 @@ static int process_iter_arg(struct bpf_verifier_env *env, int regno, int insn_id /* bpf_iter_<type>_new() expects pointer to uninit iter state */ if (!is_iter_reg_valid_uninit(env, reg, nr_slots)) { verbose(env, "expected uninitialized iter_%s as arg #%d\n", - iter_type_str(meta->btf, btf_id), regno); + iter_type_str(meta->btf, btf_id), regno - 1); return -EINVAL; } @@ -8033,7 +8033,7 @@ static int process_iter_arg(struct bpf_verifier_env *env, int regno, int insn_id break; case -EINVAL: verbose(env, "expected an initialized iter_%s as arg #%d\n", - iter_type_str(meta->btf, btf_id), regno); + iter_type_str(meta->btf, btf_id), regno - 1); return err; case -EPROTO: verbose(env, 
"expected an RCU CS when using %s\n", meta->func_name); @@ -21085,11 +21085,15 @@ static int do_misc_fixups(struct bpf_verifier_env *env) * changed in some incompatible and hard to support * way, it's fine to back out this inlining logic */ +#ifdef CONFIG_SMP insn_buf[0] = BPF_MOV32_IMM(BPF_REG_0, (u32)(unsigned long)&pcpu_hot.cpu_number); insn_buf[1] = BPF_MOV64_PERCPU_REG(BPF_REG_0, BPF_REG_0); insn_buf[2] = BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0); cnt = 3; - +#else + insn_buf[0] = BPF_ALU32_REG(BPF_XOR, BPF_REG_0, BPF_REG_0); + cnt = 1; +#endif new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); if (!new_prog) return -ENOMEM; diff --git a/kernel/fork.c b/kernel/fork.c index ce8be55e5e04..e192bdbc9ade 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -640,11 +640,8 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm, LIST_HEAD(uf); VMA_ITERATOR(vmi, mm, 0); - uprobe_start_dup_mmap(); - if (mmap_write_lock_killable(oldmm)) { - retval = -EINTR; - goto fail_uprobe_end; - } + if (mmap_write_lock_killable(oldmm)) + return -EINTR; flush_cache_dup_mm(oldmm); uprobe_dup_mmap(oldmm, mm); /* @@ -783,8 +780,6 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm, dup_userfaultfd_complete(&uf); else dup_userfaultfd_fail(&uf); -fail_uprobe_end: - uprobe_end_dup_mmap(); return retval; fail_nomem_anon_vma_fork: @@ -1692,9 +1687,11 @@ static struct mm_struct *dup_mm(struct task_struct *tsk, if (!mm_init(mm, tsk, mm->user_ns)) goto fail_nomem; + uprobe_start_dup_mmap(); err = dup_mmap(mm, oldmm); if (err) goto free_pt; + uprobe_end_dup_mmap(); mm->hiwater_rss = get_mm_rss(mm); mm->hiwater_vm = mm->total_vm; @@ -1709,6 +1706,8 @@ static struct mm_struct *dup_mm(struct task_struct *tsk, mm->binfmt = NULL; mm_init_owner(mm, NULL); mmput(mm); + if (err) + uprobe_end_dup_mmap(); fail_nomem: return NULL; diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 35515192aa0f..b04990385a6a 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -5111,6 +5111,9 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf, cpumask_var_t tracing_cpumask_new; int err; + if (count == 0 || count > KMALLOC_MAX_SIZE) + return -EINVAL; + if (!zalloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL)) return -ENOMEM; diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 263fac44d3ca..935a886af40c 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -725,7 +725,7 @@ static int trace_kprobe_module_callback(struct notifier_block *nb, static struct notifier_block trace_kprobe_module_nb = { .notifier_call = trace_kprobe_module_callback, - .priority = 1 /* Invoked after kprobe module callback */ + .priority = 2 /* Invoked after kprobe and jump_label module callback */ }; static int trace_kprobe_register_module_notifier(void) { diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 9d078b37fe0b..abac770bc0b4 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -1173,6 +1173,8 @@ EXPORT_SYMBOL(ceph_osdc_new_request); int __ceph_alloc_sparse_ext_map(struct ceph_osd_req_op *op, int cnt) { + WARN_ON(op->op != CEPH_OSD_OP_SPARSE_READ); + op->extent.sparse_ext_cnt = cnt; op->extent.sparse_ext = kmalloc_array(cnt, sizeof(*op->extent.sparse_ext), diff --git a/net/core/filter.c b/net/core/filter.c index 9a459213d283..55495063621d 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -3751,13 +3751,22 @@ static const struct bpf_func_proto bpf_skb_adjust_room_proto = { static u32 __bpf_skb_min_len(const struct sk_buff 
*skb) { - u32 min_len = skb_network_offset(skb); + int offset = skb_network_offset(skb); + u32 min_len = 0; - if (skb_transport_header_was_set(skb)) - min_len = skb_transport_offset(skb); - if (skb->ip_summed == CHECKSUM_PARTIAL) - min_len = skb_checksum_start_offset(skb) + - skb->csum_offset + sizeof(__sum16); + if (offset > 0) + min_len = offset; + if (skb_transport_header_was_set(skb)) { + offset = skb_transport_offset(skb); + if (offset > 0) + min_len = offset; + } + if (skb->ip_summed == CHECKSUM_PARTIAL) { + offset = skb_checksum_start_offset(skb) + + skb->csum_offset + sizeof(__sum16); + if (offset > 0) + min_len = offset; + } return min_len; } diff --git a/net/core/skmsg.c b/net/core/skmsg.c index e90fbab703b2..8ad7e6755fd6 100644 --- a/net/core/skmsg.c +++ b/net/core/skmsg.c @@ -445,8 +445,10 @@ int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg, if (likely(!peek)) { sge->offset += copy; sge->length -= copy; - if (!msg_rx->skb) + if (!msg_rx->skb) { sk_mem_uncharge(sk, copy); + atomic_sub(copy, &sk->sk_rmem_alloc); + } msg_rx->sg.size -= copy; if (!sge->length) { @@ -772,6 +774,8 @@ static void __sk_psock_purge_ingress_msg(struct sk_psock *psock) list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) { list_del(&msg->list); + if (!msg->skb) + atomic_sub(msg->sg.size, &psock->sk->sk_rmem_alloc); sk_msg_free(psock->sk, msg); kfree(msg); } diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c index 99cef92e6290..392678ae80f4 100644 --- a/net/ipv4/tcp_bpf.c +++ b/net/ipv4/tcp_bpf.c @@ -49,13 +49,14 @@ static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock, sge = sk_msg_elem(msg, i); size = (apply && apply_bytes < sge->length) ? apply_bytes : sge->length; - if (!sk_wmem_schedule(sk, size)) { + if (!__sk_rmem_schedule(sk, size, false)) { if (!copied) ret = -ENOMEM; break; } sk_mem_charge(sk, size); + atomic_add(size, &sk->sk_rmem_alloc); sk_msg_xfer(tmp, msg, i, size); copied += size; if (sge->length) @@ -74,7 +75,8 @@ static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock, if (!ret) { msg->sg.start = i; - sk_psock_queue_msg(psock, tmp); + if (!sk_psock_queue_msg(psock, tmp)) + atomic_sub(copied, &sk->sk_rmem_alloc); sk_psock_data_ready(sk, psock); } else { sk_msg_free(sk, tmp); diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c index 13b71069ae18..b3853583d2ae 100644 --- a/sound/core/memalloc.c +++ b/sound/core/memalloc.c @@ -505,7 +505,7 @@ static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size) if (!p) return NULL; dmab->addr = dma_map_single(dmab->dev.dev, p, size, DMA_BIDIRECTIONAL); - if (dmab->addr == DMA_MAPPING_ERROR) { + if (dma_mapping_error(dmab->dev.dev, dmab->addr)) { do_free_pages(dmab->area, size, true); return NULL; } diff --git a/sound/core/ump.c b/sound/core/ump.c index 8d37f237f83b..bd26bb2210cb 100644 --- a/sound/core/ump.c +++ b/sound/core/ump.c @@ -37,6 +37,7 @@ static int process_legacy_output(struct snd_ump_endpoint *ump, u32 *buffer, int count); static void process_legacy_input(struct snd_ump_endpoint *ump, const u32 *src, int words); +static void update_legacy_names(struct snd_ump_endpoint *ump); #else static inline int process_legacy_output(struct snd_ump_endpoint *ump, u32 *buffer, int count) @@ -47,6 +48,9 @@ static inline void process_legacy_input(struct snd_ump_endpoint *ump, const u32 *src, int words) { } +static inline void update_legacy_names(struct snd_ump_endpoint *ump) +{ +} #endif static const struct snd_rawmidi_global_ops snd_ump_rawmidi_ops = { @@ -861,6 +865,7 @@ 
static int ump_handle_fb_info_msg(struct snd_ump_endpoint *ump, fill_fb_info(ump, &fb->info, buf); if (ump->parsed) { snd_ump_update_group_attrs(ump); + update_legacy_names(ump); seq_notify_fb_change(ump, fb); } } @@ -893,6 +898,7 @@ static int ump_handle_fb_name_msg(struct snd_ump_endpoint *ump, /* notify the FB name update to sequencer, too */ if (ret > 0 && ump->parsed) { snd_ump_update_group_attrs(ump); + update_legacy_names(ump); seq_notify_fb_change(ump, fb); } return ret; @@ -1087,6 +1093,8 @@ static int snd_ump_legacy_open(struct snd_rawmidi_substream *substream) guard(mutex)(&ump->open_mutex); if (ump->legacy_substreams[dir][group]) return -EBUSY; + if (!ump->groups[group].active) + return -ENODEV; if (dir == SNDRV_RAWMIDI_STREAM_OUTPUT) { if (!ump->legacy_out_opens) { err = snd_rawmidi_kernel_open(&ump->core, 0, @@ -1254,11 +1262,20 @@ static void fill_substream_names(struct snd_ump_endpoint *ump, name = ump->groups[idx].name; if (!*name) name = ump->info.name; - snprintf(s->name, sizeof(s->name), "Group %d (%.16s)", - idx + 1, name); + scnprintf(s->name, sizeof(s->name), "Group %d (%.16s)%s", + idx + 1, name, + ump->groups[idx].active ? "" : " [Inactive]"); } } +static void update_legacy_names(struct snd_ump_endpoint *ump) +{ + struct snd_rawmidi *rmidi = ump->legacy_rmidi; + + fill_substream_names(ump, rmidi, SNDRV_RAWMIDI_STREAM_INPUT); + fill_substream_names(ump, rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT); +} + int snd_ump_attach_legacy_rawmidi(struct snd_ump_endpoint *ump, char *id, int device) { @@ -1295,10 +1312,7 @@ int snd_ump_attach_legacy_rawmidi(struct snd_ump_endpoint *ump, rmidi->ops = &snd_ump_legacy_ops; rmidi->private_data = ump; ump->legacy_rmidi = rmidi; - if (input) - fill_substream_names(ump, rmidi, SNDRV_RAWMIDI_STREAM_INPUT); - if (output) - fill_substream_names(ump, rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT); + update_legacy_names(ump); ump_dbg(ump, "Created a legacy rawmidi #%d (%s)\n", device, id); return 0; diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 2e9f817b948e..538c37a78a56 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -307,6 +307,7 @@ enum { CXT_FIXUP_HP_MIC_NO_PRESENCE, CXT_PINCFG_SWS_JS201D, CXT_PINCFG_TOP_SPEAKER, + CXT_FIXUP_HP_A_U, }; /* for hda_fixup_thinkpad_acpi() */ @@ -774,6 +775,18 @@ static void cxt_setup_mute_led(struct hda_codec *codec, } } +static void cxt_setup_gpio_unmute(struct hda_codec *codec, + unsigned int gpio_mute_mask) +{ + if (gpio_mute_mask) { + // set gpio data to 0. + snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA, 0); + snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_MASK, gpio_mute_mask); + snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DIRECTION, gpio_mute_mask); + snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_STICKY_MASK, 0); + } +} + static void cxt_fixup_mute_led_gpio(struct hda_codec *codec, const struct hda_fixup *fix, int action) { @@ -788,6 +801,15 @@ static void cxt_fixup_hp_zbook_mute_led(struct hda_codec *codec, cxt_setup_mute_led(codec, 0x10, 0x20); } +static void cxt_fixup_hp_a_u(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + // Init vers in BIOS mute the spk/hp by set gpio high to avoid pop noise, + // so need to unmute once by clearing the gpio data when runs into the system. 
+ if (action == HDA_FIXUP_ACT_INIT) + cxt_setup_gpio_unmute(codec, 0x2); +} + /* ThinkPad X200 & co with cxt5051 */ static const struct hda_pintbl cxt_pincfg_lenovo_x200[] = { { 0x16, 0x042140ff }, /* HP (seq# overridden) */ @@ -998,6 +1020,10 @@ static const struct hda_fixup cxt_fixups[] = { { } }, }, + [CXT_FIXUP_HP_A_U] = { + .type = HDA_FIXUP_FUNC, + .v.func = cxt_fixup_hp_a_u, + }, }; static const struct hda_quirk cxt5045_fixups[] = { @@ -1072,6 +1098,7 @@ static const struct hda_quirk cxt5066_fixups[] = { SND_PCI_QUIRK(0x103c, 0x8457, "HP Z2 G4 mini", CXT_FIXUP_HP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x8458, "HP Z2 G4 mini premium", CXT_FIXUP_HP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN), + SND_PCI_QUIRK(0x14f1, 0x0252, "MBX-Z60MR100", CXT_FIXUP_HP_A_U), SND_PCI_QUIRK(0x14f1, 0x0265, "SWS JS201D", CXT_PINCFG_SWS_JS201D), SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT_FIXUP_OLPC_XO), SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410), @@ -1117,6 +1144,7 @@ static const struct hda_model_fixup cxt5066_fixup_models[] = { { .id = CXT_PINCFG_LENOVO_NOTEBOOK, .name = "lenovo-20149" }, { .id = CXT_PINCFG_SWS_JS201D, .name = "sws-js201d" }, { .id = CXT_PINCFG_TOP_SPEAKER, .name = "sirius-top-speaker" }, + { .id = CXT_FIXUP_HP_A_U, .name = "HP-U-support" }, {} }; diff --git a/sound/sh/sh_dac_audio.c b/sound/sh/sh_dac_audio.c index e7b6ce7bd086..1c1c14708f01 100644 --- a/sound/sh/sh_dac_audio.c +++ b/sound/sh/sh_dac_audio.c @@ -163,7 +163,7 @@ static int snd_sh_dac_pcm_copy(struct snd_pcm_substream *substream, /* channel is not used (interleaved data) */ struct snd_sh_dac *chip = snd_pcm_substream_chip(substream); - if (copy_from_iter_toio(chip->data_buffer + pos, src, count)) + if (copy_from_iter(chip->data_buffer + pos, count, src) != count) return -EFAULT; chip->buffer_end = chip->data_buffer + pos + count; @@ -182,7 +182,7 @@ static int snd_sh_dac_pcm_silence(struct snd_pcm_substream *substream, /* channel is not used (interleaved data) */ struct snd_sh_dac *chip = snd_pcm_substream_chip(substream); - memset_io(chip->data_buffer + pos, 0, count); + memset(chip->data_buffer + pos, 0, count); chip->buffer_end = chip->data_buffer + pos + count; if (chip->empty) { @@ -211,7 +211,6 @@ static const struct snd_pcm_ops snd_sh_dac_pcm_ops = { .pointer = snd_sh_dac_pcm_pointer, .copy = snd_sh_dac_pcm_copy, .fill_silence = snd_sh_dac_pcm_silence, - .mmap = snd_pcm_lib_mmap_iomem, }; static int snd_sh_dac_pcm(struct snd_sh_dac *chip, int device) diff --git a/sound/soc/amd/ps/pci-ps.c b/sound/soc/amd/ps/pci-ps.c index c72d666d51bd..5c4a0be7a788 100644 --- a/sound/soc/amd/ps/pci-ps.c +++ b/sound/soc/amd/ps/pci-ps.c @@ -375,11 +375,18 @@ static int get_acp63_device_config(struct pci_dev *pci, struct acp63_dev_data *a { struct acpi_device *pdm_dev; const union acpi_object *obj; + acpi_handle handle; + acpi_integer dmic_status; u32 config; bool is_dmic_dev = false; bool is_sdw_dev = false; + bool wov_en, dmic_en; int ret; + /* IF WOV entry not found, enable dmic based on acp-audio-device-type entry*/ + wov_en = true; + dmic_en = false; + config = readl(acp_data->acp63_base + ACP_PIN_CONFIG); switch (config) { case ACP_CONFIG_4: @@ -412,10 +419,18 @@ static int get_acp63_device_config(struct pci_dev *pci, struct acp63_dev_data *a if (!acpi_dev_get_property(pdm_dev, "acp-audio-device-type", ACPI_TYPE_INTEGER, &obj) && obj->integer.value == ACP_DMIC_DEV) - is_dmic_dev = true; + dmic_en = true; } + + handle = ACPI_HANDLE(&pci->dev); + ret = 
acpi_evaluate_integer(handle, "_WOV", NULL, &dmic_status); + if (!ACPI_FAILURE(ret)) + wov_en = dmic_status; } + if (dmic_en && wov_en) + is_dmic_dev = true; + if (acp_data->is_sdw_config) { ret = acp_scan_sdw_devices(&pci->dev, ACP63_SDW_ADDR); if (!ret && acp_data->info.link_mask) diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c index db57292c00ca..41042259f2b2 100644 --- a/sound/soc/intel/boards/sof_sdw.c +++ b/sound/soc/intel/boards/sof_sdw.c @@ -608,7 +608,7 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = { .callback = sof_sdw_quirk_cb, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "233C") + DMI_MATCH(DMI_PRODUCT_NAME, "21QB") }, /* Note this quirk excludes the CODEC mic */ .driver_data = (void *)(SOC_SDW_CODEC_MIC), @@ -617,9 +617,26 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = { .callback = sof_sdw_quirk_cb, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "233B") + DMI_MATCH(DMI_PRODUCT_NAME, "21QA") }, - .driver_data = (void *)(SOC_SDW_SIDECAR_AMPS), + /* Note this quirk excludes the CODEC mic */ + .driver_data = (void *)(SOC_SDW_CODEC_MIC), + }, + { + .callback = sof_sdw_quirk_cb, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "21Q6") + }, + .driver_data = (void *)(SOC_SDW_SIDECAR_AMPS | SOC_SDW_CODEC_MIC), + }, + { + .callback = sof_sdw_quirk_cb, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "21Q7") + }, + .driver_data = (void *)(SOC_SDW_SIDECAR_AMPS | SOC_SDW_CODEC_MIC), }, /* ArrowLake devices */ diff --git a/sound/soc/sof/intel/hda-dai.c b/sound/soc/sof/intel/hda-dai.c index ac505c7ad342..82f46ecd9430 100644 --- a/sound/soc/sof/intel/hda-dai.c +++ b/sound/soc/sof/intel/hda-dai.c @@ -103,8 +103,10 @@ hda_dai_get_ops(struct snd_pcm_substream *substream, struct snd_soc_dai *cpu_dai return sdai->platform_private; } -int hda_link_dma_cleanup(struct snd_pcm_substream *substream, struct hdac_ext_stream *hext_stream, - struct snd_soc_dai *cpu_dai) +static int +hda_link_dma_cleanup(struct snd_pcm_substream *substream, + struct hdac_ext_stream *hext_stream, + struct snd_soc_dai *cpu_dai, bool release) { const struct hda_dai_widget_dma_ops *ops = hda_dai_get_ops(substream, cpu_dai); struct sof_intel_hda_stream *hda_stream; @@ -128,6 +130,17 @@ int hda_link_dma_cleanup(struct snd_pcm_substream *substream, struct hdac_ext_st snd_hdac_ext_bus_link_clear_stream_id(hlink, stream_tag); } + if (!release) { + /* + * Force stream reconfiguration without releasing the channel on + * subsequent stream restart (without free), including LinkDMA + * reset. 
+ * The stream is released via hda_dai_hw_free() + */ + hext_stream->link_prepared = 0; + return 0; + } + if (ops->release_hext_stream) ops->release_hext_stream(sdev, cpu_dai, substream); @@ -211,7 +224,7 @@ static int __maybe_unused hda_dai_hw_free(struct snd_pcm_substream *substream, if (!hext_stream) return 0; - return hda_link_dma_cleanup(substream, hext_stream, cpu_dai); + return hda_link_dma_cleanup(substream, hext_stream, cpu_dai, true); } static int __maybe_unused hda_dai_hw_params_data(struct snd_pcm_substream *substream, @@ -304,7 +317,8 @@ static int __maybe_unused hda_dai_trigger(struct snd_pcm_substream *substream, i switch (cmd) { case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: - ret = hda_link_dma_cleanup(substream, hext_stream, dai); + ret = hda_link_dma_cleanup(substream, hext_stream, dai, + cmd == SNDRV_PCM_TRIGGER_STOP ? false : true); if (ret < 0) { dev_err(sdev->dev, "%s: failed to clean up link DMA\n", __func__); return ret; @@ -656,8 +670,7 @@ static int hda_dai_suspend(struct hdac_bus *bus) } ret = hda_link_dma_cleanup(hext_stream->link_substream, - hext_stream, - cpu_dai); + hext_stream, cpu_dai, true); if (ret < 0) return ret; } diff --git a/sound/soc/sof/intel/hda.h b/sound/soc/sof/intel/hda.h index b74a472435b5..4a4a0b55f0bc 100644 --- a/sound/soc/sof/intel/hda.h +++ b/sound/soc/sof/intel/hda.h @@ -1028,8 +1028,6 @@ const struct hda_dai_widget_dma_ops * hda_select_dai_widget_ops(struct snd_sof_dev *sdev, struct snd_sof_widget *swidget); int hda_dai_config(struct snd_soc_dapm_widget *w, unsigned int flags, struct snd_sof_dai_config_data *data); -int hda_link_dma_cleanup(struct snd_pcm_substream *substream, struct hdac_ext_stream *hext_stream, - struct snd_soc_dai *cpu_dai); static inline struct snd_sof_dev *widget_to_sdev(struct snd_soc_dapm_widget *w) { diff --git a/tools/include/uapi/linux/stddef.h b/tools/include/uapi/linux/stddef.h index bb6ea517efb5..c53cde425406 100644 --- a/tools/include/uapi/linux/stddef.h +++ b/tools/include/uapi/linux/stddef.h @@ -8,6 +8,13 @@ #define __always_inline __inline__ #endif +/* Not all C++ standards support type declarations inside an anonymous union */ +#ifndef __cplusplus +#define __struct_group_tag(TAG) TAG +#else +#define __struct_group_tag(TAG) +#endif + /** * __struct_group() - Create a mirrored named and anonyomous struct * @@ -20,14 +27,14 @@ * and size: one anonymous and one named. The former's members can be used * normally without sub-struct naming, and the latter can be used to * reason about the start, end, and size of the group of struct members. - * The named struct can also be explicitly tagged for layer reuse, as well - * as both having struct attributes appended. + * The named struct can also be explicitly tagged for layer reuse (C only), + * as well as both having struct attributes appended. */ #define __struct_group(TAG, NAME, ATTRS, MEMBERS...) 
\ union { \ struct { MEMBERS } ATTRS; \ - struct TAG { MEMBERS } ATTRS NAME; \ - } + struct __struct_group_tag(TAG) { MEMBERS } ATTRS NAME; \ + } ATTRS /** * __DECLARE_FLEX_ARRAY() - Declare a flexible array usable in a union diff --git a/tools/objtool/noreturns.h b/tools/objtool/noreturns.h index e7da92489167..f98dc0e1c99c 100644 --- a/tools/objtool/noreturns.h +++ b/tools/objtool/noreturns.h @@ -20,6 +20,7 @@ NORETURN(__x64_sys_exit_group) NORETURN(arch_cpu_idle_dead) NORETURN(bch2_trans_in_restart_error) NORETURN(bch2_trans_restart_error) +NORETURN(bch2_trans_unlocked_error) NORETURN(cpu_bringup_and_idle) NORETURN(cpu_startup_entry) NORETURN(do_exit) diff --git a/tools/testing/selftests/bpf/progs/dynptr_fail.c b/tools/testing/selftests/bpf/progs/dynptr_fail.c index 8f36c9de7591..dfd817d0348c 100644 --- a/tools/testing/selftests/bpf/progs/dynptr_fail.c +++ b/tools/testing/selftests/bpf/progs/dynptr_fail.c @@ -149,7 +149,7 @@ int ringbuf_release_uninit_dynptr(void *ctx) /* A dynptr can't be used after it has been invalidated */ SEC("?raw_tp") -__failure __msg("Expected an initialized dynptr as arg #3") +__failure __msg("Expected an initialized dynptr as arg #2") int use_after_invalid(void *ctx) { struct bpf_dynptr ptr; @@ -428,7 +428,7 @@ int invalid_helper2(void *ctx) /* A bpf_dynptr is invalidated if it's been written into */ SEC("?raw_tp") -__failure __msg("Expected an initialized dynptr as arg #1") +__failure __msg("Expected an initialized dynptr as arg #0") int invalid_write1(void *ctx) { struct bpf_dynptr ptr; @@ -1407,7 +1407,7 @@ int invalid_slice_rdwr_rdonly(struct __sk_buff *skb) /* bpf_dynptr_adjust can only be called on initialized dynptrs */ SEC("?raw_tp") -__failure __msg("Expected an initialized dynptr as arg #1") +__failure __msg("Expected an initialized dynptr as arg #0") int dynptr_adjust_invalid(void *ctx) { struct bpf_dynptr ptr = {}; @@ -1420,7 +1420,7 @@ int dynptr_adjust_invalid(void *ctx) /* bpf_dynptr_is_null can only be called on initialized dynptrs */ SEC("?raw_tp") -__failure __msg("Expected an initialized dynptr as arg #1") +__failure __msg("Expected an initialized dynptr as arg #0") int dynptr_is_null_invalid(void *ctx) { struct bpf_dynptr ptr = {}; @@ -1433,7 +1433,7 @@ int dynptr_is_null_invalid(void *ctx) /* bpf_dynptr_is_rdonly can only be called on initialized dynptrs */ SEC("?raw_tp") -__failure __msg("Expected an initialized dynptr as arg #1") +__failure __msg("Expected an initialized dynptr as arg #0") int dynptr_is_rdonly_invalid(void *ctx) { struct bpf_dynptr ptr = {}; @@ -1446,7 +1446,7 @@ int dynptr_is_rdonly_invalid(void *ctx) /* bpf_dynptr_size can only be called on initialized dynptrs */ SEC("?raw_tp") -__failure __msg("Expected an initialized dynptr as arg #1") +__failure __msg("Expected an initialized dynptr as arg #0") int dynptr_size_invalid(void *ctx) { struct bpf_dynptr ptr = {}; @@ -1459,7 +1459,7 @@ int dynptr_size_invalid(void *ctx) /* Only initialized dynptrs can be cloned */ SEC("?raw_tp") -__failure __msg("Expected an initialized dynptr as arg #1") +__failure __msg("Expected an initialized dynptr as arg #0") int clone_invalid1(void *ctx) { struct bpf_dynptr ptr1 = {}; @@ -1493,7 +1493,7 @@ int clone_invalid2(struct xdp_md *xdp) /* Invalidating a dynptr should invalidate its clones */ SEC("?raw_tp") -__failure __msg("Expected an initialized dynptr as arg #3") +__failure __msg("Expected an initialized dynptr as arg #2") int clone_invalidate1(void *ctx) { struct bpf_dynptr clone; @@ -1514,7 +1514,7 @@ int clone_invalidate1(void *ctx) 
/* Invalidating a dynptr should invalidate its parent */ SEC("?raw_tp") -__failure __msg("Expected an initialized dynptr as arg #3") +__failure __msg("Expected an initialized dynptr as arg #2") int clone_invalidate2(void *ctx) { struct bpf_dynptr ptr; @@ -1535,7 +1535,7 @@ int clone_invalidate2(void *ctx) /* Invalidating a dynptr should invalidate its siblings */ SEC("?raw_tp") -__failure __msg("Expected an initialized dynptr as arg #3") +__failure __msg("Expected an initialized dynptr as arg #2") int clone_invalidate3(void *ctx) { struct bpf_dynptr ptr; @@ -1723,7 +1723,7 @@ __noinline long global_call_bpf_dynptr(const struct bpf_dynptr *dynptr) } SEC("?raw_tp") -__failure __msg("arg#1 expected pointer to stack or const struct bpf_dynptr") +__failure __msg("arg#0 expected pointer to stack or const struct bpf_dynptr") int test_dynptr_reg_type(void *ctx) { struct task_struct *current = NULL; diff --git a/tools/testing/selftests/bpf/progs/iters_state_safety.c b/tools/testing/selftests/bpf/progs/iters_state_safety.c index d47e59aba6de..f41257eadbb2 100644 --- a/tools/testing/selftests/bpf/progs/iters_state_safety.c +++ b/tools/testing/selftests/bpf/progs/iters_state_safety.c @@ -73,7 +73,7 @@ int create_and_forget_to_destroy_fail(void *ctx) } SEC("?raw_tp") -__failure __msg("expected an initialized iter_num as arg #1") +__failure __msg("expected an initialized iter_num as arg #0") int destroy_without_creating_fail(void *ctx) { /* init with zeros to stop verifier complaining about uninit stack */ @@ -91,7 +91,7 @@ int destroy_without_creating_fail(void *ctx) } SEC("?raw_tp") -__failure __msg("expected an initialized iter_num as arg #1") +__failure __msg("expected an initialized iter_num as arg #0") int compromise_iter_w_direct_write_fail(void *ctx) { struct bpf_iter_num iter; @@ -143,7 +143,7 @@ int compromise_iter_w_direct_write_and_skip_destroy_fail(void *ctx) } SEC("?raw_tp") -__failure __msg("expected an initialized iter_num as arg #1") +__failure __msg("expected an initialized iter_num as arg #0") int compromise_iter_w_helper_write_fail(void *ctx) { struct bpf_iter_num iter; @@ -230,7 +230,7 @@ int valid_stack_reuse(void *ctx) } SEC("?raw_tp") -__failure __msg("expected uninitialized iter_num as arg #1") +__failure __msg("expected uninitialized iter_num as arg #0") int double_create_fail(void *ctx) { struct bpf_iter_num iter; @@ -258,7 +258,7 @@ int double_create_fail(void *ctx) } SEC("?raw_tp") -__failure __msg("expected an initialized iter_num as arg #1") +__failure __msg("expected an initialized iter_num as arg #0") int double_destroy_fail(void *ctx) { struct bpf_iter_num iter; @@ -284,7 +284,7 @@ int double_destroy_fail(void *ctx) } SEC("?raw_tp") -__failure __msg("expected an initialized iter_num as arg #1") +__failure __msg("expected an initialized iter_num as arg #0") int next_without_new_fail(void *ctx) { struct bpf_iter_num iter; @@ -305,7 +305,7 @@ int next_without_new_fail(void *ctx) } SEC("?raw_tp") -__failure __msg("expected an initialized iter_num as arg #1") +__failure __msg("expected an initialized iter_num as arg #0") int next_after_destroy_fail(void *ctx) { struct bpf_iter_num iter; diff --git a/tools/testing/selftests/bpf/progs/iters_testmod_seq.c b/tools/testing/selftests/bpf/progs/iters_testmod_seq.c index 4a176e6aede8..6543d5b6e0a9 100644 --- a/tools/testing/selftests/bpf/progs/iters_testmod_seq.c +++ b/tools/testing/selftests/bpf/progs/iters_testmod_seq.c @@ -79,7 +79,7 @@ int testmod_seq_truncated(const void *ctx) SEC("?raw_tp") __failure -__msg("expected an 
initialized iter_testmod_seq as arg #2") +__msg("expected an initialized iter_testmod_seq as arg #1") int testmod_seq_getter_before_bad(const void *ctx) { struct bpf_iter_testmod_seq it; @@ -89,7 +89,7 @@ int testmod_seq_getter_before_bad(const void *ctx) SEC("?raw_tp") __failure -__msg("expected an initialized iter_testmod_seq as arg #2") +__msg("expected an initialized iter_testmod_seq as arg #1") int testmod_seq_getter_after_bad(const void *ctx) { struct bpf_iter_testmod_seq it; diff --git a/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c b/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c index e68667aec6a6..cd4d752bd089 100644 --- a/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c +++ b/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c @@ -45,7 +45,7 @@ int BPF_PROG(not_valid_dynptr, int cmd, union bpf_attr *attr, unsigned int size) } SEC("?lsm.s/bpf") -__failure __msg("arg#1 expected pointer to stack or const struct bpf_dynptr") +__failure __msg("arg#0 expected pointer to stack or const struct bpf_dynptr") int BPF_PROG(not_ptr_to_stack, int cmd, union bpf_attr *attr, unsigned int size) { unsigned long val = 0; diff --git a/tools/testing/selftests/bpf/progs/verifier_bits_iter.c b/tools/testing/selftests/bpf/progs/verifier_bits_iter.c index a7a6ae6c162f..8bcddadfc4da 100644 --- a/tools/testing/selftests/bpf/progs/verifier_bits_iter.c +++ b/tools/testing/selftests/bpf/progs/verifier_bits_iter.c @@ -32,7 +32,7 @@ int BPF_PROG(no_destroy, struct bpf_iter_meta *meta, struct cgroup *cgrp) SEC("iter/cgroup") __description("uninitialized iter in ->next()") -__failure __msg("expected an initialized iter_bits as arg #1") +__failure __msg("expected an initialized iter_bits as arg #0") int BPF_PROG(next_uninit, struct bpf_iter_meta *meta, struct cgroup *cgrp) { struct bpf_iter_bits it = {}; @@ -43,7 +43,7 @@ int BPF_PROG(next_uninit, struct bpf_iter_meta *meta, struct cgroup *cgrp) SEC("iter/cgroup") __description("uninitialized iter in ->destroy()") -__failure __msg("expected an initialized iter_bits as arg #1") +__failure __msg("expected an initialized iter_bits as arg #0") int BPF_PROG(destroy_uninit, struct bpf_iter_meta *meta, struct cgroup *cgrp) { struct bpf_iter_bits it = {}; diff --git a/tools/testing/selftests/bpf/trace_helpers.c b/tools/testing/selftests/bpf/trace_helpers.c index 2d742fdac6b9..81943c6254e6 100644 --- a/tools/testing/selftests/bpf/trace_helpers.c +++ b/tools/testing/selftests/bpf/trace_helpers.c @@ -293,6 +293,10 @@ static int procmap_query(int fd, const void *addr, __u32 query_flags, size_t *st return 0; } #else +# ifndef PROCMAP_QUERY_VMA_EXECUTABLE +# define PROCMAP_QUERY_VMA_EXECUTABLE 0x04 +# endif + static int procmap_query(int fd, const void *addr, __u32 query_flags, size_t *start, size_t *offset, int *flags) { return -EOPNOTSUPP; diff --git a/tools/tracing/rtla/src/timerlat_hist.c b/tools/tracing/rtla/src/timerlat_hist.c index ae55cd791283..2cc3ffcbc983 100644 --- a/tools/tracing/rtla/src/timerlat_hist.c +++ b/tools/tracing/rtla/src/timerlat_hist.c @@ -280,6 +280,21 @@ static void timerlat_hist_header(struct osnoise_tool *tool) trace_seq_reset(s); } +/* + * format_summary_value - format a line of summary value (min, max or avg) + * of hist data + */ +static void format_summary_value(struct trace_seq *seq, + int count, + unsigned long long val, + bool avg) +{ + if (count) + trace_seq_printf(seq, "%9llu ", avg ? 
val / count : val); + else + trace_seq_printf(seq, "%9c ", '-'); +} + /* * timerlat_print_summary - print the summary of the hist data to the output */ @@ -327,29 +342,23 @@ timerlat_print_summary(struct timerlat_hist_params *params, if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count) continue; - if (!params->no_irq) { - if (data->hist[cpu].irq_count) - trace_seq_printf(trace->seq, "%9llu ", - data->hist[cpu].min_irq); - else - trace_seq_printf(trace->seq, " - "); - } + if (!params->no_irq) + format_summary_value(trace->seq, + data->hist[cpu].irq_count, + data->hist[cpu].min_irq, + false); - if (!params->no_thread) { - if (data->hist[cpu].thread_count) - trace_seq_printf(trace->seq, "%9llu ", - data->hist[cpu].min_thread); - else - trace_seq_printf(trace->seq, " - "); - } + if (!params->no_thread) + format_summary_value(trace->seq, + data->hist[cpu].thread_count, + data->hist[cpu].min_thread, + false); - if (params->user_hist) { - if (data->hist[cpu].user_count) - trace_seq_printf(trace->seq, "%9llu ", - data->hist[cpu].min_user); - else - trace_seq_printf(trace->seq, " - "); - } + if (params->user_hist) + format_summary_value(trace->seq, + data->hist[cpu].user_count, + data->hist[cpu].min_user, + false); } trace_seq_printf(trace->seq, "\n"); @@ -363,29 +372,23 @@ timerlat_print_summary(struct timerlat_hist_params *params, if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count) continue; - if (!params->no_irq) { - if (data->hist[cpu].irq_count) - trace_seq_printf(trace->seq, "%9llu ", - data->hist[cpu].sum_irq / data->hist[cpu].irq_count); - else - trace_seq_printf(trace->seq, " - "); - } + if (!params->no_irq) + format_summary_value(trace->seq, + data->hist[cpu].irq_count, + data->hist[cpu].sum_irq, + true); - if (!params->no_thread) { - if (data->hist[cpu].thread_count) - trace_seq_printf(trace->seq, "%9llu ", - data->hist[cpu].sum_thread / data->hist[cpu].thread_count); - else - trace_seq_printf(trace->seq, " - "); - } + if (!params->no_thread) + format_summary_value(trace->seq, + data->hist[cpu].thread_count, + data->hist[cpu].sum_thread, + true); - if (params->user_hist) { - if (data->hist[cpu].user_count) - trace_seq_printf(trace->seq, "%9llu ", - data->hist[cpu].sum_user / data->hist[cpu].user_count); - else - trace_seq_printf(trace->seq, " - "); - } + if (params->user_hist) + format_summary_value(trace->seq, + data->hist[cpu].user_count, + data->hist[cpu].sum_user, + true); } trace_seq_printf(trace->seq, "\n"); @@ -399,29 +402,23 @@ timerlat_print_summary(struct timerlat_hist_params *params, if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count) continue; - if (!params->no_irq) { - if (data->hist[cpu].irq_count) - trace_seq_printf(trace->seq, "%9llu ", - data->hist[cpu].max_irq); - else - trace_seq_printf(trace->seq, " - "); - } + if (!params->no_irq) + format_summary_value(trace->seq, + data->hist[cpu].irq_count, + data->hist[cpu].max_irq, + false); - if (!params->no_thread) { - if (data->hist[cpu].thread_count) - trace_seq_printf(trace->seq, "%9llu ", - data->hist[cpu].max_thread); - else - trace_seq_printf(trace->seq, " - "); - } + if (!params->no_thread) + format_summary_value(trace->seq, + data->hist[cpu].thread_count, + data->hist[cpu].max_thread, + false); - if (params->user_hist) { - if (data->hist[cpu].user_count) - trace_seq_printf(trace->seq, "%9llu ", - data->hist[cpu].max_user); - else - trace_seq_printf(trace->seq, " - "); - } + if (params->user_hist) + format_summary_value(trace->seq, + data->hist[cpu].user_count, + 
data->hist[cpu].max_user, + false); } trace_seq_printf(trace->seq, "\n"); trace_seq_do_printf(trace->seq); @@ -505,16 +502,22 @@ timerlat_print_stats_all(struct timerlat_hist_params *params, trace_seq_printf(trace->seq, "min: "); if (!params->no_irq) - trace_seq_printf(trace->seq, "%9llu ", - sum.min_irq); + format_summary_value(trace->seq, + sum.irq_count, + sum.min_irq, + false); if (!params->no_thread) - trace_seq_printf(trace->seq, "%9llu ", - sum.min_thread); + format_summary_value(trace->seq, + sum.thread_count, + sum.min_thread, + false); if (params->user_hist) - trace_seq_printf(trace->seq, "%9llu ", - sum.min_user); + format_summary_value(trace->seq, + sum.user_count, + sum.min_user, + false); trace_seq_printf(trace->seq, "\n"); @@ -522,16 +525,22 @@ timerlat_print_stats_all(struct timerlat_hist_params *params, trace_seq_printf(trace->seq, "avg: "); if (!params->no_irq) - trace_seq_printf(trace->seq, "%9llu ", - sum.sum_irq / sum.irq_count); + format_summary_value(trace->seq, + sum.irq_count, + sum.sum_irq, + true); if (!params->no_thread) - trace_seq_printf(trace->seq, "%9llu ", - sum.sum_thread / sum.thread_count); + format_summary_value(trace->seq, + sum.thread_count, + sum.sum_thread, + true); if (params->user_hist) - trace_seq_printf(trace->seq, "%9llu ", - sum.sum_user / sum.user_count); + format_summary_value(trace->seq, + sum.user_count, + sum.sum_user, + true); trace_seq_printf(trace->seq, "\n"); @@ -539,16 +548,22 @@ timerlat_print_stats_all(struct timerlat_hist_params *params, trace_seq_printf(trace->seq, "max: "); if (!params->no_irq) - trace_seq_printf(trace->seq, "%9llu ", - sum.max_irq); + format_summary_value(trace->seq, + sum.irq_count, + sum.max_irq, + false); if (!params->no_thread) - trace_seq_printf(trace->seq, "%9llu ", - sum.max_thread); + format_summary_value(trace->seq, + sum.thread_count, + sum.max_thread, + false); if (params->user_hist) - trace_seq_printf(trace->seq, "%9llu ", - sum.max_user); + format_summary_value(trace->seq, + sum.user_count, + sum.max_user, + false); trace_seq_printf(trace->seq, "\n"); trace_seq_do_printf(trace->seq);
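
The timerlat_hist hunks above fold the repeated per-column if/else printing into a single helper, format_summary_value(), which prints a '-' placeholder instead of computing an average when the sample count is zero, so the summary rows no longer risk dividing by a zero count. Below is a minimal stand-alone sketch of that pattern; it is illustrative only, with printf() standing in for trace_seq_printf() and a made-up main() driver, and is not the actual rtla code.

#include <stdbool.h>
#include <stdio.h>

/*
 * Print one 9-character summary column: '-' when no samples were
 * collected, otherwise the value itself or, for the "avg" row, the
 * accumulated sum divided by the sample count.
 */
static void format_summary_value(int count, unsigned long long val, bool avg)
{
	if (count)
		printf("%9llu ", avg ? val / count : val);
	else
		printf("%9c ", '-');
}

int main(void)
{
	/* Hypothetical CPU: 3 IRQ samples (min 7 us, sum 30 us), no user samples. */
	printf("min: ");
	format_summary_value(3, 7, false);	/* prints "        7 " */
	format_summary_value(0, 0, false);	/* prints "        - " */
	printf("\navg: ");
	format_summary_value(3, 30, true);	/* prints "       10 " */
	format_summary_value(0, 0, true);	/* count is zero, so no division happens */
	printf("\n");
	return 0;
}

Keeping the zero-count check inside the helper means every caller, including the aggregate "ALL" summary where a counter can legitimately be zero, gets the same '-' fallback instead of duplicating the guard at each call site.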