A subsequent patch will be issuing SIC from KVM -- export the necessary
routine and make the operation control definitions available from a
header. Because the routine will now be exported, let's swap the purpose
of zpci_set_irq_ctrl and __zpci_set_irq_ctrl, leaving the latter as a
static within pci_irq.c only for SIC calls that don't specify an iib.

Signed-off-by: Matthew Rosato <mjrosato@xxxxxxxxxxxxx>
---
 arch/s390/include/asm/pci_insn.h | 17 +++++++++--------
 arch/s390/pci/pci_insn.c         |  3 ++-
 arch/s390/pci/pci_irq.c          | 28 ++++++++++++++--------------
 3 files changed, 25 insertions(+), 23 deletions(-)

diff --git a/arch/s390/include/asm/pci_insn.h b/arch/s390/include/asm/pci_insn.h
index 61cf9531f68f..5331082fa516 100644
--- a/arch/s390/include/asm/pci_insn.h
+++ b/arch/s390/include/asm/pci_insn.h
@@ -98,6 +98,14 @@ struct zpci_fib {
 	u32 gd;
 } __packed __aligned(8);
 
+/* Set Interruption Controls Operation Controls */
+#define	SIC_IRQ_MODE_ALL		0
+#define	SIC_IRQ_MODE_SINGLE		1
+#define	SIC_IRQ_MODE_DIRECT		4
+#define	SIC_IRQ_MODE_D_ALL		16
+#define	SIC_IRQ_MODE_D_SINGLE		17
+#define	SIC_IRQ_MODE_SET_CPU		18
+
 /* directed interruption information block */
 struct zpci_diib {
 	u32 : 1;
@@ -134,13 +142,6 @@ int __zpci_store(u64 data, u64 req, u64 offset);
 int zpci_store(const volatile void __iomem *addr, u64 data, unsigned long len);
 int __zpci_store_block(const u64 *data, u64 req, u64 offset);
 void zpci_barrier(void);
-int __zpci_set_irq_ctrl(u16 ctl, u8 isc, union zpci_sic_iib *iib);
-
-static inline int zpci_set_irq_ctrl(u16 ctl, u8 isc)
-{
-	union zpci_sic_iib iib = {{0}};
-
-	return __zpci_set_irq_ctrl(ctl, isc, &iib);
-}
+int zpci_set_irq_ctrl(u16 ctl, u8 isc, union zpci_sic_iib *iib);
 
 #endif
diff --git a/arch/s390/pci/pci_insn.c b/arch/s390/pci/pci_insn.c
index 28d863aaafea..d1a8bd43ce26 100644
--- a/arch/s390/pci/pci_insn.c
+++ b/arch/s390/pci/pci_insn.c
@@ -97,7 +97,7 @@ int zpci_refresh_trans(u64 fn, u64 addr, u64 range)
 }
 
 /* Set Interruption Controls */
-int __zpci_set_irq_ctrl(u16 ctl, u8 isc, union zpci_sic_iib *iib)
+int zpci_set_irq_ctrl(u16 ctl, u8 isc, union zpci_sic_iib *iib)
 {
 	if (!test_facility(72))
 		return -EIO;
@@ -108,6 +108,7 @@ int __zpci_set_irq_ctrl(u16 ctl, u8 isc, union zpci_sic_iib *iib)
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(zpci_set_irq_ctrl);
 
 /* PCI Load */
 static inline int ____pcilg(u64 *data, u64 req, u64 offset, u8 *status)
diff --git a/arch/s390/pci/pci_irq.c b/arch/s390/pci/pci_irq.c
index dfd4f3276a6d..6b29e39496d1 100644
--- a/arch/s390/pci/pci_irq.c
+++ b/arch/s390/pci/pci_irq.c
@@ -15,13 +15,6 @@
 
 static enum {FLOATING, DIRECTED} irq_delivery;
 
-#define	SIC_IRQ_MODE_ALL		0
-#define	SIC_IRQ_MODE_SINGLE		1
-#define	SIC_IRQ_MODE_DIRECT		4
-#define	SIC_IRQ_MODE_D_ALL		16
-#define	SIC_IRQ_MODE_D_SINGLE		17
-#define	SIC_IRQ_MODE_SET_CPU		18
-
 /*
  * summary bit vector
  * FLOATING - summary bit per function
@@ -145,6 +138,13 @@ static int zpci_set_irq_affinity(struct irq_data *data, const struct cpumask *de
 	return IRQ_SET_MASK_OK;
 }
 
+static inline int __zpci_set_irq_ctrl(u16 ctl, u8 isc)
+{
+	union zpci_sic_iib iib = {{0}};
+
+	return zpci_set_irq_ctrl(ctl, isc, &iib);
+}
+
 static struct irq_chip zpci_irq_chip = {
 	.name = "PCI-MSI",
 	.irq_unmask = pci_msi_unmask_irq,
@@ -165,7 +165,7 @@ static void zpci_handle_cpu_local_irq(bool rescan)
 			/* End of second scan with interrupts on. */
 			break;
 		/* First scan complete, reenable interrupts. */
-		if (zpci_set_irq_ctrl(SIC_IRQ_MODE_D_SINGLE, PCI_ISC))
+		if (__zpci_set_irq_ctrl(SIC_IRQ_MODE_D_SINGLE, PCI_ISC))
 			break;
 		bit = 0;
 		continue;
@@ -203,7 +203,7 @@ static void zpci_handle_fallback_irq(void)
 			/* End of second scan with interrupts on. */
 			break;
 		/* First scan complete, reenable interrupts. */
-		if (zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, PCI_ISC))
+		if (__zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, PCI_ISC))
 			break;
 		cpu = 0;
 		continue;
@@ -247,7 +247,7 @@ static void zpci_floating_irq_handler(struct airq_struct *airq,
 			/* End of second scan with interrupts on. */
 			break;
 		/* First scan complete, reenable interrupts. */
-		if (zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, PCI_ISC))
+		if (__zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, PCI_ISC))
			break;
 		si = 0;
 		continue;
@@ -412,8 +412,8 @@ static void __init cpu_enable_directed_irq(void *unused)
 
 	iib.cdiib.dibv_addr = (u64) zpci_ibv[smp_processor_id()]->vector;
 
-	__zpci_set_irq_ctrl(SIC_IRQ_MODE_SET_CPU, 0, &iib);
-	zpci_set_irq_ctrl(SIC_IRQ_MODE_D_SINGLE, PCI_ISC);
+	zpci_set_irq_ctrl(SIC_IRQ_MODE_SET_CPU, 0, &iib);
+	__zpci_set_irq_ctrl(SIC_IRQ_MODE_D_SINGLE, PCI_ISC);
 }
 
 static int __init zpci_directed_irq_init(void)
@@ -428,7 +428,7 @@ static int __init zpci_directed_irq_init(void)
 	iib.diib.isc = PCI_ISC;
 	iib.diib.nr_cpus = num_possible_cpus();
 	iib.diib.disb_addr = (u64) zpci_sbv->vector;
-	__zpci_set_irq_ctrl(SIC_IRQ_MODE_DIRECT, 0, &iib);
+	zpci_set_irq_ctrl(SIC_IRQ_MODE_DIRECT, 0, &iib);
 
 	zpci_ibv = kcalloc(num_possible_cpus(), sizeof(*zpci_ibv),
 			   GFP_KERNEL);
@@ -504,7 +504,7 @@ int __init zpci_irq_init(void)
 	 * Enable floating IRQs (with suppression after one IRQ). When using
 	 * directed IRQs this enables the fallback path.
 	 */
-	zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, PCI_ISC);
+	__zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, PCI_ISC);
 
 	return 0;
 out_airq:
-- 
2.27.0
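[Editor's note, not part of the patch: a minimal sketch of how an external
caller might use the newly exported routine once this lands. The function
name example_reenable_float_irqs() is made up for illustration; only
zpci_set_irq_ctrl(), union zpci_sic_iib, the SIC_IRQ_MODE_* operation
controls and PCI_ISC come from this patch or from existing s390 headers.]

	#include <asm/pci_insn.h>
	#include <asm/isc.h>

	/*
	 * Hypothetical caller: mirrors what the old inline
	 * zpci_set_irq_ctrl() wrapper did, i.e. pass a zeroed iib
	 * when the operation control carries no iib data.
	 */
	static int example_reenable_float_irqs(void)
	{
		union zpci_sic_iib iib = {{0}};

		return zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, PCI_ISC, &iib);
	}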