@@ -806,16 +806,27 @@ static int iommu_ga_log_enable(struct amd_iommu *iommu)
 {
 #ifdef CONFIG_IRQ_REMAP
 	u32 status, i;
+	u64 entry;
 
 	if (!iommu->ga_log)
 		return -EINVAL;
 
-	status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
-
 	/* Check if already running */
-	if (status & (MMIO_STATUS_GALOG_RUN_MASK))
+	status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
+	if (WARN_ON(status & (MMIO_STATUS_GALOG_RUN_MASK)))
 		return 0;
 
+	entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
+	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
+		    &entry, sizeof(entry));
+	entry = (iommu_virt_to_phys(iommu->ga_log_tail) &
+		 (BIT_ULL(52)-1)) & ~7ULL;
+	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
+		    &entry, sizeof(entry));
+	writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
+	writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET);
+
+
 	iommu_feature_enable(iommu, CONTROL_GAINT_EN);
 	iommu_feature_enable(iommu, CONTROL_GALOG_EN);
 
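The block added above programs two registers that, per the later hunks, used to be written only in iommu_init_ga_log(): the GA log base register takes the log's physical address OR'd with the GA_LOG_SIZE_512 size encoding, and the tail register takes the tail area's physical address clipped to a 52-bit, 8-byte-aligned value. A minimal standalone sketch of just that bit arithmetic, with invented addresses standing in for the iommu_virt_to_phys() results and the size encoding treated as an assumption:

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

#define BIT_ULL(n)      (1ULL << (n))
/* assumed value; the real definition lives in amd_iommu_types.h */
#define GA_LOG_SIZE_512 (0x8ULL << 56)

int main(void)
{
	/* invented stand-ins for iommu_virt_to_phys(iommu->ga_log{,_tail}) */
	uint64_t ga_log_phys  = 0x123456000ULL;
	uint64_t ga_tail_phys = 0x123457abcULL;

	/* base register: physical address | size field in the high bits */
	uint64_t base = ga_log_phys | GA_LOG_SIZE_512;

	/* tail register: keep a 52-bit physical address, clear the low
	 * three bits so the pointer stays 8-byte aligned */
	uint64_t tail = (ga_tail_phys & (BIT_ULL(52) - 1)) & ~7ULL;

	printf("base = 0x%016" PRIx64 "\n", base);
	printf("tail = 0x%016" PRIx64 "\n", tail);
	return 0;
}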
@@ -825,7 +836,7 @@ static int iommu_ga_log_enable(struct amd_iommu *iommu)
 			break;
 	}
 
-	if (i >= LOOP_TIMEOUT)
+	if (WARN_ON(i >= LOOP_TIMEOUT))
 		return -EINVAL;
 #endif /* CONFIG_IRQ_REMAP */
 	return 0;
@@ -834,8 +845,6 @@ static int iommu_ga_log_enable(struct amd_iommu *iommu)
 static int iommu_init_ga_log(struct amd_iommu *iommu)
 {
 #ifdef CONFIG_IRQ_REMAP
-	u64 entry;
-
 	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
 		return 0;
 
@@ -849,16 +858,6 @@ static int iommu_init_ga_log(struct amd_iommu *iommu)
 	if (!iommu->ga_log_tail)
 		goto err_out;
 
-	entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
-	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
-		    &entry, sizeof(entry));
-	entry = (iommu_virt_to_phys(iommu->ga_log_tail) &
-		 (BIT_ULL(52)-1)) & ~7ULL;
-	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
-		    &entry, sizeof(entry));
-	writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
-	writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET);
-
 	return 0;
 err_out:
 	free_ga_log(iommu);
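Together with the first hunk, this removal relocates the GA log base/tail programming from iommu_init_ga_log(), which runs once at boot, into iommu_ga_log_enable(), which also runs when the IOMMU comes back from suspend. After a suspend cycle the MMIO registers no longer hold what boot wrote into them, so programming them only at init leaves the GA log pointing nowhere. A toy model of that failure mode (plain C, every name invented for illustration):

#include <stdint.h>
#include <stdio.h>

static uint64_t ga_log_base_reg;        /* pretend MMIO register */

static void suspend_resume(void) { ga_log_base_reg = 0; } /* power loss */

/* old split: init programs the register, enable only flips control bits */
static void init_old(uint64_t phys)   { ga_log_base_reg = phys; }
static void enable_old(void)          { /* CONTROL_GALOG_EN only */ }

/* new split: enable (re)programs the register every time */
static void enable_new(uint64_t phys) { ga_log_base_reg = phys; }

int main(void)
{
	init_old(0x123456000ULL);
	suspend_resume();
	enable_old();
	printf("old scheme after resume: base=%#llx (log lost)\n",
	       (unsigned long long)ga_log_base_reg);

	suspend_resume();
	enable_new(0x123456000ULL);
	printf("new scheme after resume: base=%#llx (restored)\n",
	       (unsigned long long)ga_log_base_reg);
	return 0;
}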
@@ -1523,7 +1522,7 @@ static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
 }
 
 /*
- * This function clues the initialization function for one IOMMU
+ * This function glues the initialization function for one IOMMU
  * together and also allocates the command buffer and programs the
  * hardware. It does NOT enable the IOMMU. This is done afterwards.
  */
@@ -2016,48 +2015,18 @@ union intcapxt {
 	};
 } __attribute__ ((packed));
 
-/*
- * There isn't really any need to mask/unmask at the irqchip level because
- * the 64-bit INTCAPXT registers can be updated atomically without tearing
- * when the affinity is being updated.
- */
-static void intcapxt_unmask_irq(struct irq_data *data)
-{
-}
-
-static void intcapxt_mask_irq(struct irq_data *data)
-{
-}
 
 static struct irq_chip intcapxt_controller;
 
 static int intcapxt_irqdomain_activate(struct irq_domain *domain,
 				       struct irq_data *irqd, bool reserve)
 {
-	struct amd_iommu *iommu = irqd->chip_data;
-	struct irq_cfg *cfg = irqd_cfg(irqd);
-	union intcapxt xt;
-
-	xt.capxt = 0ULL;
-	xt.dest_mode_logical = apic->dest_mode_logical;
-	xt.vector = cfg->vector;
-	xt.destid_0_23 = cfg->dest_apicid & GENMASK(23, 0);
-	xt.destid_24_31 = cfg->dest_apicid >> 24;
-
-	/**
-	 * Current IOMMU implemtation uses the same IRQ for all
-	 * 3 IOMMU interrupts.
-	 */
-	writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET);
-	writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET);
-	writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET);
 	return 0;
 }
 
 static void intcapxt_irqdomain_deactivate(struct irq_domain *domain,
 					  struct irq_data *irqd)
 {
-	intcapxt_mask_irq(irqd);
 }
 
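The removed code programmed the three INTCAPXT registers exactly once, at irqdomain activation, and made mask/unmask deliberate no-ops; the next hunk re-adds the same programming inside a real unmask handler so the interrupt core can repeat it after resume. What those writeq() calls put into the registers can be reproduced in userspace from the union intcapxt declared just above this hunk. A sketch, with the field layout copied from the kernel source (treat the exact widths as an assumption to check against your tree) and sample vector/APIC-ID values:

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* mirror of union intcapxt from drivers/iommu/amd/init.c (assumed layout) */
union intcapxt {
	uint64_t capxt;
	struct {
		uint64_t reserved_0        :  2,
			 dest_mode_logical :  1,
			 reserved_1        :  5,
			 destid_0_23       : 24,
			 vector            :  8,
			 reserved_2        : 16,
			 destid_24_31      :  8;
	};
} __attribute__((packed));

int main(void)
{
	union intcapxt xt = { .capxt = 0 };
	uint32_t dest_apicid = 0x01000042;      /* sample x2APIC ID */

	xt.dest_mode_logical = 0;               /* physical destination */
	xt.vector            = 0x30;            /* sample vector */
	xt.destid_0_23       = dest_apicid & 0xffffff;
	xt.destid_24_31      = dest_apicid >> 24;

	/* this is the 64-bit value writeq() stores into the three
	 * MMIO_INTCAPXT_{EVT,PPR,GALOG}_OFFSET registers */
	printf("capxt = 0x%016" PRIx64 "\n", xt.capxt);
	return 0;
}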
@@ -2091,6 +2060,38 @@ static void intcapxt_irqdomain_free(struct irq_domain *domain, unsigned int virq
 	irq_domain_free_irqs_top(domain, virq, nr_irqs);
 }
 
+
+static void intcapxt_unmask_irq(struct irq_data *irqd)
+{
+	struct amd_iommu *iommu = irqd->chip_data;
+	struct irq_cfg *cfg = irqd_cfg(irqd);
+	union intcapxt xt;
+
+	xt.capxt = 0ULL;
+	xt.dest_mode_logical = apic->dest_mode_logical;
+	xt.vector = cfg->vector;
+	xt.destid_0_23 = cfg->dest_apicid & GENMASK(23, 0);
+	xt.destid_24_31 = cfg->dest_apicid >> 24;
+
+	/**
+	 * Current IOMMU implementation uses the same IRQ for all
+	 * 3 IOMMU interrupts.
+	 */
+	writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET);
+	writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET);
+	writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET);
+}
+
+static void intcapxt_mask_irq(struct irq_data *irqd)
+{
+	struct amd_iommu *iommu = irqd->chip_data;
+
+	writeq(0, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET);
+	writeq(0, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET);
+	writeq(0, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET);
+}
+
+
 static int intcapxt_set_affinity(struct irq_data *irqd,
 				 const struct cpumask *mask, bool force)
 {
@@ -2100,8 +2101,12 @@ static int intcapxt_set_affinity(struct irq_data *irqd,
 	ret = parent->chip->irq_set_affinity(parent, mask, force);
 	if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
 		return ret;
+	return 0;
+}
 
-	return intcapxt_irqdomain_activate(irqd->domain, irqd, false);
+static int intcapxt_set_wake(struct irq_data *irqd, unsigned int on)
+{
+	return on ? -EOPNOTSUPP : 0;
 }
 
 static struct irq_chip intcapxt_controller = {
static struct irq_chip intcapxt_controller = {
@@ -2111,7 +2116,8 @@ static struct irq_chip intcapxt_controller = {
2111
2116
.irq_ack = irq_chip_ack_parent ,
2112
2117
.irq_retrigger = irq_chip_retrigger_hierarchy ,
2113
2118
.irq_set_affinity = intcapxt_set_affinity ,
2114
- .flags = IRQCHIP_SKIP_SET_WAKE ,
2119
+ .irq_set_wake = intcapxt_set_wake ,
2120
+ .flags = IRQCHIP_MASK_ON_SUSPEND ,
2115
2121
};
2116
2122
2117
2123
static const struct irq_domain_ops intcapxt_domain_ops = {
@@ -2173,7 +2179,6 @@ static int iommu_setup_intcapxt(struct amd_iommu *iommu)
 		return ret;
 	}
 
-	iommu_feature_enable(iommu, CONTROL_INTCAPXT_EN);
 	return 0;
 }
 
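This hunk and the intcapxt_controller change above are two halves of one suspend/resume mechanism: IRQCHIP_SKIP_SET_WAKE gives way to an irq_set_wake handler that refuses wake-up (-EOPNOTSUPP when asked to enable it, 0 when asked to disable), paired with IRQCHIP_MASK_ON_SUSPEND. With that flag the genirq core masks the interrupt during suspend_device_irqs() and unmasks it on resume, and because intcapxt_unmask_irq() now rewrites the INTCAPXT registers, the resume-time unmask restores the state the hardware lost while asleep. A toy trace of the sequencing (hand-rolled illustration, not kernel code):

#include <stdio.h>

/* stand-ins for the handlers added earlier in this diff */
static void intcapxt_mask_irq(void)   { puts("  mask:   INTCAPXT regs <- 0"); }
static void intcapxt_unmask_irq(void) { puts("  unmask: INTCAPXT regs <- vector/destid"); }

int main(void)
{
	puts("suspend_device_irqs():");
	intcapxt_mask_irq();      /* done by the core due to IRQCHIP_MASK_ON_SUSPEND */
	puts("S3 sleep: IOMMU MMIO state is lost");
	puts("resume_device_irqs():");
	intcapxt_unmask_irq();    /* the unmask reprograms the registers */
	return 0;
}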
@@ -2196,6 +2201,10 @@ static int iommu_init_irq(struct amd_iommu *iommu)
 
 	iommu->int_enabled = true;
 enable_faults:
+
+	if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
+		iommu_feature_enable(iommu, CONTROL_INTCAPXT_EN);
+
 	iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
 
 	if (iommu->ppr_log != NULL)
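Moving CONTROL_INTCAPXT_EN down here, under the enable_faults: label, pairs with its removal from iommu_setup_intcapxt() in the previous hunk: iommu_init_irq() is invoked again when interrupts are re-enabled after resume, and on that second call it skips straight to enable_faults: because the interrupt is already set up, so every control bit below the label gets re-applied after each suspend cycle. A self-contained toy of that control flow (the early goto on int_enabled is assumed from the surrounding kernel code, not shown in this diff):

#include <stdbool.h>
#include <stdio.h>

static bool int_enabled;                /* models iommu->int_enabled */
static bool x2apic_mode = true;         /* models amd_iommu_xt_mode  */

static void iommu_feature_enable(const char *bit) { printf("  enable %s\n", bit); }

static int iommu_init_irq(void)
{
	if (int_enabled)
		goto enable_faults;     /* resume path: IRQ already allocated */

	puts("  allocate and wire up the IRQ (boot only)");
	int_enabled = true;

enable_faults:
	/* runs at boot AND after every resume */
	if (x2apic_mode)
		iommu_feature_enable("CONTROL_INTCAPXT_EN");
	iommu_feature_enable("CONTROL_EVT_INT_EN");
	return 0;
}

int main(void)
{
	puts("boot:");   iommu_init_irq();
	puts("resume:"); iommu_init_irq();
	return 0;
}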