@@ -166,7 +166,7 @@ struct nwl_pcie {
 	int irq_intx;
 	int irq_misc;
 	struct nwl_msi msi;
-	struct irq_domain *legacy_irq_domain;
+	struct irq_domain *intx_irq_domain;
 	struct clk *clk;
 	raw_spinlock_t leg_mask_lock;
 };
@@ -324,7 +324,7 @@ static void nwl_pcie_leg_handler(struct irq_desc *desc)
 	while ((status = nwl_bridge_readl(pcie, MSGF_LEG_STATUS) &
 				MSGF_LEG_SR_MASKALL) != 0) {
 		for_each_set_bit(bit, &status, PCI_NUM_INTX)
-			generic_handle_domain_irq(pcie->legacy_irq_domain, bit);
+			generic_handle_domain_irq(pcie->intx_irq_domain, bit);
 	}
 
 	chained_irq_exit(chip, desc);
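
[Note, for context rather than part of the patch: generic_handle_domain_irq() is what ties the renamed domain into the chained handler above. It resolves the hardware bit to the Linux IRQ descriptor registered in the domain and runs its flow handler; simplified, the kernel/irq/irqdesc.c implementation is essentially:]

int generic_handle_domain_irq(struct irq_domain *domain, unsigned int hwirq)
{
	/* look up the irq_desc mapped for this hwirq and run its handler */
	return handle_irq_desc(irq_resolve_mapping(domain, hwirq));
}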
@@ -364,7 +364,7 @@ static void nwl_pcie_msi_handler_low(struct irq_desc *desc)
 	chained_irq_exit(chip, desc);
 }
 
-static void nwl_mask_leg_irq(struct irq_data *data)
+static void nwl_mask_intx_irq(struct irq_data *data)
 {
 	struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data);
 	unsigned long flags;
@@ -378,7 +378,7 @@ static void nwl_mask_leg_irq(struct irq_data *data)
 	raw_spin_unlock_irqrestore(&pcie->leg_mask_lock, flags);
 }
 
-static void nwl_unmask_leg_irq(struct irq_data *data)
+static void nwl_unmask_intx_irq(struct irq_data *data)
 {
 	struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data);
 	unsigned long flags;
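
[Note, for context: the two hunks above only show the tails of the mask/unmask callbacks. Their elided bodies do a read-modify-write of MSGF_LEG_MASK under leg_mask_lock; a sketch of the mask side, where the hwirq-to-bit mapping is an assumption, not taken from this diff:]

static void nwl_mask_intx_irq(struct irq_data *data)
{
	struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data);
	unsigned long flags;
	u32 mask = BIT(data->hwirq);	/* assumed MSGF_LEG_MASK bit layout */
	u32 val;

	raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags);
	val = nwl_bridge_readl(pcie, MSGF_LEG_MASK);
	nwl_bridge_writel(pcie, val & ~mask, MSGF_LEG_MASK);
	raw_spin_unlock_irqrestore(&pcie->leg_mask_lock, flags);
}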
@@ -392,26 +392,26 @@ static void nwl_unmask_leg_irq(struct irq_data *data)
 	raw_spin_unlock_irqrestore(&pcie->leg_mask_lock, flags);
 }
 
-static struct irq_chip nwl_leg_irq_chip = {
+static struct irq_chip nwl_intx_irq_chip = {
 	.name = "nwl_pcie:legacy",
-	.irq_enable = nwl_unmask_leg_irq,
-	.irq_disable = nwl_mask_leg_irq,
-	.irq_mask = nwl_mask_leg_irq,
-	.irq_unmask = nwl_unmask_leg_irq,
+	.irq_enable = nwl_unmask_intx_irq,
+	.irq_disable = nwl_mask_intx_irq,
+	.irq_mask = nwl_mask_intx_irq,
+	.irq_unmask = nwl_unmask_intx_irq,
 };
 
-static int nwl_legacy_map(struct irq_domain *domain, unsigned int irq,
-			  irq_hw_number_t hwirq)
+static int nwl_intx_map(struct irq_domain *domain, unsigned int irq,
+			irq_hw_number_t hwirq)
 {
-	irq_set_chip_and_handler(irq, &nwl_leg_irq_chip, handle_level_irq);
+	irq_set_chip_and_handler(irq, &nwl_intx_irq_chip, handle_level_irq);
 	irq_set_chip_data(irq, domain->host_data);
 	irq_set_status_flags(irq, IRQ_LEVEL);
 
 	return 0;
 }
 
-static const struct irq_domain_ops legacy_domain_ops = {
-	.map = nwl_legacy_map,
+static const struct irq_domain_ops intx_domain_ops = {
+	.map = nwl_intx_map,
 	.xlate = pci_irqd_intx_xlate,
 };
 
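[Note, for context: the unchanged .xlate callback, pci_irqd_intx_xlate() from include/linux/pci.h, is why hwirqs in this domain are zero-based. It translates the 1-based INTA..INTD specifier from the devicetree into hwirq 0..3, roughly:]

static inline int pci_irqd_intx_xlate(struct irq_domain *d,
				      struct device_node *node,
				      const u32 *intspec, unsigned int intsize,
				      unsigned long *out_hwirq,
				      unsigned int *out_type)
{
	const u32 intx = intspec[0];

	/* reject specifiers outside INTA (1) .. INTD (4) */
	if (intx < PCI_INTERRUPT_INTA || intx > PCI_INTERRUPT_INTD)
		return -EINVAL;

	*out_hwirq = intx - PCI_INTERRUPT_INTA;	/* 1-based to 0-based */
	return 0;
}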
@@ -525,20 +525,20 @@ static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie)
 {
 	struct device *dev = pcie->dev;
 	struct device_node *node = dev->of_node;
-	struct device_node *legacy_intc_node;
+	struct device_node *intc_node;
 
-	legacy_intc_node = of_get_next_child(node, NULL);
-	if (!legacy_intc_node) {
+	intc_node = of_get_next_child(node, NULL);
+	if (!intc_node) {
 		dev_err(dev, "No legacy intc node found\n");
 		return -EINVAL;
 	}
 
-	pcie->legacy_irq_domain = irq_domain_add_linear(legacy_intc_node,
-							PCI_NUM_INTX,
-							&legacy_domain_ops,
-							pcie);
-	of_node_put(legacy_intc_node);
-	if (!pcie->legacy_irq_domain) {
+	pcie->intx_irq_domain = irq_domain_add_linear(intc_node,
+						      PCI_NUM_INTX,
+						      &intx_domain_ops,
+						      pcie);
+	of_node_put(intc_node);
+	if (!pcie->intx_irq_domain) {
 		dev_err(dev, "failed to create IRQ domain\n");
 		return -ENOMEM;
 	}
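
[Note, for context: once irq_domain_add_linear() succeeds, each of the PCI_NUM_INTX hwirqs can be bound lazily. A hypothetical consumer mapping INTA (hwirq 0) would call back into nwl_intx_map() above, which attaches nwl_intx_irq_chip:]

	/* illustration only; irq_create_mapping() returns 0 on failure */
	unsigned int virq = irq_create_mapping(pcie->intx_irq_domain, 0);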
@@ -710,14 +710,14 @@ static int nwl_pcie_bridge_init(struct nwl_pcie *pcie)
 	/* Enable all misc interrupts */
 	nwl_bridge_writel(pcie, MSGF_MISC_SR_MASKALL, MSGF_MISC_MASK);
 
-	/* Disable all legacy interrupts */
+	/* Disable all INTX interrupts */
 	nwl_bridge_writel(pcie, (u32)~MSGF_LEG_SR_MASKALL, MSGF_LEG_MASK);
 
-	/* Clear pending legacy interrupts */
+	/* Clear pending INTX interrupts */
 	nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_LEG_STATUS) &
 			  MSGF_LEG_SR_MASKALL, MSGF_LEG_STATUS);
 
-	/* Enable all legacy interrupts */
+	/* Enable all INTX interrupts */
 	nwl_bridge_writel(pcie, MSGF_LEG_SR_MASKALL, MSGF_LEG_MASK);
 
 	/* Enable the bridge config interrupt */