| From: Sebastian Andrzej Siewior <bigeasy@linutronix.de> |
| Date: Thu, 22 Mar 2018 16:22:40 +0100 |
| Subject: [PATCH 08/10] iommu/amd: Drop the lock while allocating new irq remap |
| table |
| |
| Upstream commit 993ca6e063a69a0c65ca42ed449b6bc1b3844151 |
| |
| The irq_remap_table is allocated while the iommu_table_lock is held with |
| interrupts disabled. |
| From looking at the call sites, all callers are in the early device |
| initialisation (apic_bsp_setup(), pci_enable_device(), |
| pci_enable_msi()) so it makes sense to drop the lock, which also enables |
| interrupts and try to allocate that memory with GFP_KERNEL instead |
| of GFP_ATOMIC. |
| |
| Since during the allocation the iommu_table_lock is dropped, we need to |
| recheck if the table exists after the lock has been reacquired. I *think* |
| that it is impossible that the "devid" entry appears in irq_lookup_table |
| while the lock is dropped since the same device can only be probed once. |
| However I check for both cases, just to be sure. |
| |
| Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> |
| Signed-off-by: Joerg Roedel <jroedel@suse.de> |
| --- |
| drivers/iommu/amd_iommu.c | 63 ++++++++++++++++++++++++++++++++-------------- |
| 1 file changed, 45 insertions(+), 18 deletions(-) |
| |
| --- a/drivers/iommu/amd_iommu.c |
| +++ b/drivers/iommu/amd_iommu.c |
| @@ -3603,6 +3603,30 @@ static struct irq_remap_table *get_irq_t |
| return table; |
| } |
| |
| +static struct irq_remap_table *__alloc_irq_table(void) |
| +{ |
| + struct irq_remap_table *table; |
| + |
| + table = kzalloc(sizeof(*table), GFP_KERNEL); |
| + if (!table) |
| + return NULL; |
| + |
| + table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_KERNEL); |
| + if (!table->table) { |
| + kfree(table); |
| + return NULL; |
| + } |
| + raw_spin_lock_init(&table->lock); |
| + |
| + if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir)) |
| + memset(table->table, 0, |
| + MAX_IRQS_PER_TABLE * sizeof(u32)); |
| + else |
| + memset(table->table, 0, |
| + (MAX_IRQS_PER_TABLE * (sizeof(u64) * 2))); |
| + return table; |
| +} |
| + |
| static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid, |
| struct irq_remap_table *table) |
| { |
| @@ -3614,6 +3638,7 @@ static void set_remap_table_entry(struct |
| static struct irq_remap_table *alloc_irq_table(u16 devid) |
| { |
| struct irq_remap_table *table = NULL; |
| + struct irq_remap_table *new_table = NULL; |
| struct amd_iommu *iommu; |
| unsigned long flags; |
| u16 alias; |
| @@ -3632,42 +3657,44 @@ static struct irq_remap_table *alloc_irq |
| table = irq_lookup_table[alias]; |
| if (table) { |
| set_remap_table_entry(iommu, devid, table); |
| - goto out; |
| + goto out_wait; |
| } |
| + spin_unlock_irqrestore(&iommu_table_lock, flags); |
| |
| /* Nothing there yet, allocate new irq remapping table */ |
| - table = kzalloc(sizeof(*table), GFP_ATOMIC); |
| - if (!table) |
| - goto out_unlock; |
| + new_table = __alloc_irq_table(); |
| + if (!new_table) |
| + return NULL; |
| |
| - /* Initialize table spin-lock */ |
| - raw_spin_lock_init(&table->lock); |
| + spin_lock_irqsave(&iommu_table_lock, flags); |
| |
| - table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_ATOMIC); |
| - if (!table->table) { |
| - kfree(table); |
| - table = NULL; |
| + table = irq_lookup_table[devid]; |
| + if (table) |
| goto out_unlock; |
| - } |
| |
| - if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir)) |
| - memset(table->table, 0, |
| - MAX_IRQS_PER_TABLE * sizeof(u32)); |
| - else |
| - memset(table->table, 0, |
| - (MAX_IRQS_PER_TABLE * (sizeof(u64) * 2))); |
| + table = irq_lookup_table[alias]; |
| + if (table) { |
| + set_remap_table_entry(iommu, devid, table); |
| + goto out_wait; |
| + } |
| |
| + table = new_table; |
| + new_table = NULL; |
| |
| set_remap_table_entry(iommu, devid, table); |
| if (devid != alias) |
| set_remap_table_entry(iommu, alias, table); |
| |
| -out: |
| +out_wait: |
| iommu_completion_wait(iommu); |
| |
| out_unlock: |
| spin_unlock_irqrestore(&iommu_table_lock, flags); |
| |
| + if (new_table) { |
| + kmem_cache_free(amd_iommu_irq_cache, new_table->table); |
| + kfree(new_table); |
| + } |
| return table; |
| } |
| |