Merge pull request #3010 from TiejunChina/master-dev

update -rt to 4.14.34-rt27
Rolf Neugebauer 2018-04-22 21:38:13 +01:00 committed by GitHub
commit c08fe57306
412 changed files with 1503 additions and 139 deletions


@@ -1,5 +1,5 @@
kernel:
-  image: linuxkit/kernel:4.14.29-rt
+  image: linuxkit/kernel:4.14.34-rt
cmdline: "console=tty0"
init:
- linuxkit/init:d0bf64f4cea42bea71e7d8f8832ba497bb822e89


@@ -222,7 +222,7 @@ $(eval $(call kernel,4.16.3,4.16.x,$(EXTRA),$(DEBUG)))
$(eval $(call kernel,4.15.18,4.15.x,$(EXTRA),$(DEBUG)))
$(eval $(call kernel,4.14.35,4.14.x,$(EXTRA),$(DEBUG)))
$(eval $(call kernel,4.14.35,4.14.x,,-dbg))
-$(eval $(call kernel,4.14.29,4.14.x,-rt,))
+$(eval $(call kernel,4.14.34,4.14.x,-rt,))
$(eval $(call kernel,4.9.94,4.9.x,$(EXTRA),$(DEBUG)))
$(eval $(call kernel,4.4.128,4.4.x,$(EXTRA),$(DEBUG)))
@@ -230,7 +230,7 @@ else ifeq ($(ARCH),aarch64)
$(eval $(call kernel,4.16.3,4.16.x,$(EXTRA),$(DEBUG)))
$(eval $(call kernel,4.15.18,4.15.x,$(EXTRA),$(DEBUG)))
$(eval $(call kernel,4.14.35,4.14.x,$(EXTRA),$(DEBUG)))
-$(eval $(call kernel,4.14.29,4.14.x,-rt,))
+$(eval $(call kernel,4.14.34,4.14.x,-rt,))
else ifeq ($(ARCH),s390x)
$(eval $(call kernel,4.16.3,4.16.x,$(EXTRA),$(DEBUG)))


@@ -0,0 +1,38 @@
From: Christoffer Dall <christoffer.dall@linaro.org>
Date: Fri, 8 Sep 2017 07:07:13 -0700
Subject: [PATCH] KVM: arm/arm64: Remove redundant preemptible checks
Upstream commit 5a24575032971c5a9a4580417a791c427ebdb8e5
The __this_cpu_read() and __this_cpu_write() functions already implement
checks for the required preemption levels when using
CONFIG_DEBUG_PREEMPT, which gives you nice error messages and such.
Therefore there is no need to explicitly check this using a BUG_ON() in
the code (which we don't do for other uses of per-CPU variables either).
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Reviewed-by: Andre Przywara <andre.przywara@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
virt/kvm/arm/arm.c | 2 --
1 file changed, 2 deletions(-)
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -69,7 +69,6 @@ static DEFINE_PER_CPU(unsigned char, kvm
static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
{
- BUG_ON(preemptible());
__this_cpu_write(kvm_arm_running_vcpu, vcpu);
}
@@ -79,7 +78,6 @@ static void kvm_arm_set_running_vcpu(str
*/
struct kvm_vcpu *kvm_arm_get_running_vcpu(void)
{
- BUG_ON(preemptible());
return __this_cpu_read(kvm_arm_running_vcpu);
}
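For background, a minimal sketch (not code from this PR; the names below are hypothetical) of the pattern involved. With CONFIG_DEBUG_PREEMPT enabled, the preemption-debugging code behind the __this_cpu_*() accessors already warns when they are used from preemptible context, which is what makes the explicit BUG_ON(preemptible()) redundant:

#include <linux/percpu.h>

/* Illustrative only; not the KVM code. */
static DEFINE_PER_CPU(int, example_state);

static void set_example_state(int val)
{
	/* With CONFIG_DEBUG_PREEMPT, __this_cpu_write() itself complains
	 * if this is reached with preemption enabled, so an extra
	 * BUG_ON(preemptible()) adds nothing. */
	__this_cpu_write(example_state, val);
}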


@@ -0,0 +1,170 @@
From: Scott Wood <swood@redhat.com>
Date: Sun, 21 Jan 2018 03:28:54 -0600
Subject: [PATCH 1/3] iommu/amd: Use raw locks on atomic context paths
Upstream commit 27790398c2aed917828dc3c6f81240d57f1584c9
Several functions in this driver are called from atomic context,
and thus raw locks must be used in order to be safe on PREEMPT_RT.
This includes paths that must wait for command completion, which is
a potential PREEMPT_RT latency concern but not easily avoidable.
Signed-off-by: Scott Wood <swood@redhat.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
---
drivers/iommu/amd_iommu.c | 30 +++++++++++++++---------------
drivers/iommu/amd_iommu_init.c | 2 +-
drivers/iommu/amd_iommu_types.h | 4 ++--
3 files changed, 18 insertions(+), 18 deletions(-)
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -1054,9 +1054,9 @@ static int iommu_queue_command_sync(stru
unsigned long flags;
int ret;
- spin_lock_irqsave(&iommu->lock, flags);
+ raw_spin_lock_irqsave(&iommu->lock, flags);
ret = __iommu_queue_command_sync(iommu, cmd, sync);
- spin_unlock_irqrestore(&iommu->lock, flags);
+ raw_spin_unlock_irqrestore(&iommu->lock, flags);
return ret;
}
@@ -1082,7 +1082,7 @@ static int iommu_completion_wait(struct
build_completion_wait(&cmd, (u64)&iommu->cmd_sem);
- spin_lock_irqsave(&iommu->lock, flags);
+ raw_spin_lock_irqsave(&iommu->lock, flags);
iommu->cmd_sem = 0;
@@ -1093,7 +1093,7 @@ static int iommu_completion_wait(struct
ret = wait_on_sem(&iommu->cmd_sem);
out_unlock:
- spin_unlock_irqrestore(&iommu->lock, flags);
+ raw_spin_unlock_irqrestore(&iommu->lock, flags);
return ret;
}
@@ -3618,7 +3618,7 @@ static struct irq_remap_table *get_irq_t
goto out_unlock;
/* Initialize table spin-lock */
- spin_lock_init(&table->lock);
+ raw_spin_lock_init(&table->lock);
if (ioapic)
/* Keep the first 32 indexes free for IOAPIC interrupts */
@@ -3677,7 +3677,7 @@ static int alloc_irq_index(u16 devid, in
if (!table)
return -ENODEV;
- spin_lock_irqsave(&table->lock, flags);
+ raw_spin_lock_irqsave(&table->lock, flags);
/* Scan table for free entries */
for (c = 0, index = table->min_index;
@@ -3700,7 +3700,7 @@ static int alloc_irq_index(u16 devid, in
index = -ENOSPC;
out:
- spin_unlock_irqrestore(&table->lock, flags);
+ raw_spin_unlock_irqrestore(&table->lock, flags);
return index;
}
@@ -3721,7 +3721,7 @@ static int modify_irte_ga(u16 devid, int
if (!table)
return -ENOMEM;
- spin_lock_irqsave(&table->lock, flags);
+ raw_spin_lock_irqsave(&table->lock, flags);
entry = (struct irte_ga *)table->table;
entry = &entry[index];
@@ -3732,7 +3732,7 @@ static int modify_irte_ga(u16 devid, int
if (data)
data->ref = entry;
- spin_unlock_irqrestore(&table->lock, flags);
+ raw_spin_unlock_irqrestore(&table->lock, flags);
iommu_flush_irt(iommu, devid);
iommu_completion_wait(iommu);
@@ -3754,9 +3754,9 @@ static int modify_irte(u16 devid, int in
if (!table)
return -ENOMEM;
- spin_lock_irqsave(&table->lock, flags);
+ raw_spin_lock_irqsave(&table->lock, flags);
table->table[index] = irte->val;
- spin_unlock_irqrestore(&table->lock, flags);
+ raw_spin_unlock_irqrestore(&table->lock, flags);
iommu_flush_irt(iommu, devid);
iommu_completion_wait(iommu);
@@ -3778,9 +3778,9 @@ static void free_irte(u16 devid, int ind
if (!table)
return;
- spin_lock_irqsave(&table->lock, flags);
+ raw_spin_lock_irqsave(&table->lock, flags);
iommu->irte_ops->clear_allocated(table, index);
- spin_unlock_irqrestore(&table->lock, flags);
+ raw_spin_unlock_irqrestore(&table->lock, flags);
iommu_flush_irt(iommu, devid);
iommu_completion_wait(iommu);
@@ -4359,7 +4359,7 @@ int amd_iommu_update_ga(int cpu, bool is
if (!irt)
return -ENODEV;
- spin_lock_irqsave(&irt->lock, flags);
+ raw_spin_lock_irqsave(&irt->lock, flags);
if (ref->lo.fields_vapic.guest_mode) {
if (cpu >= 0)
@@ -4368,7 +4368,7 @@ int amd_iommu_update_ga(int cpu, bool is
barrier();
}
- spin_unlock_irqrestore(&irt->lock, flags);
+ raw_spin_unlock_irqrestore(&irt->lock, flags);
iommu_flush_irt(iommu, devid);
iommu_completion_wait(iommu);
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -1474,7 +1474,7 @@ static int __init init_iommu_one(struct
{
int ret;
- spin_lock_init(&iommu->lock);
+ raw_spin_lock_init(&iommu->lock);
/* Add IOMMU to internal data structures */
list_add_tail(&iommu->list, &amd_iommu_list);
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -406,7 +406,7 @@ extern bool amd_iommu_iotlb_sup;
#define IRQ_TABLE_ALIGNMENT 128
struct irq_remap_table {
- spinlock_t lock;
+ raw_spinlock_t lock;
unsigned min_index;
u32 *table;
};
@@ -488,7 +488,7 @@ struct amd_iommu {
int index;
/* locks the accesses to the hardware */
- spinlock_t lock;
+ raw_spinlock_t lock;
/* Pointer to PCI device of this IOMMU */
struct pci_dev *dev;
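As a hedged illustration of the conversion pattern this patch applies throughout the driver (the identifiers below are made up, not the driver's actual ones): on PREEMPT_RT a spinlock_t is substituted with a sleeping lock, so any lock taken from hard-IRQ or otherwise atomic context must be a raw_spinlock_t, which keeps the non-sleeping busy-wait semantics:

#include <linux/spinlock.h>
#include <linux/types.h>

struct hw_unit {				/* hypothetical structure */
	raw_spinlock_t lock;			/* was: spinlock_t lock; */
	u32 *table;
};

static void hw_unit_set(struct hw_unit *hw, int idx, u32 val)
{
	unsigned long flags;

	/* raw_spin_lock_irqsave() never sleeps, even on PREEMPT_RT,
	 * so this is safe to call from atomic context. */
	raw_spin_lock_irqsave(&hw->lock, flags);
	hw->table[idx] = val;
	raw_spin_unlock_irqrestore(&hw->lock, flags);
}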


@@ -0,0 +1,31 @@
From: Scott Wood <swood@redhat.com>
Date: Sun, 28 Jan 2018 14:22:19 -0600
Subject: [PATCH 2/3] iommu/amd: Don't use dev_data in irte_ga_set_affinity()
Upstream commit 01ee04badefd296eb7a4430497373be9b7b16783
search_dev_data() acquires a non-raw lock, which can't be done
from atomic context on PREEMPT_RT. There is no need to look at
dev_data because guest_mode should never be set if use_vapic is
not set.
Signed-off-by: Scott Wood <swood@redhat.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
---
drivers/iommu/amd_iommu.c | 4 +---
1 file changed, 1 insertion(+), 3 deletions(-)
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3861,10 +3861,8 @@ static void irte_ga_set_affinity(void *e
u8 vector, u32 dest_apicid)
{
struct irte_ga *irte = (struct irte_ga *) entry;
- struct iommu_dev_data *dev_data = search_dev_data(devid);
- if (!dev_data || !dev_data->use_vapic ||
- !irte->lo.fields_remap.guest_mode) {
+ if (!irte->lo.fields_remap.guest_mode) {
irte->hi.fields.vector = vector;
irte->lo.fields_remap.destination = dest_apicid;
modify_irte_ga(devid, index, irte, NULL);


@@ -0,0 +1,116 @@
From: Scott Wood <swood@redhat.com>
Date: Wed, 14 Feb 2018 17:36:28 -0600
Subject: [PATCH 3/3] iommu/amd: Avoid locking get_irq_table() from atomic
context
Upstream commit df42a04b15f19a842393dc98a84cbc52b1f8ed49
get_irq_table() previously acquired amd_iommu_devtable_lock which is not
a raw lock, and thus cannot be acquired from atomic context on
PREEMPT_RT. Many calls to modify_irte*() come from atomic context due to
the IRQ desc->lock, as does amd_iommu_update_ga() due to the preemption
disabling in vcpu_load/put().
The only difference between calling get_irq_table() and reading from
irq_lookup_table[] directly, other than the lock acquisition and
amd_iommu_rlookup_table[] check, is if the table entry is unpopulated,
which should never happen when looking up a devid that came from an
irq_2_irte struct, as get_irq_table() would have already been called on
that devid during irq_remapping_alloc().
The lock acquisition is not needed in these cases because entries in
irq_lookup_table[] never change once non-NULL -- nor would the
amd_iommu_devtable_lock usage in get_irq_table() provide meaningful
protection if they did, since it's released before using the looked up
table in the get_irq_table() caller.
Rename the old get_irq_table() to alloc_irq_table(), and create a new
lockless get_irq_table() for use in non-allocating contexts; it WARNs
if it doesn't find what it's looking for.
Signed-off-by: Scott Wood <swood@redhat.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
---
drivers/iommu/amd_iommu.c | 29 ++++++++++++++++++++++-------
1 file changed, 22 insertions(+), 7 deletions(-)
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3586,7 +3586,22 @@ static void set_dte_irq_entry(u16 devid,
amd_iommu_dev_table[devid].data[2] = dte;
}
-static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
+static struct irq_remap_table *get_irq_table(u16 devid)
+{
+ struct irq_remap_table *table;
+
+ if (WARN_ONCE(!amd_iommu_rlookup_table[devid],
+ "%s: no iommu for devid %x\n", __func__, devid))
+ return NULL;
+
+ table = irq_lookup_table[devid];
+ if (WARN_ONCE(!table, "%s: no table for devid %x\n", __func__, devid))
+ return NULL;
+
+ return table;
+}
+
+static struct irq_remap_table *alloc_irq_table(u16 devid, bool ioapic)
{
struct irq_remap_table *table = NULL;
struct amd_iommu *iommu;
@@ -3673,7 +3688,7 @@ static int alloc_irq_index(u16 devid, in
if (!iommu)
return -ENODEV;
- table = get_irq_table(devid, false);
+ table = alloc_irq_table(devid, false);
if (!table)
return -ENODEV;
@@ -3717,7 +3732,7 @@ static int modify_irte_ga(u16 devid, int
if (iommu == NULL)
return -EINVAL;
- table = get_irq_table(devid, false);
+ table = get_irq_table(devid);
if (!table)
return -ENOMEM;
@@ -3750,7 +3765,7 @@ static int modify_irte(u16 devid, int in
if (iommu == NULL)
return -EINVAL;
- table = get_irq_table(devid, false);
+ table = get_irq_table(devid);
if (!table)
return -ENOMEM;
@@ -3774,7 +3789,7 @@ static void free_irte(u16 devid, int ind
if (iommu == NULL)
return;
- table = get_irq_table(devid, false);
+ table = get_irq_table(devid);
if (!table)
return;
@@ -4092,7 +4107,7 @@ static int irq_remapping_alloc(struct ir
return ret;
if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC) {
- if (get_irq_table(devid, true))
+ if (alloc_irq_table(devid, true))
index = info->ioapic_pin;
else
ret = -ENOMEM;
@@ -4353,7 +4368,7 @@ int amd_iommu_update_ga(int cpu, bool is
if (!iommu)
return -ENODEV;
- irt = get_irq_table(devid, false);
+ irt = get_irq_table(devid);
if (!irt)
return -ENODEV;


@@ -0,0 +1,32 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 22 Mar 2018 16:22:33 +0100
Subject: [PATCH 01/10] iommu/amd: Take into account that alloc_dev_data() may
return NULL
Upstream commit 39ffe39545cd5cb5b8cee9f0469165cf24dc62c2
find_dev_data() does not check whether the return value of
alloc_dev_data() is NULL. This used to be okay because the pointer was
simply returned as-is. Since commit df3f7a6e8e85 ("iommu/amd: Use
is_attach_deferred call-back") the pointer may be used within
find_dev_data(), so a NULL check is required.
Cc: Baoquan He <bhe@redhat.com>
Fixes: df3f7a6e8e85 ("iommu/amd: Use is_attach_deferred call-back")
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
---
drivers/iommu/amd_iommu.c | 2 ++
1 file changed, 2 insertions(+)
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -311,6 +311,8 @@ static struct iommu_dev_data *find_dev_d
if (dev_data == NULL) {
dev_data = alloc_dev_data(devid);
+ if (!dev_data)
+ return NULL;
if (translation_pre_enabled(iommu))
dev_data->defer_attach = true;


@@ -0,0 +1,97 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 22 Mar 2018 16:22:34 +0100
Subject: [PATCH 02/10] iommu/amd: Turn dev_data_list into a lock less list
Upstream commit 779da73273fc4c4c6f41579a95e4fb7880a1720e
alloc_dev_data() adds new items to dev_data_list and search_dev_data()
is searching for items in this list. Both protect the access to the list
with a spinlock.
There is no need to navigate back and forth within the list, and no
specific item is ever deleted from it. This qualifies the list to become
a lockless list, and as part of this the spinlock can be removed.
With this change the ordering of items within the list changes: before,
new items were added to the end of the list; now they are added to the
front. I don't think it matters, but I wanted to mention it.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
---
drivers/iommu/amd_iommu.c | 28 ++++++++++------------------
drivers/iommu/amd_iommu_types.h | 2 +-
2 files changed, 11 insertions(+), 19 deletions(-)
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -84,8 +84,7 @@
static DEFINE_RWLOCK(amd_iommu_devtable_lock);
/* List of all available dev_data structures */
-static LIST_HEAD(dev_data_list);
-static DEFINE_SPINLOCK(dev_data_list_lock);
+static LLIST_HEAD(dev_data_list);
LIST_HEAD(ioapic_map);
LIST_HEAD(hpet_map);
@@ -204,40 +203,33 @@ static struct dma_ops_domain* to_dma_ops
static struct iommu_dev_data *alloc_dev_data(u16 devid)
{
struct iommu_dev_data *dev_data;
- unsigned long flags;
dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
if (!dev_data)
return NULL;
dev_data->devid = devid;
-
- spin_lock_irqsave(&dev_data_list_lock, flags);
- list_add_tail(&dev_data->dev_data_list, &dev_data_list);
- spin_unlock_irqrestore(&dev_data_list_lock, flags);
-
ratelimit_default_init(&dev_data->rs);
+ llist_add(&dev_data->dev_data_list, &dev_data_list);
return dev_data;
}
static struct iommu_dev_data *search_dev_data(u16 devid)
{
struct iommu_dev_data *dev_data;
- unsigned long flags;
+ struct llist_node *node;
+
+ if (llist_empty(&dev_data_list))
+ return NULL;
- spin_lock_irqsave(&dev_data_list_lock, flags);
- list_for_each_entry(dev_data, &dev_data_list, dev_data_list) {
+ node = dev_data_list.first;
+ llist_for_each_entry(dev_data, node, dev_data_list) {
if (dev_data->devid == devid)
- goto out_unlock;
+ return dev_data;
}
- dev_data = NULL;
-
-out_unlock:
- spin_unlock_irqrestore(&dev_data_list_lock, flags);
-
- return dev_data;
+ return NULL;
}
static int __last_alias(struct pci_dev *pdev, u16 alias, void *data)
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -625,7 +625,7 @@ struct devid_map {
*/
struct iommu_dev_data {
struct list_head list; /* For domain->dev_list */
- struct list_head dev_data_list; /* For global dev_data_list */
+ struct llist_node dev_data_list; /* For global dev_data_list */
struct protection_domain *domain; /* Domain the device is bound to */
u16 devid; /* PCI Device ID */
u16 alias; /* Alias Device ID */
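As a hedged sketch of the llist API the patch switches to (the item type below is made up; the real code uses struct iommu_dev_data): additions are a single atomic push to the front of the list, and lookups can walk the list without any lock because nodes are only ever prepended, never removed:

#include <linux/llist.h>
#include <linux/types.h>

struct item {
	u16 id;
	struct llist_node node;
};

static LLIST_HEAD(item_list);

static void item_add(struct item *it)
{
	llist_add(&it->node, &item_list);	/* atomic, adds to the front */
}

static struct item *item_search(u16 id)
{
	struct item *it;

	/* Safe without a lock: entries are never removed from the list. */
	llist_for_each_entry(it, item_list.first, node)
		if (it->id == id)
			return it;
	return NULL;
}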


@@ -0,0 +1,62 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 22 Mar 2018 16:22:35 +0100
Subject: [PATCH 03/10] iommu/amd: Split domain id out of
amd_iommu_devtable_lock
Upstream commit 2bc00180890427dcc092b2f2b0d03c904bcade29
domain_id_alloc() and domain_id_free() are used for ID management. Those
two functions share a bitmap (amd_iommu_pd_alloc_bitmap) and set/clear
bits based on ID allocation. There is no need to share this with
amd_iommu_devtable_lock; they can use their own lock for this operation.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
---
drivers/iommu/amd_iommu.c | 12 +++++-------
1 file changed, 5 insertions(+), 7 deletions(-)
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -82,6 +82,7 @@
#define AMD_IOMMU_PGSIZES ((~0xFFFUL) & ~(2ULL << 38))
static DEFINE_RWLOCK(amd_iommu_devtable_lock);
+static DEFINE_SPINLOCK(pd_bitmap_lock);
/* List of all available dev_data structures */
static LLIST_HEAD(dev_data_list);
@@ -1596,29 +1597,26 @@ static void del_domain_from_list(struct
static u16 domain_id_alloc(void)
{
- unsigned long flags;
int id;
- write_lock_irqsave(&amd_iommu_devtable_lock, flags);
+ spin_lock(&pd_bitmap_lock);
id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
BUG_ON(id == 0);
if (id > 0 && id < MAX_DOMAIN_ID)
__set_bit(id, amd_iommu_pd_alloc_bitmap);
else
id = 0;
- write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+ spin_unlock(&pd_bitmap_lock);
return id;
}
static void domain_id_free(int id)
{
- unsigned long flags;
-
- write_lock_irqsave(&amd_iommu_devtable_lock, flags);
+ spin_lock(&pd_bitmap_lock);
if (id > 0 && id < MAX_DOMAIN_ID)
__clear_bit(id, amd_iommu_pd_alloc_bitmap);
- write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+ spin_unlock(&pd_bitmap_lock);
}
#define DEFINE_FREE_PT_FN(LVL, FN) \
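A hedged sketch of the resulting shape (identifiers are illustrative, not the driver's): the bitmap allocator gets its own dedicated spinlock, so ID allocation no longer serializes against everything else that used amd_iommu_devtable_lock:

#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define EXAMPLE_MAX_ID	(1 << 16)	/* illustrative limit */

static DEFINE_SPINLOCK(example_bitmap_lock);
static DECLARE_BITMAP(example_bitmap, EXAMPLE_MAX_ID);

static int example_id_alloc(void)
{
	int id;

	spin_lock(&example_bitmap_lock);
	id = find_first_zero_bit(example_bitmap, EXAMPLE_MAX_ID);
	if (id < EXAMPLE_MAX_ID)
		__set_bit(id, example_bitmap);
	else
		id = -1;			/* all IDs in use */
	spin_unlock(&example_bitmap_lock);
	return id;
}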


@@ -0,0 +1,50 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 22 Mar 2018 16:22:36 +0100
Subject: [PATCH 04/10] iommu/amd: Split irq_lookup_table out of the
amd_iommu_devtable_lock
Upstream commit ea6166f4b83e9cfba1c18f46a764d50045682fe5
The function get_irq_table() reads/writes irq_lookup_table while holding
amd_iommu_devtable_lock. It also modifies
amd_iommu_dev_table[].data[2].
set_dte_entry() uses amd_iommu_dev_table[].data[0|1] (under the
domain->lock), so that should be okay. Access to the iommu is
serialized with the iommu's own lock.
So split get_irq_table() out from under amd_iommu_devtable_lock and
give it its own lock.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
---
drivers/iommu/amd_iommu.c | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -83,6 +83,7 @@
static DEFINE_RWLOCK(amd_iommu_devtable_lock);
static DEFINE_SPINLOCK(pd_bitmap_lock);
+static DEFINE_SPINLOCK(iommu_table_lock);
/* List of all available dev_data structures */
static LLIST_HEAD(dev_data_list);
@@ -3600,7 +3601,7 @@ static struct irq_remap_table *alloc_irq
unsigned long flags;
u16 alias;
- write_lock_irqsave(&amd_iommu_devtable_lock, flags);
+ spin_lock_irqsave(&iommu_table_lock, flags);
iommu = amd_iommu_rlookup_table[devid];
if (!iommu)
@@ -3665,7 +3666,7 @@ static struct irq_remap_table *alloc_irq
iommu_completion_wait(iommu);
out_unlock:
- write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+ spin_unlock_irqrestore(&iommu_table_lock, flags);
return table;
}


@@ -0,0 +1,94 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 22 Mar 2018 16:22:37 +0100
Subject: [PATCH 05/10] iommu/amd: Remove the special case from
alloc_irq_table()
Upstream commit fde65dd3d3096e8f6ecc7bbe544eb91f4220772c
alloc_irq_table() has a special ioapic argument. If set, it will
pre-allocate / reserve the first 32 indexes. The argument is true only
once, and alloc_irq_table() would be a little simpler if we extracted
the special bits to the caller.
The caller of irq_remapping_alloc() is holding irq_domain_mutex, so the
initialization via iommu->irte_ops->set_allocated() should not race
against other users.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
---
drivers/iommu/amd_iommu.c | 34 ++++++++++++++++++++--------------
1 file changed, 20 insertions(+), 14 deletions(-)
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3594,7 +3594,7 @@ static struct irq_remap_table *get_irq_t
return table;
}
-static struct irq_remap_table *alloc_irq_table(u16 devid, bool ioapic)
+static struct irq_remap_table *alloc_irq_table(u16 devid)
{
struct irq_remap_table *table = NULL;
struct amd_iommu *iommu;
@@ -3628,10 +3628,6 @@ static struct irq_remap_table *alloc_irq
/* Initialize table spin-lock */
raw_spin_lock_init(&table->lock);
- if (ioapic)
- /* Keep the first 32 indexes free for IOAPIC interrupts */
- table->min_index = 32;
-
table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_ATOMIC);
if (!table->table) {
kfree(table);
@@ -3646,12 +3642,6 @@ static struct irq_remap_table *alloc_irq
memset(table->table, 0,
(MAX_IRQS_PER_TABLE * (sizeof(u64) * 2)));
- if (ioapic) {
- int i;
-
- for (i = 0; i < 32; ++i)
- iommu->irte_ops->set_allocated(table, i);
- }
irq_lookup_table[devid] = table;
set_dte_irq_entry(devid, table);
@@ -3681,7 +3671,7 @@ static int alloc_irq_index(u16 devid, in
if (!iommu)
return -ENODEV;
- table = alloc_irq_table(devid, false);
+ table = alloc_irq_table(devid);
if (!table)
return -ENODEV;
@@ -4100,10 +4090,26 @@ static int irq_remapping_alloc(struct ir
return ret;
if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC) {
- if (alloc_irq_table(devid, true))
+ struct irq_remap_table *table;
+ struct amd_iommu *iommu;
+
+ table = alloc_irq_table(devid);
+ if (table) {
+ if (!table->min_index) {
+ /*
+ * Keep the first 32 indexes free for IOAPIC
+ * interrupts.
+ */
+ table->min_index = 32;
+ iommu = amd_iommu_rlookup_table[devid];
+ for (i = 0; i < 32; ++i)
+ iommu->irte_ops->set_allocated(table, i);
+ }
+ WARN_ON(table->min_index != 32);
index = info->ioapic_pin;
- else
+ } else {
ret = -ENOMEM;
+ }
} else {
index = alloc_irq_index(devid, nr_irqs);
}


@@ -0,0 +1,52 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 22 Mar 2018 16:22:38 +0100
Subject: [PATCH 06/10] iommu/amd: Use `table' instead `irt' as variable name
in amd_iommu_update_ga()
Upstream commit 4fde541c9dc114c5b448ad34b0286fe8b7c550f1
The variable of type struct irq_remap_table is always named `table',
except in amd_iommu_update_ga() where it is called `irt'. Make it
consistent and name it `table' there as well.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
---
drivers/iommu/amd_iommu.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -4353,7 +4353,7 @@ int amd_iommu_update_ga(int cpu, bool is
{
unsigned long flags;
struct amd_iommu *iommu;
- struct irq_remap_table *irt;
+ struct irq_remap_table *table;
struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
int devid = ir_data->irq_2_irte.devid;
struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
@@ -4367,11 +4367,11 @@ int amd_iommu_update_ga(int cpu, bool is
if (!iommu)
return -ENODEV;
- irt = get_irq_table(devid);
- if (!irt)
+ table = get_irq_table(devid);
+ if (!table)
return -ENODEV;
- raw_spin_lock_irqsave(&irt->lock, flags);
+ raw_spin_lock_irqsave(&table->lock, flags);
if (ref->lo.fields_vapic.guest_mode) {
if (cpu >= 0)
@@ -4380,7 +4380,7 @@ int amd_iommu_update_ga(int cpu, bool is
barrier();
}
- raw_spin_unlock_irqrestore(&irt->lock, flags);
+ raw_spin_unlock_irqrestore(&table->lock, flags);
iommu_flush_irt(iommu, devid);
iommu_completion_wait(iommu);


@@ -0,0 +1,66 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 22 Mar 2018 16:22:39 +0100
Subject: [PATCH 07/10] iommu/amd: Factor out setting the remap table for a
devid
Upstream commit 2fcc1e8ac4a8514c64f946178fc36c2e30e56a41
Setting the IRQ remap table for a specific devid (or its alias devid)
involves three steps, and those steps are repeated each time this is
done.
Introduce a new helper function, move those steps there, and use that
function instead. The compiler can still decide whether it is worth
inlining.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
---
drivers/iommu/amd_iommu.c | 23 ++++++++++++-----------
1 file changed, 12 insertions(+), 11 deletions(-)
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3594,6 +3594,14 @@ static struct irq_remap_table *get_irq_t
return table;
}
+static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid,
+ struct irq_remap_table *table)
+{
+ irq_lookup_table[devid] = table;
+ set_dte_irq_entry(devid, table);
+ iommu_flush_dte(iommu, devid);
+}
+
static struct irq_remap_table *alloc_irq_table(u16 devid)
{
struct irq_remap_table *table = NULL;
@@ -3614,9 +3622,7 @@ static struct irq_remap_table *alloc_irq
alias = amd_iommu_alias_table[devid];
table = irq_lookup_table[alias];
if (table) {
- irq_lookup_table[devid] = table;
- set_dte_irq_entry(devid, table);
- iommu_flush_dte(iommu, devid);
+ set_remap_table_entry(iommu, devid, table);
goto out;
}
@@ -3643,14 +3649,9 @@ static struct irq_remap_table *alloc_irq
(MAX_IRQS_PER_TABLE * (sizeof(u64) * 2)));
- irq_lookup_table[devid] = table;
- set_dte_irq_entry(devid, table);
- iommu_flush_dte(iommu, devid);
- if (devid != alias) {
- irq_lookup_table[alias] = table;
- set_dte_irq_entry(alias, table);
- iommu_flush_dte(iommu, alias);
- }
+ set_remap_table_entry(iommu, devid, table);
+ if (devid != alias)
+ set_remap_table_entry(iommu, alias, table);
out:
iommu_completion_wait(iommu);


@@ -0,0 +1,131 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 22 Mar 2018 16:22:40 +0100
Subject: [PATCH 08/10] iommu/amd: Drop the lock while allocating new irq remap
table
Upstream commit 993ca6e063a69a0c65ca42ed449b6bc1b3844151
The irq_remap_table is allocated while the iommu_table_lock is held with
interrupts disabled.
From looking at the call sites, all callers are in early device
initialisation (apic_bsp_setup(), pci_enable_device(),
pci_enable_msi()), so it makes sense to drop the lock, which also
enables interrupts, and try to allocate that memory with GFP_KERNEL
instead of GFP_ATOMIC.
Since the iommu_table_lock is dropped during the allocation, we need to
recheck whether the table exists after the lock has been reacquired. I
*think* it is impossible for the "devid" entry to appear in
irq_lookup_table while the lock is dropped, since the same device can
only be probed once. However, I check for both cases, just to be sure.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
---
drivers/iommu/amd_iommu.c | 63 ++++++++++++++++++++++++++++++++--------------
1 file changed, 45 insertions(+), 18 deletions(-)
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3594,6 +3594,30 @@ static struct irq_remap_table *get_irq_t
return table;
}
+static struct irq_remap_table *__alloc_irq_table(void)
+{
+ struct irq_remap_table *table;
+
+ table = kzalloc(sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return NULL;
+
+ table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_KERNEL);
+ if (!table->table) {
+ kfree(table);
+ return NULL;
+ }
+ raw_spin_lock_init(&table->lock);
+
+ if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
+ memset(table->table, 0,
+ MAX_IRQS_PER_TABLE * sizeof(u32));
+ else
+ memset(table->table, 0,
+ (MAX_IRQS_PER_TABLE * (sizeof(u64) * 2)));
+ return table;
+}
+
static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid,
struct irq_remap_table *table)
{
@@ -3605,6 +3629,7 @@ static void set_remap_table_entry(struct
static struct irq_remap_table *alloc_irq_table(u16 devid)
{
struct irq_remap_table *table = NULL;
+ struct irq_remap_table *new_table = NULL;
struct amd_iommu *iommu;
unsigned long flags;
u16 alias;
@@ -3623,42 +3648,44 @@ static struct irq_remap_table *alloc_irq
table = irq_lookup_table[alias];
if (table) {
set_remap_table_entry(iommu, devid, table);
- goto out;
+ goto out_wait;
}
+ spin_unlock_irqrestore(&iommu_table_lock, flags);
/* Nothing there yet, allocate new irq remapping table */
- table = kzalloc(sizeof(*table), GFP_ATOMIC);
- if (!table)
- goto out_unlock;
+ new_table = __alloc_irq_table();
+ if (!new_table)
+ return NULL;
- /* Initialize table spin-lock */
- raw_spin_lock_init(&table->lock);
+ spin_lock_irqsave(&iommu_table_lock, flags);
- table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_ATOMIC);
- if (!table->table) {
- kfree(table);
- table = NULL;
+ table = irq_lookup_table[devid];
+ if (table)
goto out_unlock;
- }
- if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
- memset(table->table, 0,
- MAX_IRQS_PER_TABLE * sizeof(u32));
- else
- memset(table->table, 0,
- (MAX_IRQS_PER_TABLE * (sizeof(u64) * 2)));
+ table = irq_lookup_table[alias];
+ if (table) {
+ set_remap_table_entry(iommu, devid, table);
+ goto out_wait;
+ }
+ table = new_table;
+ new_table = NULL;
set_remap_table_entry(iommu, devid, table);
if (devid != alias)
set_remap_table_entry(iommu, alias, table);
-out:
+out_wait:
iommu_completion_wait(iommu);
out_unlock:
spin_unlock_irqrestore(&iommu_table_lock, flags);
+ if (new_table) {
+ kmem_cache_free(amd_iommu_irq_cache, new_table->table);
+ kfree(new_table);
+ }
return table;
}
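The shape of this change is the classic "drop the lock, allocate, reacquire, re-check" pattern. A hedged, simplified sketch follows; the obj type and the lookup()/install() helpers are hypothetical stand-ins, not the driver's code:

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct obj {
	u16 id;
};

static DEFINE_SPINLOCK(table_lock);

static struct obj *lookup(u16 id);		/* hypothetical helper */
static void install(u16 id, struct obj *o);	/* hypothetical helper */

static struct obj *get_or_alloc(u16 id)
{
	struct obj *o, *new_o;
	unsigned long flags;

	spin_lock_irqsave(&table_lock, flags);
	o = lookup(id);
	spin_unlock_irqrestore(&table_lock, flags);
	if (o)
		return o;

	/* The lock is not held here, so a sleeping allocation is fine. */
	new_o = kzalloc(sizeof(*new_o), GFP_KERNEL);
	if (!new_o)
		return NULL;

	spin_lock_irqsave(&table_lock, flags);
	o = lookup(id);			/* re-check: we may have raced */
	if (!o) {
		install(id, new_o);
		o = new_o;
		new_o = NULL;
	}
	spin_unlock_irqrestore(&table_lock, flags);

	kfree(new_o);			/* frees only if we lost the race */
	return o;
}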


@@ -0,0 +1,73 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 22 Mar 2018 16:22:41 +0100
Subject: [PATCH 09/10] iommu/amd: Make amd_iommu_devtable_lock a spin_lock
Upstream commit 2cd1083d79a0a8c223af430ca97884c28a1e2fc0
Before commit 0bb6e243d7fb ("iommu/amd: Support IOMMU_DOMAIN_DMA type
allocation") amd_iommu_devtable_lock had a read_lock() user, but now
there are none. In fact, after the mentioned commit we have only
write_lock() users of the lock. Since there is no reason to keep it as
a writer lock, change its type to a spin_lock.
I *think* we might even be able to remove the lock, because all its
current users seem to have their own protection.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
---
drivers/iommu/amd_iommu.c | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -81,7 +81,7 @@
*/
#define AMD_IOMMU_PGSIZES ((~0xFFFUL) & ~(2ULL << 38))
-static DEFINE_RWLOCK(amd_iommu_devtable_lock);
+static DEFINE_SPINLOCK(amd_iommu_devtable_lock);
static DEFINE_SPINLOCK(pd_bitmap_lock);
static DEFINE_SPINLOCK(iommu_table_lock);
@@ -2086,9 +2086,9 @@ static int attach_device(struct device *
}
skip_ats_check:
- write_lock_irqsave(&amd_iommu_devtable_lock, flags);
+ spin_lock_irqsave(&amd_iommu_devtable_lock, flags);
ret = __attach_device(dev_data, domain);
- write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+ spin_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
/*
* We might boot into a crash-kernel here. The crashed kernel
@@ -2138,9 +2138,9 @@ static void detach_device(struct device
domain = dev_data->domain;
/* lock device table */
- write_lock_irqsave(&amd_iommu_devtable_lock, flags);
+ spin_lock_irqsave(&amd_iommu_devtable_lock, flags);
__detach_device(dev_data);
- write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+ spin_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
if (!dev_is_pci(dev))
return;
@@ -2804,7 +2804,7 @@ static void cleanup_domain(struct protec
struct iommu_dev_data *entry;
unsigned long flags;
- write_lock_irqsave(&amd_iommu_devtable_lock, flags);
+ spin_lock_irqsave(&amd_iommu_devtable_lock, flags);
while (!list_empty(&domain->dev_list)) {
entry = list_first_entry(&domain->dev_list,
@@ -2812,7 +2812,7 @@ static void cleanup_domain(struct protec
__detach_device(entry);
}
- write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+ spin_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
}
static void protection_domain_free(struct protection_domain *domain)


@@ -0,0 +1,40 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 22 Mar 2018 16:22:42 +0100
Subject: [PATCH 10/10] iommu/amd: Return proper error code in
irq_remapping_alloc()
Upstream commit 29d049be9438278c47253a74cf8d0ddf36bd5d68
In the unlikely case that alloc_irq_table() is not able to return a
remap table, "ret" will be assigned an error code. Later, the code
checks `index', and since it is negative (it is initialized to `-1')
the function properly aborts, but it returns `-1' instead of the
intended `-ENOMEM'.
To correct this, assign -ENOMEM to index.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
---
drivers/iommu/amd_iommu.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -4094,7 +4094,7 @@ static int irq_remapping_alloc(struct ir
struct amd_ir_data *data = NULL;
struct irq_cfg *cfg;
int i, ret, devid;
- int index = -1;
+ int index;
if (!info)
return -EINVAL;
@@ -4136,7 +4136,7 @@ static int irq_remapping_alloc(struct ir
WARN_ON(table->min_index != 32);
index = info->ioapic_pin;
} else {
- ret = -ENOMEM;
+ index = -ENOMEM;
}
} else {
index = alloc_irq_index(devid, nr_irqs);


@@ -1,65 +0,0 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 29 Nov 2017 16:32:20 +0100
Subject: [PATCH] tty: goldfish: Enable 'earlycon' only if built-in
Commit 3840ed9548f7 ("tty: goldfish: Implement support for kernel
'earlycon' parameter") breaks an allmodconfig config on x86:
| LD vmlinux.o
| MODPOST vmlinux.o
|drivers/tty/serial/earlycon.o: In function `parse_options':
|drivers/tty/serial/earlycon.c:97: undefined reference to `uart_parse_earlycon'
|Makefile:1005: recipe for target 'vmlinux' failed
earlycon.c::parse_options() invokes uart_parse_earlycon() from
serial_core.c, which is compiled =m because GOLDFISH_TTY itself (and
most others) is =m.
To avoid that, I'm adding the GOLDFISH_TTY_EARLY_CONSOLE config option,
which is selected only if the goldfish TTY driver itself is =y, since
the early bits are not needed in the =m case (other drivers do the same
dance).
The alternative would be to move uart_parse_earlycon() from
serial_core.c to earlycon.c (we don't have that many users of that
function).
Fixes: 3840ed9548f7 ("tty: goldfish: Implement support for kernel
'earlycon' parameter")
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
drivers/tty/Kconfig | 6 +++++-
drivers/tty/goldfish.c | 2 ++
2 files changed, 7 insertions(+), 1 deletion(-)
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -394,10 +394,14 @@ config GOLDFISH_TTY
depends on GOLDFISH
select SERIAL_CORE
select SERIAL_CORE_CONSOLE
- select SERIAL_EARLYCON
help
Console and system TTY driver for the Goldfish virtual platform.
+config GOLDFISH_TTY_EARLY_CONSOLE
+ bool
+ default y if GOLDFISH_TTY=y
+ select SERIAL_EARLYCON
+
config DA_TTY
bool "DA TTY"
depends on METAG_DA
--- a/drivers/tty/goldfish.c
+++ b/drivers/tty/goldfish.c
@@ -442,6 +442,7 @@ static int goldfish_tty_remove(struct pl
return 0;
}
+#ifdef CONFIG_GOLDFISH_TTY_EARLY_CONSOLE
static void gf_early_console_putchar(struct uart_port *port, int ch)
{
__raw_writel(ch, port->membase);
@@ -465,6 +466,7 @@ static int __init gf_earlycon_setup(stru
}
OF_EARLYCON_DECLARE(early_gf_tty, "google,goldfish-tty", gf_earlycon_setup);
+#endif
static const struct of_device_id goldfish_tty_of_match[] = {
{ .compatible = "google,goldfish-tty", },


@@ -192,7 +192,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
warn++;
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -623,92 +623,6 @@ void traceprobe_free_probe_arg(struct pr
@@ -621,92 +621,6 @@ void traceprobe_free_probe_arg(struct pr
kfree(arg->comm);
}
@@ -297,7 +297,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Reserved field names */
@@ -356,12 +355,6 @@ extern void traceprobe_free_probe_arg(st
-extern int traceprobe_split_symbol_offset(char *symbol, unsigned long *offset);
+extern int traceprobe_split_symbol_offset(char *symbol, long *offset);
-extern ssize_t traceprobe_probes_write(struct file *file,
- const char __user *buffer, size_t count, loff_t *ppos,

Some files were not shown because too many files have changed in this diff Show More