hv: bugfix: skip invalid ffs64 return value

ffs64() returns INVALID_BIT_INDEX (0xffffU) when its input is zero. This
can happen in calculate_logical_dest_mask() when the guest writes illegal
destination IDs to the MSI config registers of a pt-device. The return
value is then used as a per_cpu array index, causing a page fault.
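
For illustration, here is a minimal stand-alone sketch of the failure
mode. It is not the ACRN source: the simplified ffs64(), the
INVALID_BIT_INDEX and MAX_PCPU_NUM values, and the lapic_ldr[] array are
stand-ins assumed only for this example.

    #include <stdint.h>
    #include <stdio.h>

    #define INVALID_BIT_INDEX  0xffffU   /* stand-in value for this sketch */
    #define MAX_PCPU_NUM       8U

    static uint32_t lapic_ldr[MAX_PCPU_NUM];  /* stand-in for the per_cpu data */

    /* Simplified ffs64: index of the lowest set bit, INVALID_BIT_INDEX for 0. */
    static uint16_t ffs64(uint64_t value)
    {
        uint16_t i;

        for (i = 0U; i < 64U; i++) {
            if ((value & (1ULL << i)) != 0ULL) {
                return i;
            }
        }
        return INVALID_BIT_INDEX;
    }

    int main(void)
    {
        uint64_t pdmask = 0UL;             /* e.g. guest wrote an illegal MSI destination */
        uint16_t pcpu_id = ffs64(pdmask);  /* 0xffffU, not a valid pCPU index */

        if (pcpu_id < MAX_PCPU_NUM) {
            printf("LDR of pCPU %u: 0x%x\n", (unsigned)pcpu_id,
                   (unsigned)lapic_ldr[pcpu_id]);
        } else {
            /* Without this check, lapic_ldr[pcpu_id] would read far out of
             * bounds -- in the hypervisor that access ends in a page fault. */
            printf("ffs64 returned 0x%x, skipping the per-CPU access\n",
                   (unsigned)pcpu_id);
        }
        return 0;
    }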

This patch guards the per_cpu array access and makes the function return
zero for such illegal values. By the definition of the logical
destination, a logical destination of zero addresses no CPU.
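
As a rough sketch of why zero is a safe fallback (again not ACRN code;
the mask values below follow the cluster/logical-ID split implied by the
'>> 16U' shifts in the patch and are otherwise assumptions): in the
x2APIC 'Cluster Model' a CPU accepts a logical-mode interrupt only if the
cluster IDs match and the logical-ID bits overlap, so a destination of
zero selects no CPU.

    #include <stdbool.h>
    #include <stdint.h>

    #define X2APIC_LDR_CLUSTER_ID_MASK  0xffff0000U  /* assumed: cluster ID in bits 31:16 */
    #define X2APIC_LDR_LOGICAL_ID_MASK  0x0000ffffU  /* assumed: logical ID in bits 15:0 */

    /* A local APIC accepts a logical-mode interrupt only if its cluster ID
     * equals the destination's cluster ID and the logical-ID bitmaps overlap.
     * With dest == 0U the overlap test is always false, so no CPU is selected. */
    static bool lapic_accepts(uint32_t ldr, uint32_t dest)
    {
        bool same_cluster = ((ldr ^ dest) & X2APIC_LDR_CLUSTER_ID_MASK) == 0U;
        bool id_overlap   = ((ldr & dest) & X2APIC_LDR_LOGICAL_ID_MASK) != 0U;

        return same_cluster && id_overlap;
    }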

Fixes: 1334349f8
Tracked-On: #8454
Signed-off-by: Wu Zhou <wu.zhou@intel.com>
Reviewed-by: Junjie Mao <junjie.mao@intel.com>

Author: Wu Zhou
Date:   2023-07-14 11:36:29 +08:00
Committer: acrnsi-robot
Parent: b38003b870
Commit: a9860fad05

@@ -53,30 +53,32 @@ static struct acrn_vcpu *is_single_destination(struct acrn_vm *vm, const struct
 static uint32_t calculate_logical_dest_mask(uint64_t pdmask)
 {
-    uint32_t dest_cluster_id, cluster_id, logical_id_mask = 0U;
+    uint32_t dest_cluster_id = 0U, cluster_id, logical_id_mask = 0U;
     uint64_t pcpu_mask = pdmask;
     uint16_t pcpu_id;
 
-    /* Guest using xAPIC mode may use 'Flat Model' to select an
-     * arbitrary list of CPUs. But as the HW is woring in x2APIC mode and can only
-     * use 'Cluster Model', destination mask can only be assigned to pCPUs within
-     * one Cluster. So some pCPUs may not be included.
-     * Here we use the first Cluster of all the requested pCPUs.
-     */
     pcpu_id = ffs64(pcpu_mask);
-    dest_cluster_id = per_cpu(lapic_ldr, pcpu_id) & X2APIC_LDR_CLUSTER_ID_MASK;
-    while (pcpu_id < MAX_PCPU_NUM) {
-        bitmap_clear_nolock(pcpu_id, &pcpu_mask);
-        cluster_id = per_cpu(lapic_ldr, pcpu_id) & X2APIC_LDR_CLUSTER_ID_MASK;
-        if (cluster_id == dest_cluster_id) {
-            logical_id_mask |= (per_cpu(lapic_ldr, pcpu_id) & X2APIC_LDR_LOGICAL_ID_MASK);
-        } else {
-            pr_warn("The cluster ID of pCPU %d is %d which differs from that (%d) of "
-                "the previous cores in the guest logical destination.\n"
-                "Ignore that pCPU in the logical destination for physical interrupts.",
-                pcpu_id, cluster_id >> 16U, dest_cluster_id >> 16U);
-        }
-        pcpu_id = ffs64(pcpu_mask);
+    if (pcpu_id < MAX_PCPU_NUM) {
+        /* Guests working in xAPIC mode may use 'Flat Model' to select an
+         * arbitrary list of CPUs. But as the HW is woring in x2APIC mode and can only
+         * use 'Cluster Model', destination mask can only be assigned to pCPUs within
+         * one Cluster. So some pCPUs may not be included.
+         * Here we use the first Cluster of all the requested pCPUs.
+         */
+        dest_cluster_id = per_cpu(lapic_ldr, pcpu_id) & X2APIC_LDR_CLUSTER_ID_MASK;
+        do {
+            bitmap_clear_nolock(pcpu_id, &pcpu_mask);
+            cluster_id = per_cpu(lapic_ldr, pcpu_id) & X2APIC_LDR_CLUSTER_ID_MASK;
+            if (cluster_id == dest_cluster_id) {
+                logical_id_mask |= (per_cpu(lapic_ldr, pcpu_id) & X2APIC_LDR_LOGICAL_ID_MASK);
+            } else {
+                pr_warn("The cluster ID of pCPU %d is %d which differs from that (%d) of "
+                    "the previous cores in the guest logical destination.\n"
+                    "Ignore that pCPU in the logical destination for physical interrupts.",
+                    pcpu_id, cluster_id >> 16U, dest_cluster_id >> 16U);
+            }
+            pcpu_id = ffs64(pcpu_mask);
+        } while (pcpu_id < MAX_PCPU_NUM);
     }
     return (dest_cluster_id | logical_id_mask);