mirror of
https://github.com/projectacrn/acrn-hypervisor.git
synced 2025-06-21 13:08:42 +00:00
dm: protect pthread_cond_wait() against spurious wakeups
Users of pthread_cond_wait() should take care of spurious wakeups, and it is usually used in conjunction with a predicate. Not doing so can result in unintended behavior. For example:

- virtio_net_tx_thread(): entry -> pthread_cond_wait() -> spurious wakeup -> vq_clear_used_ring_flags() -> segfault (vq->used uninitialized)

- tpm_crb_request_deliver(): entry -> pthread_cond_wait() -> spurious wakeup -> swtpm_handle_request() called needlessly

- virtio_rnd_get_entropy(): entry -> pthread_cond_wait() -> spurious wakeup -> no avail ring processing -> virtio_rnd_notify() skips pthread_cond_signal() due to rnd->in_progress -> vq_endchains() called needlessly -> wait in pthread_cond_wait() indefinitely

Fix these uses of pthread_cond_wait() by using predicates. The only use case without a clear predicate is the tx thread in virtio-mei, because it works with two-dimensional linked lists.

v1 -> v2:
- fix bugs and comments
- reduce code redundancy

Tracked-On: #2763
Signed-off-by: Peter Fang <peter.fang@intel.com>
Reviewed-by: Shuo A Liu <shuo.a.liu@intel.com>
This commit is contained in:
parent
e9261121b3
commit
f412d52546
@ -163,29 +163,39 @@ virtio_coreu_thread(void *param)
|
||||
struct coreu_msg *msg;
|
||||
|
||||
for (;;) {
|
||||
ret = 0;
|
||||
pthread_mutex_lock(&vcoreu->rx_mtx);
|
||||
ret = pthread_cond_wait(&vcoreu->rx_cond, &vcoreu->rx_mtx);
|
||||
|
||||
/*
|
||||
* Checking the avail ring here serves two purposes:
|
||||
* - avoid vring processing due to spurious wakeups
|
||||
* - catch missing notifications before acquiring rx_mtx
|
||||
*/
|
||||
while (!ret && !vq_has_descs(rvq))
|
||||
ret = pthread_cond_wait(&vcoreu->rx_cond, &vcoreu->rx_mtx);
|
||||
|
||||
pthread_mutex_unlock(&vcoreu->rx_mtx);
|
||||
|
||||
if (ret)
|
||||
break;
|
||||
|
||||
while(vq_has_descs(rvq)) {
|
||||
vq_getchain(rvq, &idx, &iov, 1, NULL);
|
||||
do {
|
||||
ret = vq_getchain(rvq, &idx, &iov, 1, NULL);
|
||||
assert(ret > 0);
|
||||
|
||||
msg = (struct coreu_msg *)(iov.iov_base);
|
||||
|
||||
ret = send_and_receive(vcoreu->fd, msg);
|
||||
if (ret < 0)
|
||||
{
|
||||
if (ret < 0) {
|
||||
close(vcoreu->fd);
|
||||
vcoreu->fd = -1;
|
||||
}
|
||||
|
||||
/* release this chain and handle more */
|
||||
vq_relchain(rvq, idx, sizeof(struct coreu_msg));
|
||||
}
|
||||
} while (vq_has_descs(rvq));
|
||||
|
||||
/* at least one avail ring element has been processed */
|
||||
vq_endchains(rvq, 1);
|
||||
}
|
||||
|
||||
|
@ -306,28 +306,37 @@ virtio_hdcp_talk_to_daemon(void *param)
|
||||
for (;;) {
|
||||
pthread_mutex_lock(&vhdcp->rx_mtx);
|
||||
vhdcp->in_progress = 0;
|
||||
ret = pthread_cond_wait(&vhdcp->rx_cond, &vhdcp->rx_mtx);
|
||||
assert(ret == 0);
|
||||
|
||||
/*
|
||||
* Checking the avail ring here serves two purposes:
|
||||
* - avoid vring processing due to spurious wakeups
|
||||
* - catch missing notifications before acquiring rx_mtx
|
||||
*/
|
||||
while (!vq_has_descs(rvq)) {
|
||||
ret = pthread_cond_wait(&vhdcp->rx_cond, &vhdcp->rx_mtx);
|
||||
assert(ret == 0);
|
||||
}
|
||||
|
||||
vhdcp->in_progress = 1;
|
||||
pthread_mutex_unlock(&vhdcp->rx_mtx);
|
||||
|
||||
while(vq_has_descs(rvq)) {
|
||||
vq_getchain(rvq, &idx, &iov, 1, NULL);
|
||||
do {
|
||||
ret = vq_getchain(rvq, &idx, &iov, 1, NULL);
|
||||
assert(ret > 0);
|
||||
|
||||
msg = (struct SocketData*)(iov.iov_base);
|
||||
|
||||
ret = performMessageTransaction(vhdcp->fd, *msg);
|
||||
if (ret < 0)
|
||||
{
|
||||
if (ret < 0) {
|
||||
close(vhdcp->fd);
|
||||
vhdcp->fd = -1;
|
||||
}
|
||||
|
||||
/* release this chain and handle more */
|
||||
vq_relchain(rvq, idx, sizeof(struct SocketData));
|
||||
}
|
||||
} while (vq_has_descs(rvq));
|
||||
|
||||
/* at least one avail ring element has been processed */
|
||||
vq_endchains(rvq, 1);
|
||||
}
|
||||
}
|
||||
|
@ -1556,11 +1556,10 @@ out:
|
||||
vq_relchain(vq, idx, tlen);
|
||||
DPRINTF("TX: release OUT-vq idx[%d]\n", idx);
|
||||
|
||||
if (vmei->rx_need_sched) {
|
||||
pthread_mutex_lock(&vmei->rx_mutex);
|
||||
pthread_mutex_lock(&vmei->rx_mutex);
|
||||
if (vmei->rx_need_sched)
|
||||
pthread_cond_signal(&vmei->rx_cond);
|
||||
pthread_mutex_unlock(&vmei->rx_mutex);
|
||||
}
|
||||
pthread_mutex_unlock(&vmei->rx_mutex);
|
||||
|
||||
return;
|
||||
|
||||
@ -1617,20 +1616,34 @@ static void *vmei_tx_thread(void *param)
|
||||
{
|
||||
struct virtio_mei *vmei = param;
|
||||
struct timespec max_wait = {0, 0};
|
||||
int err;
|
||||
int err, pending_cnt = 0;
|
||||
|
||||
pthread_mutex_lock(&vmei->tx_mutex);
|
||||
err = pthread_cond_wait(&vmei->tx_cond, &vmei->tx_mutex);
|
||||
assert(err == 0);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
while (vmei->status != VMEI_STST_DEINIT) {
|
||||
struct vmei_me_client *me;
|
||||
struct vmei_host_client *e;
|
||||
ssize_t len;
|
||||
int pending_cnt = 0;
|
||||
int send_ready = 0;
|
||||
int send_ready;
|
||||
|
||||
if (pending_cnt == 0) {
|
||||
err = pthread_cond_wait(&vmei->tx_cond,
|
||||
&vmei->tx_mutex);
|
||||
assert(err == 0);
|
||||
if (err)
|
||||
goto out;
|
||||
} else {
|
||||
max_wait.tv_sec = time(NULL) + 2;
|
||||
max_wait.tv_nsec = 0;
|
||||
err = pthread_cond_timedwait(&vmei->tx_cond,
|
||||
&vmei->tx_mutex,
|
||||
&max_wait);
|
||||
assert(err == 0 || err == ETIMEDOUT);
|
||||
if (err && err != ETIMEDOUT)
|
||||
goto out;
|
||||
|
||||
pending_cnt = 0;
|
||||
}
|
||||
|
||||
pthread_mutex_lock(&vmei->list_mutex);
|
||||
LIST_FOREACH(me, &vmei->active_clients, list) {
|
||||
@ -1660,21 +1673,8 @@ static void *vmei_tx_thread(void *param)
|
||||
}
|
||||
unlock:
|
||||
pthread_mutex_unlock(&vmei->list_mutex);
|
||||
|
||||
if (pending_cnt == 0) {
|
||||
err = pthread_cond_wait(&vmei->tx_cond,
|
||||
&vmei->tx_mutex);
|
||||
} else {
|
||||
max_wait.tv_sec = time(NULL) + 2;
|
||||
max_wait.tv_nsec = 0;
|
||||
err = pthread_cond_timedwait(&vmei->tx_cond,
|
||||
&vmei->tx_mutex,
|
||||
&max_wait);
|
||||
}
|
||||
|
||||
if (vmei->status == VMEI_STST_DEINIT)
|
||||
goto out;
|
||||
}
|
||||
|
||||
out:
|
||||
pthread_mutex_unlock(&vmei->tx_mutex);
|
||||
pthread_exit(NULL);
|
||||
@ -1869,33 +1869,33 @@ found:
|
||||
/*
|
||||
* Thread which will handle processing of RX desc
|
||||
*/
|
||||
|
||||
static void *vmei_rx_thread(void *param)
|
||||
{
|
||||
struct virtio_mei *vmei = param;
|
||||
struct virtio_vq_info *vq;
|
||||
struct virtio_vq_info *vq = &vmei->vqs[VMEI_RXQ];
|
||||
int err;
|
||||
|
||||
vq = &vmei->vqs[VMEI_RXQ];
|
||||
|
||||
/*
|
||||
* Let us wait till the rx queue pointers get initialised &
|
||||
* first tx signaled
|
||||
*/
|
||||
pthread_mutex_lock(&vmei->rx_mutex);
|
||||
err = pthread_cond_wait(&vmei->rx_cond, &vmei->rx_mutex);
|
||||
assert(err == 0);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
while (vmei->status != VMEI_STST_DEINIT && !vq_ring_ready(vq)) {
|
||||
err = pthread_cond_wait(&vmei->rx_cond, &vmei->rx_mutex);
|
||||
assert(err == 0);
|
||||
if (err)
|
||||
goto out;
|
||||
}
|
||||
|
||||
while (vmei->status != VMEI_STST_DEINIT) {
|
||||
/* note - rx mutex is locked here */
|
||||
while (vq_ring_ready(vq)) {
|
||||
while (!vmei->rx_need_sched || !vq_has_descs(vq)) {
|
||||
vq_clear_used_ring_flags(&vmei->base, vq);
|
||||
mb();
|
||||
if (vq_has_descs(vq) &&
|
||||
vmei->rx_need_sched &&
|
||||
vmei->status != VMEI_STS_RESET)
|
||||
if (vmei->rx_need_sched &&
|
||||
vmei->status != VMEI_STS_RESET &&
|
||||
vq_has_descs(vq))
|
||||
break;
|
||||
|
||||
err = pthread_cond_wait(&vmei->rx_cond,
|
||||
@ -1904,15 +1904,17 @@ static void *vmei_rx_thread(void *param)
|
||||
if (err || vmei->status == VMEI_STST_DEINIT)
|
||||
goto out;
|
||||
}
|
||||
|
||||
vq->used->flags |= VRING_USED_F_NO_NOTIFY;
|
||||
|
||||
do {
|
||||
vmei->rx_need_sched = vmei_proc_rx(vmei, vq);
|
||||
} while (vq_has_descs(vq) && vmei->rx_need_sched);
|
||||
} while (vmei->rx_need_sched && vq_has_descs(vq));
|
||||
|
||||
if (!vq_has_descs(vq))
|
||||
vq_endchains(vq, 1);
|
||||
/* at least one avail ring element has been processed */
|
||||
vq_endchains(vq, !vq_has_descs(vq));
|
||||
}
|
||||
|
||||
out:
|
||||
pthread_mutex_unlock(&vmei->rx_mutex);
|
||||
pthread_exit(NULL);
|
||||
|
@ -536,18 +536,20 @@ static void *
|
||||
virtio_net_tx_thread(void *param)
|
||||
{
|
||||
struct virtio_net *net = param;
|
||||
struct virtio_vq_info *vq;
|
||||
struct virtio_vq_info *vq = &net->queues[VIRTIO_NET_TXQ];
|
||||
int error;
|
||||
|
||||
vq = &net->queues[VIRTIO_NET_TXQ];
|
||||
|
||||
/*
|
||||
* Let us wait till the tx queue pointers get initialised &
|
||||
* first tx signaled
|
||||
*/
|
||||
pthread_mutex_lock(&net->tx_mtx);
|
||||
error = pthread_cond_wait(&net->tx_cond, &net->tx_mtx);
|
||||
assert(error == 0);
|
||||
|
||||
while (!net->closing && !vq_ring_ready(vq)) {
|
||||
error = pthread_cond_wait(&net->tx_cond, &net->tx_mtx);
|
||||
assert(error == 0);
|
||||
}
|
||||
|
||||
if (net->closing) {
|
||||
WPRINTF(("vtnet tx thread closing...\n"));
|
||||
pthread_mutex_unlock(&net->tx_mtx);
|
||||
@ -556,6 +558,13 @@ virtio_net_tx_thread(void *param)
|
||||
|
||||
for (;;) {
|
||||
/* note - tx mutex is locked here */
|
||||
net->tx_in_progress = 0;
|
||||
|
||||
/*
|
||||
* Checking the avail ring here serves two purposes:
|
||||
* - avoid vring processing due to spurious wakeups
|
||||
* - catch missing notifications before acquiring tx_mtx
|
||||
*/
|
||||
while (net->resetting || !vq_has_descs(vq)) {
|
||||
vq_clear_used_ring_flags(&net->base, vq);
|
||||
/* memory barrier */
|
||||
@ -563,7 +572,6 @@ virtio_net_tx_thread(void *param)
|
||||
if (!net->resetting && vq_has_descs(vq))
|
||||
break;
|
||||
|
||||
net->tx_in_progress = 0;
|
||||
error = pthread_cond_wait(&net->tx_cond, &net->tx_mtx);
|
||||
assert(error == 0);
|
||||
if (net->closing) {
|
||||
@ -572,6 +580,7 @@ virtio_net_tx_thread(void *param)
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
vq->used->flags |= VRING_USED_F_NO_NOTIFY;
|
||||
net->tx_in_progress = 1;
|
||||
pthread_mutex_unlock(&net->tx_mtx);
|
||||
|
@ -304,27 +304,38 @@ virtio_rnd_get_entropy(void *param)
|
||||
struct virtio_vq_info *vq = &rnd->vq;
|
||||
struct iovec iov;
|
||||
uint16_t idx;
|
||||
int len, error;
|
||||
int ret;
|
||||
ssize_t len;
|
||||
|
||||
for (;;) {
|
||||
pthread_mutex_lock(&rnd->rx_mtx);
|
||||
rnd->in_progress = 0;
|
||||
error = pthread_cond_wait(&rnd->rx_cond, &rnd->rx_mtx);
|
||||
assert(error == 0);
|
||||
|
||||
/*
|
||||
* Checking the avail ring here serves two purposes:
|
||||
* - avoid vring processing due to spurious wakeups
|
||||
* - catch missing notifications before acquiring rx_mtx
|
||||
*/
|
||||
while (!vq_has_descs(vq)) {
|
||||
ret = pthread_cond_wait(&rnd->rx_cond, &rnd->rx_mtx);
|
||||
assert(ret == 0);
|
||||
}
|
||||
|
||||
rnd->in_progress = 1;
|
||||
pthread_mutex_unlock(&rnd->rx_mtx);
|
||||
|
||||
while(vq_has_descs(vq)) {
|
||||
vq_getchain(vq, &idx, &iov, 1, NULL);
|
||||
do {
|
||||
ret = vq_getchain(vq, &idx, &iov, 1, NULL);
|
||||
assert(ret > 0);
|
||||
|
||||
len = read(rnd->fd, iov.iov_base, iov.iov_len);
|
||||
assert(len > 0);
|
||||
|
||||
/* release this chain and handle more */
|
||||
vq_relchain(vq, idx, len);
|
||||
}
|
||||
} while (vq_has_descs(vq));
|
||||
|
||||
/* at least one avail ring element has been processed */
|
||||
vq_endchains(vq, 1);
|
||||
}
|
||||
}
|
||||
|
@ -1390,7 +1390,7 @@ ioc_tx_thread(void *arg)
|
||||
for (;;) {
|
||||
pthread_mutex_lock(&ioc->tx_mtx);
|
||||
while (SIMPLEQ_EMPTY(&ioc->tx_qhead)) {
|
||||
err = pthread_cond_wait(&ioc->tx_cond, &ioc->tx_mtx);
|
||||
err = pthread_cond_wait(&ioc->tx_cond, &ioc->tx_mtx);
|
||||
assert(err == 0);
|
||||
if (ioc->closing)
|
||||
goto exit;
|
||||
|
@ -265,7 +265,12 @@ static void tpm_crb_request_deliver(void *arg)
|
||||
break;
|
||||
}
|
||||
|
||||
ret = pthread_cond_wait(&tpm_vdev->request_cond, &tpm_vdev->request_mutex);
|
||||
while (!ret &&
|
||||
tpm_vdev->crb_regs.regs.ctrl_start == CRB_CTRL_CMD_COMPLETED) {
|
||||
ret = pthread_cond_wait(
|
||||
&tpm_vdev->request_cond, &tpm_vdev->request_mutex);
|
||||
}
|
||||
|
||||
if (ret) {
|
||||
DPRINTF("ERROR: Failed to wait condition(%d)\n", ret);
|
||||
break;
|
||||
@ -312,6 +317,11 @@ static void crb_reg_write(struct tpm_crb_vdev *tpm_vdev, uint64_t addr, int size
|
||||
(tpm_vdev->crb_regs.regs.ctrl_sts.tpmIdle != 1) &&
|
||||
(get_active_locality(tpm_vdev) == target_loc)) {
|
||||
|
||||
if (pthread_mutex_lock(&tpm_vdev->request_mutex)) {
|
||||
DPRINTF("ERROR: Failed to acquire mutex lock\n");
|
||||
break;
|
||||
}
|
||||
|
||||
tpm_vdev->crb_regs.regs.ctrl_start = CRB_CTRL_START_CMD;
|
||||
cmd_size = MIN(get_tpm_cmd_size(tpm_vdev->data_buffer),
|
||||
TPM_CRB_DATA_BUFFER_SIZE);
|
||||
@ -322,11 +332,6 @@ static void crb_reg_write(struct tpm_crb_vdev *tpm_vdev, uint64_t addr, int size
|
||||
tpm_vdev->cmd.out = &tpm_vdev->data_buffer[0];
|
||||
tpm_vdev->cmd.out_len = TPM_CRB_DATA_BUFFER_SIZE;
|
||||
|
||||
if (pthread_mutex_lock(&tpm_vdev->request_mutex)) {
|
||||
DPRINTF("ERROR: Failed to acquire mutex lock\n");
|
||||
break;
|
||||
}
|
||||
|
||||
if (pthread_cond_signal(&tpm_vdev->request_cond)) {
|
||||
DPRINTF("ERROR: Failed to wait condition\n");
|
||||
break;
|
||||
|
Loading…
Reference in New Issue
Block a user