dm: protect pthread_cond_wait() against spurious wakeups

Callers of pthread_cond_wait() must guard against spurious wakeups, which
is why the call is normally paired with a predicate. Failing to do so can
lead to unintended behavior. For example:

virtio_net_tx_thread():
  entry -> pthread_cond_wait() -> spurious wakeup ->
  vq_clear_used_ring_flags() -> segfault (vq->used uninitialized)

tpm_crb_request_deliver():
  entry -> pthread_cond_wait() -> spurious wakeup ->
  swtpm_handle_request() called needlessly

virtio_rnd_get_entropy():
  entry -> pthread_cond_wait() -> spurious wakeup ->
  no avail ring processing ->
  virtio_rnd_notify() skips pthread_cond_signal() due to
  rnd->in_progress ->
  vq_endchains() called needlessly ->
  wait in pthread_cond_wait() indefinitely

Fix these uses of pthread_cond_wait() by guarding each wait with a
predicate (the generic pattern is sketched below).
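
For reference, the predicate-guarded wait applied throughout this patch
looks roughly like the sketch below. It is illustrative only; work_ready,
wait_for_work() and the mutex/condvar names are placeholders, not code
taken from this patch:

  #include <pthread.h>
  #include <stdbool.h>

  static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
  static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
  static bool work_ready;    /* the predicate, only touched under mtx */

  static void wait_for_work(void)
  {
      pthread_mutex_lock(&mtx);
      /* re-check the predicate after every wakeup, spurious or not */
      while (!work_ready)
          pthread_cond_wait(&cond, &mtx);
      work_ready = false;
      pthread_mutex_unlock(&mtx);
  }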

The only call site without a clear predicate is the tx thread in
virtio-mei, because it walks two-dimensional linked lists. It keeps a
pending-send count and falls back to a bounded wait instead, as sketched
below.
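
A simplified, self-contained sketch of that bounded-wait fallback follows;
wait_for_tx() and its parameters are illustrative stand-ins for the fields
used by vmei_tx_thread(), not code taken from this patch:

  #include <errno.h>
  #include <pthread.h>
  #include <time.h>

  /* called with mtx held; bounds the wait while sends are still pending */
  static int wait_for_tx(pthread_cond_t *cond, pthread_mutex_t *mtx,
                         int *pending_cnt)
  {
      struct timespec max_wait;
      int err;

      if (*pending_cnt == 0)
          return pthread_cond_wait(cond, mtx);

      /* retry the pending sends after at most two seconds */
      max_wait.tv_sec = time(NULL) + 2;
      max_wait.tv_nsec = 0;
      err = pthread_cond_timedwait(cond, mtx, &max_wait);
      *pending_cnt = 0;
      return (err == ETIMEDOUT) ? 0 : err;
  }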

v1 -> v2:
- fix bugs and comments
- reduce code redundancy

Tracked-On: #2763
Signed-off-by: Peter Fang <peter.fang@intel.com>
Reviewed-by: Shuo A Liu <shuo.a.liu@intel.com>

@@ -163,29 +163,39 @@ virtio_coreu_thread(void *param)
     struct coreu_msg *msg;
     for (;;) {
+        ret = 0;
         pthread_mutex_lock(&vcoreu->rx_mtx);
-        ret = pthread_cond_wait(&vcoreu->rx_cond, &vcoreu->rx_mtx);
+        /*
+         * Checking the avail ring here serves two purposes:
+         * - avoid vring processing due to spurious wakeups
+         * - catch missing notifications before acquiring rx_mtx
+         */
+        while (!ret && !vq_has_descs(rvq))
+            ret = pthread_cond_wait(&vcoreu->rx_cond, &vcoreu->rx_mtx);
         pthread_mutex_unlock(&vcoreu->rx_mtx);
         if (ret)
             break;
-        while(vq_has_descs(rvq)) {
-            vq_getchain(rvq, &idx, &iov, 1, NULL);
+        do {
+            ret = vq_getchain(rvq, &idx, &iov, 1, NULL);
+            assert(ret > 0);
             msg = (struct coreu_msg *)(iov.iov_base);
             ret = send_and_receive(vcoreu->fd, msg);
-            if (ret < 0)
-            {
+            if (ret < 0) {
                 close(vcoreu->fd);
                 vcoreu->fd = -1;
             }
             /* release this chain and handle more */
             vq_relchain(rvq, idx, sizeof(struct coreu_msg));
-        }
+        } while (vq_has_descs(rvq));
+        /* at least one avail ring element has been processed */
         vq_endchains(rvq, 1);
     }


@@ -306,28 +306,37 @@ virtio_hdcp_talk_to_daemon(void *param)
     for (;;) {
         pthread_mutex_lock(&vhdcp->rx_mtx);
         vhdcp->in_progress = 0;
-        ret = pthread_cond_wait(&vhdcp->rx_cond, &vhdcp->rx_mtx);
-        assert(ret == 0);
+        /*
+         * Checking the avail ring here serves two purposes:
+         * - avoid vring processing due to spurious wakeups
+         * - catch missing notifications before acquiring rx_mtx
+         */
+        while (!vq_has_descs(rvq)) {
+            ret = pthread_cond_wait(&vhdcp->rx_cond, &vhdcp->rx_mtx);
+            assert(ret == 0);
+        }
         vhdcp->in_progress = 1;
         pthread_mutex_unlock(&vhdcp->rx_mtx);
-        while(vq_has_descs(rvq)) {
-            vq_getchain(rvq, &idx, &iov, 1, NULL);
+        do {
+            ret = vq_getchain(rvq, &idx, &iov, 1, NULL);
+            assert(ret > 0);
             msg = (struct SocketData*)(iov.iov_base);
             ret = performMessageTransaction(vhdcp->fd, *msg);
-            if (ret < 0)
-            {
+            if (ret < 0) {
                 close(vhdcp->fd);
                 vhdcp->fd = -1;
             }
             /* release this chain and handle more */
             vq_relchain(rvq, idx, sizeof(struct SocketData));
-        }
+        } while (vq_has_descs(rvq));
+        /* at least one avail ring element has been processed */
         vq_endchains(rvq, 1);
     }
 }


@@ -1556,11 +1556,10 @@ out:
     vq_relchain(vq, idx, tlen);
     DPRINTF("TX: release OUT-vq idx[%d]\n", idx);
-    if (vmei->rx_need_sched) {
-        pthread_mutex_lock(&vmei->rx_mutex);
+    pthread_mutex_lock(&vmei->rx_mutex);
+    if (vmei->rx_need_sched)
         pthread_cond_signal(&vmei->rx_cond);
-        pthread_mutex_unlock(&vmei->rx_mutex);
-    }
+    pthread_mutex_unlock(&vmei->rx_mutex);
     return;
@@ -1617,20 +1616,34 @@ static void *vmei_tx_thread(void *param)
 {
     struct virtio_mei *vmei = param;
     struct timespec max_wait = {0, 0};
-    int err;
+    int err, pending_cnt = 0;
     pthread_mutex_lock(&vmei->tx_mutex);
-    err = pthread_cond_wait(&vmei->tx_cond, &vmei->tx_mutex);
-    assert(err == 0);
-    if (err)
-        goto out;
     while (vmei->status != VMEI_STST_DEINIT) {
         struct vmei_me_client *me;
         struct vmei_host_client *e;
         ssize_t len;
-        int pending_cnt = 0;
-        int send_ready = 0;
+        int send_ready;
+        if (pending_cnt == 0) {
+            err = pthread_cond_wait(&vmei->tx_cond,
+                                    &vmei->tx_mutex);
+            assert(err == 0);
+            if (err)
+                goto out;
+        } else {
+            max_wait.tv_sec = time(NULL) + 2;
+            max_wait.tv_nsec = 0;
+            err = pthread_cond_timedwait(&vmei->tx_cond,
+                                         &vmei->tx_mutex,
+                                         &max_wait);
+            assert(err == 0 || err == ETIMEDOUT);
+            if (err && err != ETIMEDOUT)
+                goto out;
+            pending_cnt = 0;
+        }
         pthread_mutex_lock(&vmei->list_mutex);
         LIST_FOREACH(me, &vmei->active_clients, list) {
@@ -1660,21 +1673,8 @@ static void *vmei_tx_thread(void *param)
         }
 unlock:
         pthread_mutex_unlock(&vmei->list_mutex);
-        if (pending_cnt == 0) {
-            err = pthread_cond_wait(&vmei->tx_cond,
-                                    &vmei->tx_mutex);
-        } else {
-            max_wait.tv_sec = time(NULL) + 2;
-            max_wait.tv_nsec = 0;
-            err = pthread_cond_timedwait(&vmei->tx_cond,
-                                         &vmei->tx_mutex,
-                                         &max_wait);
-        }
-        if (vmei->status == VMEI_STST_DEINIT)
-            goto out;
     }
 out:
     pthread_mutex_unlock(&vmei->tx_mutex);
     pthread_exit(NULL);
@@ -1869,33 +1869,33 @@ found:
 /*
  * Thread which will handle processing of RX desc
  */
 static void *vmei_rx_thread(void *param)
 {
     struct virtio_mei *vmei = param;
-    struct virtio_vq_info *vq;
+    struct virtio_vq_info *vq = &vmei->vqs[VMEI_RXQ];
     int err;
-    vq = &vmei->vqs[VMEI_RXQ];
     /*
      * Let us wait till the rx queue pointers get initialised &
      * first tx signaled
      */
     pthread_mutex_lock(&vmei->rx_mutex);
-    err = pthread_cond_wait(&vmei->rx_cond, &vmei->rx_mutex);
-    assert(err == 0);
-    if (err)
-        goto out;
+    while (vmei->status != VMEI_STST_DEINIT && !vq_ring_ready(vq)) {
+        err = pthread_cond_wait(&vmei->rx_cond, &vmei->rx_mutex);
+        assert(err == 0);
+        if (err)
+            goto out;
+    }
     while (vmei->status != VMEI_STST_DEINIT) {
         /* note - rx mutex is locked here */
-        while (vq_ring_ready(vq)) {
+        while (!vmei->rx_need_sched || !vq_has_descs(vq)) {
             vq_clear_used_ring_flags(&vmei->base, vq);
             mb();
-            if (vq_has_descs(vq) &&
-                vmei->rx_need_sched &&
-                vmei->status != VMEI_STS_RESET)
+            if (vmei->rx_need_sched &&
+                vmei->status != VMEI_STS_RESET &&
+                vq_has_descs(vq))
                 break;
             err = pthread_cond_wait(&vmei->rx_cond,
@@ -1904,15 +1904,17 @@ static void *vmei_rx_thread(void *param)
             if (err || vmei->status == VMEI_STST_DEINIT)
                 goto out;
         }
         vq->used->flags |= VRING_USED_F_NO_NOTIFY;
         do {
             vmei->rx_need_sched = vmei_proc_rx(vmei, vq);
-        } while (vq_has_descs(vq) && vmei->rx_need_sched);
-        if (!vq_has_descs(vq))
-            vq_endchains(vq, 1);
+        } while (vmei->rx_need_sched && vq_has_descs(vq));
+        /* at least one avail ring element has been processed */
+        vq_endchains(vq, !vq_has_descs(vq));
     }
 out:
     pthread_mutex_unlock(&vmei->rx_mutex);
     pthread_exit(NULL);


@@ -536,18 +536,20 @@ static void *
 virtio_net_tx_thread(void *param)
 {
     struct virtio_net *net = param;
-    struct virtio_vq_info *vq;
+    struct virtio_vq_info *vq = &net->queues[VIRTIO_NET_TXQ];
     int error;
-    vq = &net->queues[VIRTIO_NET_TXQ];
     /*
      * Let us wait till the tx queue pointers get initialised &
      * first tx signaled
      */
     pthread_mutex_lock(&net->tx_mtx);
-    error = pthread_cond_wait(&net->tx_cond, &net->tx_mtx);
-    assert(error == 0);
+    while (!net->closing && !vq_ring_ready(vq)) {
+        error = pthread_cond_wait(&net->tx_cond, &net->tx_mtx);
+        assert(error == 0);
+    }
     if (net->closing) {
         WPRINTF(("vtnet tx thread closing...\n"));
         pthread_mutex_unlock(&net->tx_mtx);
@@ -556,6 +558,13 @@ virtio_net_tx_thread(void *param)
     for (;;) {
         /* note - tx mutex is locked here */
+        net->tx_in_progress = 0;
+        /*
+         * Checking the avail ring here serves two purposes:
+         * - avoid vring processing due to spurious wakeups
+         * - catch missing notifications before acquiring tx_mtx
+         */
         while (net->resetting || !vq_has_descs(vq)) {
             vq_clear_used_ring_flags(&net->base, vq);
             /* memory barrier */
@@ -563,7 +572,6 @@ virtio_net_tx_thread(void *param)
             if (!net->resetting && vq_has_descs(vq))
                 break;
-            net->tx_in_progress = 0;
             error = pthread_cond_wait(&net->tx_cond, &net->tx_mtx);
             assert(error == 0);
             if (net->closing) {
@@ -572,6 +580,7 @@ virtio_net_tx_thread(void *param)
                 return NULL;
             }
         }
         vq->used->flags |= VRING_USED_F_NO_NOTIFY;
         net->tx_in_progress = 1;
         pthread_mutex_unlock(&net->tx_mtx);


@@ -304,27 +304,38 @@ virtio_rnd_get_entropy(void *param)
     struct virtio_vq_info *vq = &rnd->vq;
     struct iovec iov;
     uint16_t idx;
-    int len, error;
+    int ret;
+    ssize_t len;
     for (;;) {
         pthread_mutex_lock(&rnd->rx_mtx);
         rnd->in_progress = 0;
-        error = pthread_cond_wait(&rnd->rx_cond, &rnd->rx_mtx);
-        assert(error == 0);
+        /*
+         * Checking the avail ring here serves two purposes:
+         * - avoid vring processing due to spurious wakeups
+         * - catch missing notifications before acquiring rx_mtx
+         */
+        while (!vq_has_descs(vq)) {
+            ret = pthread_cond_wait(&rnd->rx_cond, &rnd->rx_mtx);
+            assert(ret == 0);
+        }
         rnd->in_progress = 1;
         pthread_mutex_unlock(&rnd->rx_mtx);
-        while(vq_has_descs(vq)) {
-            vq_getchain(vq, &idx, &iov, 1, NULL);
+        do {
+            ret = vq_getchain(vq, &idx, &iov, 1, NULL);
+            assert(ret > 0);
             len = read(rnd->fd, iov.iov_base, iov.iov_len);
             assert(len > 0);
             /* release this chain and handle more */
             vq_relchain(vq, idx, len);
-        }
+        } while (vq_has_descs(vq));
+        /* at least one avail ring element has been processed */
         vq_endchains(vq, 1);
     }
 }


@@ -1390,7 +1390,7 @@ ioc_tx_thread(void *arg)
     for (;;) {
         pthread_mutex_lock(&ioc->tx_mtx);
         while (SIMPLEQ_EMPTY(&ioc->tx_qhead)) {
             err = pthread_cond_wait(&ioc->tx_cond, &ioc->tx_mtx);
             assert(err == 0);
             if (ioc->closing)
                 goto exit;


@@ -265,7 +265,12 @@ static void tpm_crb_request_deliver(void *arg)
             break;
         }
-        ret = pthread_cond_wait(&tpm_vdev->request_cond, &tpm_vdev->request_mutex);
+        while (!ret &&
+               tpm_vdev->crb_regs.regs.ctrl_start == CRB_CTRL_CMD_COMPLETED) {
+            ret = pthread_cond_wait(
+                &tpm_vdev->request_cond, &tpm_vdev->request_mutex);
+        }
         if (ret) {
             DPRINTF("ERROR: Failed to wait condition(%d)\n", ret);
             break;
@@ -312,6 +317,11 @@ static void crb_reg_write(struct tpm_crb_vdev *tpm_vdev, uint64_t addr, int size
             (tpm_vdev->crb_regs.regs.ctrl_sts.tpmIdle != 1) &&
             (get_active_locality(tpm_vdev) == target_loc)) {
+            if (pthread_mutex_lock(&tpm_vdev->request_mutex)) {
+                DPRINTF("ERROR: Failed to acquire mutex lock\n");
+                break;
+            }
             tpm_vdev->crb_regs.regs.ctrl_start = CRB_CTRL_START_CMD;
             cmd_size = MIN(get_tpm_cmd_size(tpm_vdev->data_buffer),
                            TPM_CRB_DATA_BUFFER_SIZE);
@@ -322,11 +332,6 @@ static void crb_reg_write(struct tpm_crb_vdev *tpm_vdev, uint64_t addr, int size
             tpm_vdev->cmd.out = &tpm_vdev->data_buffer[0];
             tpm_vdev->cmd.out_len = TPM_CRB_DATA_BUFFER_SIZE;
-            if (pthread_mutex_lock(&tpm_vdev->request_mutex)) {
-                DPRINTF("ERROR: Failed to acquire mutex lock\n");
-                break;
-            }
             if (pthread_cond_signal(&tpm_vdev->request_cond)) {
                 DPRINTF("ERROR: Failed to wait condition\n");
                 break;