diff --git a/hypervisor/arch/x86/guest/vcpu.c b/hypervisor/arch/x86/guest/vcpu.c
index 7f7ead7e0..e5e469d8c 100644
--- a/hypervisor/arch/x86/guest/vcpu.c
+++ b/hypervisor/arch/x86/guest/vcpu.c
@@ -232,7 +232,7 @@ struct acrn_vcpu *get_running_vcpu(uint16_t pcpu_id)
 	struct acrn_vcpu *vcpu = NULL;
 
 	if ((curr != NULL) && (!is_idle_thread(curr))) {
-		vcpu = list_entry(curr, struct acrn_vcpu, thread_obj);
+		vcpu = container_of(curr, struct acrn_vcpu, thread_obj);
 	}
 
 	return vcpu;
@@ -753,7 +753,7 @@ void rstore_xsave_area(const struct ext_context *ectx)
  */
 static void context_switch_out(struct thread_object *prev)
 {
-	struct acrn_vcpu *vcpu = list_entry(prev, struct acrn_vcpu, thread_obj);
+	struct acrn_vcpu *vcpu = container_of(prev, struct acrn_vcpu, thread_obj);
 	struct ext_context *ectx = &(vcpu->arch.contexts[vcpu->arch.cur_context].ext_ctx);
 
 	/* We don't flush TLB as we assume each vcpu has different vpid */
@@ -769,7 +769,7 @@ static void context_switch_out(struct thread_object *prev)
 
 static void context_switch_in(struct thread_object *next)
 {
-	struct acrn_vcpu *vcpu = list_entry(next, struct acrn_vcpu, thread_obj);
+	struct acrn_vcpu *vcpu = container_of(next, struct acrn_vcpu, thread_obj);
 	struct ext_context *ectx = &(vcpu->arch.contexts[vcpu->arch.cur_context].ext_ctx);
 
 	load_vmcs(vcpu);
diff --git a/hypervisor/arch/x86/timer.c b/hypervisor/arch/x86/timer.c
index a190191f2..0f10ce327 100644
--- a/hypervisor/arch/x86/timer.c
+++ b/hypervisor/arch/x86/timer.c
@@ -50,7 +50,7 @@ static inline void update_physical_timer(struct per_cpu_timers *cpu_timer)
 
 	/* find the next event timer */
 	if (!list_empty(&cpu_timer->timer_list)) {
-		timer = list_entry((&cpu_timer->timer_list)->next,
+		timer = container_of((&cpu_timer->timer_list)->next,
 				struct hv_timer, node);
 
 		/* it is okay to program a expired time */
@@ -70,7 +70,7 @@ static bool local_add_timer(struct per_cpu_timers *cpu_timer,
 
 	prev = &cpu_timer->timer_list;
 	list_for_each(pos, &cpu_timer->timer_list) {
-		tmp = list_entry(pos, struct hv_timer, node);
+		tmp = container_of(pos, struct hv_timer, node);
 		if (tmp->fire_tsc < tsc) {
 			prev = &tmp->node;
 		}
@@ -168,7 +168,7 @@ static void timer_softirq(uint16_t pcpu_id)
 	 * already passed due to previously func()'s delay.
 	 */
 	list_for_each_safe(pos, n, &cpu_timer->timer_list) {
-		timer = list_entry(pos, struct hv_timer, node);
+		timer = container_of(pos, struct hv_timer, node);
 		/* timer expried */
 		tries--;
 		if ((timer->fire_tsc <= current_tsc) && (tries != 0U)) {
diff --git a/hypervisor/common/hv_main.c b/hypervisor/common/hv_main.c
index 41ecc9c48..caab25e22 100644
--- a/hypervisor/common/hv_main.c
+++ b/hypervisor/common/hv_main.c
@@ -17,7 +17,7 @@
 
 void vcpu_thread(struct thread_object *obj)
 {
-	struct acrn_vcpu *vcpu = list_entry(obj, struct acrn_vcpu, thread_obj);
+	struct acrn_vcpu *vcpu = container_of(obj, struct acrn_vcpu, thread_obj);
 	uint32_t basic_exit_reason = 0U;
 	int32_t ret = 0;
 
diff --git a/hypervisor/common/sched_bvt.c b/hypervisor/common/sched_bvt.c
index 4fec5e0f3..fc75b916e 100644
--- a/hypervisor/common/sched_bvt.c
+++ b/hypervisor/common/sched_bvt.c
@@ -65,7 +65,7 @@ static void runqueue_add(struct thread_object *obj)
 		list_add(&data->list, &bvt_ctl->runqueue);
 	} else {
 		list_for_each(pos, &bvt_ctl->runqueue) {
-			iter_obj = list_entry(pos, struct thread_object, data);
+			iter_obj = container_of(pos, struct thread_object, data);
 			iter_data = (struct sched_bvt_data *)iter_obj->data;
 			if (iter_data->evt > data->evt) {
 				list_add_node(&data->list, pos->prev, pos);
@@ -240,7 +240,7 @@ static struct thread_object *sched_bvt_pick_next(struct sched_control *ctl)
 		first = bvt_ctl->runqueue.next;
 		sec = (first->next == &bvt_ctl->runqueue) ? NULL : first->next;
 
-		first_obj = list_entry(first, struct thread_object, data);
+		first_obj = container_of(first, struct thread_object, data);
 		first_data = (struct sched_bvt_data *)first_obj->data;
 
 		/* The run_countdown is used to store how may mcu the next thread
@@ -253,7 +253,7 @@ static struct thread_object *sched_bvt_pick_next(struct sched_control *ctl)
 		 * UINT64_MAX can make it run for >100 years before rescheduled.
 		 */
 		if (sec != NULL) {
-			second_obj = list_entry(sec, struct thread_object, data);
+			second_obj = container_of(sec, struct thread_object, data);
 			second_data = (struct sched_bvt_data *)second_obj->data;
 			delta_mcu = second_data->evt - first_data->evt;
 			first_data->run_countdown = v2p(delta_mcu, first_data->vt_ratio) + BVT_CSA_MCU;
diff --git a/hypervisor/include/lib/list.h b/hypervisor/include/lib/list.h
index 3578d79c6..9e0c2f6c3 100644
--- a/hypervisor/include/lib/list.h
+++ b/hypervisor/include/lib/list.h
@@ -108,8 +108,8 @@ static inline void list_splice_init(struct list_head *list,
 	}
 }
 
-#define list_entry(ptr, type, member) \
-	((type *)((char *)(ptr)-(uint64_t)(&((type *)0)->member)))
+#define container_of(ptr, type, member) \
+	((type *)((char *)(ptr)-offsetof(type, member)))
 
 #define list_for_each(pos, head) \
 	for ((pos) = (head)->next; (pos) != (head); (pos) = (pos)->next)
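
For reviewers unfamiliar with the idiom: `container_of` recovers a pointer to the enclosing structure from a pointer to one of its embedded members, which is how these intrusive lists map a `node` or `thread_obj` field back to its `hv_timer` or `acrn_vcpu`. The rename matches the name most C codebases use for this macro, and the body now computes the member offset with `offsetof` instead of the old `(&((type *)0)->member)` null-pointer cast, whose behavior is not guaranteed by the C standard. Below is a minimal, self-contained sketch of the pattern; it is not ACRN code, and `struct demo_timer` and its fields are invented for illustration.

/* Standalone demo of the offsetof-based container_of the patch introduces.
 * struct demo_timer is a made-up stand-in for an intrusive list element
 * such as ACRN's hv_timer.
 */
#include <stddef.h>	/* offsetof */
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct list_node {
	struct list_node *next;
};

/* "Intrusive" list element: the linkage lives inside the payload struct. */
struct demo_timer {
	unsigned long fire_tsc;
	struct list_node node;	/* embedded linkage, like hv_timer.node */
};

int main(void)
{
	struct demo_timer t = { .fire_tsc = 42UL, .node = { NULL } };
	struct list_node *pos = &t.node;	/* what list iteration hands you */

	/* Walk back from the embedded member to the enclosing struct. */
	struct demo_timer *timer = container_of(pos, struct demo_timer, node);

	printf("fire_tsc = %lu\n", timer->fire_tsc);	/* prints 42 */
	return 0;
}

One assumption worth confirming in review: the new macro body requires an `offsetof` definition to be visible from list.h (in hosted C it comes from <stddef.h>; a freestanding hypervisor typically defines it itself, e.g. via __builtin_offsetof in a shared util header).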