hv: list: rename list_entry to container_of
This macro casts a member of a structure out to the containing structure, so renaming it to container_of makes its intent more readable.

Tracked-On: #4550
Signed-off-by: Li Fei1 <fei1.li@intel.com>
commit 7f342bf62f
parent 1328dcb205
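For readers unfamiliar with the idiom: the macro takes a pointer to a member embedded in a structure and recovers a pointer to the enclosing structure by subtracting the member's byte offset. A minimal standalone sketch — only the macro body comes from this commit; struct item and its fields are invented for illustration:

#include <stddef.h>
#include <stdio.h>

/* The macro as defined after this commit (see the list.h hunk below). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr)-offsetof(type, member)))

struct list_head {
	struct list_head *next, *prev;
};

/* Hypothetical element type embedding a list node. */
struct item {
	int value;
	struct list_head node;
};

int main(void)
{
	struct item it = { .value = 42, .node = { NULL, NULL } };
	struct list_head *p = &it.node;	/* pointer to the embedded member */
	struct item *back = container_of(p, struct item, node);

	printf("%d\n", back->value);	/* prints 42 */
	return 0;
}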
@@ -232,7 +232,7 @@ struct acrn_vcpu *get_running_vcpu(uint16_t pcpu_id)
 	struct acrn_vcpu *vcpu = NULL;
 
 	if ((curr != NULL) && (!is_idle_thread(curr))) {
-		vcpu = list_entry(curr, struct acrn_vcpu, thread_obj);
+		vcpu = container_of(curr, struct acrn_vcpu, thread_obj);
 	}
 
 	return vcpu;
@@ -753,7 +753,7 @@ void rstore_xsave_area(const struct ext_context *ectx)
  */
 static void context_switch_out(struct thread_object *prev)
 {
-	struct acrn_vcpu *vcpu = list_entry(prev, struct acrn_vcpu, thread_obj);
+	struct acrn_vcpu *vcpu = container_of(prev, struct acrn_vcpu, thread_obj);
 	struct ext_context *ectx = &(vcpu->arch.contexts[vcpu->arch.cur_context].ext_ctx);
 
 	/* We don't flush TLB as we assume each vcpu has different vpid */
@@ -769,7 +769,7 @@ static void context_switch_out(struct thread_object *prev)
 
 static void context_switch_in(struct thread_object *next)
 {
-	struct acrn_vcpu *vcpu = list_entry(next, struct acrn_vcpu, thread_obj);
+	struct acrn_vcpu *vcpu = container_of(next, struct acrn_vcpu, thread_obj);
 	struct ext_context *ectx = &(vcpu->arch.contexts[vcpu->arch.cur_context].ext_ctx);
 
 	load_vmcs(vcpu);
@@ -50,7 +50,7 @@ static inline void update_physical_timer(struct per_cpu_timers *cpu_timer)
 
 	/* find the next event timer */
 	if (!list_empty(&cpu_timer->timer_list)) {
-		timer = list_entry((&cpu_timer->timer_list)->next,
+		timer = container_of((&cpu_timer->timer_list)->next,
 				struct hv_timer, node);
 
 		/* it is okay to program a expired time */
@@ -70,7 +70,7 @@ static bool local_add_timer(struct per_cpu_timers *cpu_timer,
 
 	prev = &cpu_timer->timer_list;
 	list_for_each(pos, &cpu_timer->timer_list) {
-		tmp = list_entry(pos, struct hv_timer, node);
+		tmp = container_of(pos, struct hv_timer, node);
 		if (tmp->fire_tsc < tsc) {
 			prev = &tmp->node;
 		}
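The timer hunks all follow one pattern: walk the list with list_for_each, convert each node back to its hv_timer with container_of, and remember the last timer that fires earlier than the new one. A self-contained sketch of that insertion logic — list_add_node is not shown in this diff, so the helper below is an assumed equivalent, and hv_timer is reduced to the one field the loop needs:

#include <stddef.h>
#include <stdint.h>

struct list_head {
	struct list_head *next, *prev;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr)-offsetof(type, member)))

#define list_for_each(pos, head) \
	for ((pos) = (head)->next; (pos) != (head); (pos) = (pos)->next)

struct hv_timer {
	struct list_head node;
	uint64_t fire_tsc;
};

/* Assumed equivalent of the hypervisor's list_add_node: link 'n'
 * between 'prev' and 'next'. */
static void list_add_node(struct list_head *n, struct list_head *prev,
		struct list_head *next)
{
	prev->next = n;
	n->prev = prev;
	n->next = next;
	next->prev = n;
}

/* Insert 'timer' so the list stays sorted by fire_tsc, mirroring the
 * loop in local_add_timer above. */
static void timer_list_insert(struct list_head *head, struct hv_timer *timer)
{
	struct list_head *pos, *prev = head;
	struct hv_timer *tmp;

	list_for_each(pos, head) {
		tmp = container_of(pos, struct hv_timer, node);
		if (tmp->fire_tsc < timer->fire_tsc) {
			prev = &tmp->node;
		}
	}
	list_add_node(&timer->node, prev, prev->next);
}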
@@ -168,7 +168,7 @@ static void timer_softirq(uint16_t pcpu_id)
 	 * already passed due to previously func()'s delay.
 	 */
 	list_for_each_safe(pos, n, &cpu_timer->timer_list) {
-		timer = list_entry(pos, struct hv_timer, node);
+		timer = container_of(pos, struct hv_timer, node);
 		/* timer expried */
 		tries--;
 		if ((timer->fire_tsc <= current_tsc) && (tries != 0U)) {
@@ -17,7 +17,7 @@
 
 void vcpu_thread(struct thread_object *obj)
 {
-	struct acrn_vcpu *vcpu = list_entry(obj, struct acrn_vcpu, thread_obj);
+	struct acrn_vcpu *vcpu = container_of(obj, struct acrn_vcpu, thread_obj);
 	uint32_t basic_exit_reason = 0U;
 	int32_t ret = 0;
 
@@ -65,7 +65,7 @@ static void runqueue_add(struct thread_object *obj)
 		list_add(&data->list, &bvt_ctl->runqueue);
 	} else {
 		list_for_each(pos, &bvt_ctl->runqueue) {
-			iter_obj = list_entry(pos, struct thread_object, data);
+			iter_obj = container_of(pos, struct thread_object, data);
 			iter_data = (struct sched_bvt_data *)iter_obj->data;
 			if (iter_data->evt > data->evt) {
 				list_add_node(&data->list, pos->prev, pos);
@@ -240,7 +240,7 @@ static struct thread_object *sched_bvt_pick_next(struct sched_control *ctl)
 	first = bvt_ctl->runqueue.next;
 	sec = (first->next == &bvt_ctl->runqueue) ? NULL : first->next;
 
-	first_obj = list_entry(first, struct thread_object, data);
+	first_obj = container_of(first, struct thread_object, data);
 	first_data = (struct sched_bvt_data *)first_obj->data;
 
 	/* The run_countdown is used to store how may mcu the next thread
@@ -253,7 +253,7 @@ static struct thread_object *sched_bvt_pick_next(struct sched_control *ctl)
 	 * UINT64_MAX can make it run for >100 years before rescheduled.
 	 */
 	if (sec != NULL) {
-		second_obj = list_entry(sec, struct thread_object, data);
+		second_obj = container_of(sec, struct thread_object, data);
 		second_data = (struct sched_bvt_data *)second_obj->data;
 		delta_mcu = second_data->evt - first_data->evt;
 		first_data->run_countdown = v2p(delta_mcu, first_data->vt_ratio) + BVT_CSA_MCU;
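To make the run_countdown arithmetic concrete: delta_mcu is the virtual-time gap between the two best runnable threads, and v2p scales it back to physical time units via the thread's weight ratio. Neither v2p nor BVT_CSA_MCU is defined in this diff, so the sketch below assumes a plain division for v2p and an arbitrary allowance value; it is a worked example, not the hypervisor's code:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define BVT_CSA_MCU	5U	/* context-switch allowance in mcu; value assumed */

/* Assumed shape of v2p: convert a virtual-time delta into physical mcu
 * by dividing out the thread's weight ratio. */
static uint64_t v2p(uint64_t delta_mcu, uint64_t vt_ratio)
{
	return delta_mcu / vt_ratio;
}

int main(void)
{
	uint64_t first_evt = 100U, second_evt = 160U, vt_ratio = 2U;
	uint64_t delta_mcu = second_evt - first_evt;

	/* The picked thread may run this many mcu before the scheduler
	 * reconsiders: (160 - 100) / 2 + 5 = 35 */
	printf("%" PRIu64 "\n", v2p(delta_mcu, vt_ratio) + BVT_CSA_MCU);
	return 0;
}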
@@ -108,8 +108,8 @@ static inline void list_splice_init(struct list_head *list,
 	}
 }
 
-#define list_entry(ptr, type, member) \
-	((type *)((char *)(ptr)-(uint64_t)(&((type *)0)->member)))
+#define container_of(ptr, type, member) \
+	((type *)((char *)(ptr)-offsetof(type, member)))
 
 #define list_for_each(pos, head) \
 	for ((pos) = (head)->next; (pos) != (head); (pos) = (pos)->next)
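Note that the definition change is more than a rename: the old body computed the member offset by "dereferencing" a null pointer cast to the type, a common but formally undefined idiom, while offsetof from <stddef.h> expresses the same offset in standard C. A small check showing the two spellings agree on a typical compiler — struct example is invented for illustration:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct example {
	int a;
	long b;
};

int main(void)
{
	/* Hand-rolled offset, as in the old macro body (works on common
	 * compilers but is not sanctioned by the C standard). */
	size_t hand_rolled = (size_t)(uintptr_t)(&((struct example *)0)->b);

	assert(hand_rolled == offsetof(struct example, b));
	return 0;
}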