Diffstat (limited to '0074-xen-sched-fix-restore_vcpu_affinity-by-removing-it.patch')
-rw-r--r--  0074-xen-sched-fix-restore_vcpu_affinity-by-removing-it.patch | 158
1 file changed, 158 insertions, 0 deletions
diff --git a/0074-xen-sched-fix-restore_vcpu_affinity-by-removing-it.patch b/0074-xen-sched-fix-restore_vcpu_affinity-by-removing-it.patch
new file mode 100644
index 0000000..9085f67
--- /dev/null
+++ b/0074-xen-sched-fix-restore_vcpu_affinity-by-removing-it.patch
@@ -0,0 +1,158 @@
+From 9c5114696c6f7773b7f3691f27aaa7a0636c916d Mon Sep 17 00:00:00 2001
+From: Juergen Gross <jgross@suse.com>
+Date: Mon, 31 Oct 2022 13:34:28 +0100
+Subject: [PATCH 074/126] xen/sched: fix restore_vcpu_affinity() by removing it
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+When the system is coming up after having been suspended,
+restore_vcpu_affinity() is called for each domain in order to adjust
+the vcpus' affinity settings in case a cpu didn't come back online.
+
+The way restore_vcpu_affinity() does this is wrong, because the
+specific scheduler isn't informed about a possible migration of the
+vcpu to another cpu. Additionally, the migration often happens even
+when all cpus are running again, as it is done without checking
+whether it is really needed.
+
+As cpupool management already calls cpu_disable_scheduler() for
+cpus that have not come up again, and cpu_disable_scheduler() takes
+care of any needed vcpu migration in the proper way, there is
+simply no need for restore_vcpu_affinity().
+
+So just remove restore_vcpu_affinity() completely, together with the
+no longer used sched_reset_affinity_broken().
+
+Fixes: 8a04eaa8ea83 ("xen/sched: move some per-vcpu items to struct sched_unit")
+Reported-by: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Acked-by: Dario Faggioli <dfaggioli@suse.com>
+Tested-by: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
+master commit: fce1f381f7388daaa3e96dbb0d67d7a3e4bb2d2d
+master date: 2022-10-24 11:16:27 +0100
+---
+ xen/arch/x86/acpi/power.c | 3 --
+ xen/common/sched/core.c | 78 ---------------------------------------
+ xen/include/xen/sched.h | 1 -
+ 3 files changed, 82 deletions(-)
+
+diff --git a/xen/arch/x86/acpi/power.c b/xen/arch/x86/acpi/power.c
+index dd397f713067..1a7baeebe6d0 100644
+--- a/xen/arch/x86/acpi/power.c
++++ b/xen/arch/x86/acpi/power.c
+@@ -159,10 +159,7 @@ static void thaw_domains(void)
+
+ rcu_read_lock(&domlist_read_lock);
+ for_each_domain ( d )
+- {
+- restore_vcpu_affinity(d);
+ domain_unpause(d);
+- }
+ rcu_read_unlock(&domlist_read_lock);
+ }
+
+diff --git a/xen/common/sched/core.c b/xen/common/sched/core.c
+index 900aab8f66a7..9173cf690c72 100644
+--- a/xen/common/sched/core.c
++++ b/xen/common/sched/core.c
+@@ -1188,84 +1188,6 @@ static bool sched_check_affinity_broken(const struct sched_unit *unit)
+ return false;
+ }
+
+-static void sched_reset_affinity_broken(const struct sched_unit *unit)
+-{
+- struct vcpu *v;
+-
+- for_each_sched_unit_vcpu ( unit, v )
+- v->affinity_broken = false;
+-}
+-
+-void restore_vcpu_affinity(struct domain *d)
+-{
+- unsigned int cpu = smp_processor_id();
+- struct sched_unit *unit;
+-
+- ASSERT(system_state == SYS_STATE_resume);
+-
+- rcu_read_lock(&sched_res_rculock);
+-
+- for_each_sched_unit ( d, unit )
+- {
+- spinlock_t *lock;
+- unsigned int old_cpu = sched_unit_master(unit);
+- struct sched_resource *res;
+-
+- ASSERT(!unit_runnable(unit));
+-
+- /*
+- * Re-assign the initial processor as after resume we have no
+- * guarantee the old processor has come back to life again.
+- *
+- * Therefore, here, before actually unpausing the domains, we should
+- * set v->processor of each of their vCPUs to something that will
+- * make sense for the scheduler of the cpupool in which they are in.
+- */
+- lock = unit_schedule_lock_irq(unit);
+-
+- cpumask_and(cpumask_scratch_cpu(cpu), unit->cpu_hard_affinity,
+- cpupool_domain_master_cpumask(d));
+- if ( cpumask_empty(cpumask_scratch_cpu(cpu)) )
+- {
+- if ( sched_check_affinity_broken(unit) )
+- {
+- sched_set_affinity(unit, unit->cpu_hard_affinity_saved, NULL);
+- sched_reset_affinity_broken(unit);
+- cpumask_and(cpumask_scratch_cpu(cpu), unit->cpu_hard_affinity,
+- cpupool_domain_master_cpumask(d));
+- }
+-
+- if ( cpumask_empty(cpumask_scratch_cpu(cpu)) )
+- {
+- /* Affinity settings of one vcpu are for the complete unit. */
+- printk(XENLOG_DEBUG "Breaking affinity for %pv\n",
+- unit->vcpu_list);
+- sched_set_affinity(unit, &cpumask_all, NULL);
+- cpumask_and(cpumask_scratch_cpu(cpu), unit->cpu_hard_affinity,
+- cpupool_domain_master_cpumask(d));
+- }
+- }
+-
+- res = get_sched_res(cpumask_any(cpumask_scratch_cpu(cpu)));
+- sched_set_res(unit, res);
+-
+- spin_unlock_irq(lock);
+-
+- /* v->processor might have changed, so reacquire the lock. */
+- lock = unit_schedule_lock_irq(unit);
+- res = sched_pick_resource(unit_scheduler(unit), unit);
+- sched_set_res(unit, res);
+- spin_unlock_irq(lock);
+-
+- if ( old_cpu != sched_unit_master(unit) )
+- sched_move_irqs(unit);
+- }
+-
+- rcu_read_unlock(&sched_res_rculock);
+-
+- domain_update_node_affinity(d);
+-}
+-
+ /*
+ * This function is used by cpu_hotplug code via cpu notifier chain
+ * and from cpupools to switch schedulers on a cpu.
+diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
+index 4e25627d9685..bb05d167ae0f 100644
+--- a/xen/include/xen/sched.h
++++ b/xen/include/xen/sched.h
+@@ -993,7 +993,6 @@ void vcpu_set_periodic_timer(struct vcpu *v, s_time_t value);
+ void sched_setup_dom0_vcpus(struct domain *d);
+ int vcpu_temporary_affinity(struct vcpu *v, unsigned int cpu, uint8_t reason);
+ int vcpu_set_hard_affinity(struct vcpu *v, const cpumask_t *affinity);
+-void restore_vcpu_affinity(struct domain *d);
+ int vcpu_affinity_domctl(struct domain *d, uint32_t cmd,
+ struct xen_domctl_vcpuaffinity *vcpuaff);
+
+--
+2.37.4
+
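
As a minimal illustration of the commit message's central point (a vcpu
being moved to another cpu without the scheduler being informed), here
is a small self-contained model. It is a sketch for illustration only,
not Xen code: every identifier in it is invented, and it only models
the general hazard of moving a task between per-cpu runqueues behind
the scheduler's back, which leaves the scheduler's private accounting
stale. cpu_disable_scheduler(), by contrast, performs such moves
through the scheduler.

/* Toy model (not Xen code): two per-cpu runqueues whose task counters
 * are maintained by the "scheduler".  Rewriting task->cpu directly
 * bypasses that bookkeeping; migrate() keeps it consistent. */
#include <stdio.h>

struct task { int cpu; };
static int nr_tasks[2];                /* scheduler-private accounting */

static void enqueue(struct task *t, int cpu)
{
    t->cpu = cpu;
    nr_tasks[cpu]++;
}

static void migrate(struct task *t, int new_cpu)
{
    nr_tasks[t->cpu]--;                /* the scheduler is informed ... */
    nr_tasks[new_cpu]++;               /* ... and stays consistent      */
    t->cpu = new_cpu;
}

int main(void)
{
    struct task a = { 0 }, b = { 0 };

    enqueue(&a, 0);
    a.cpu = 1;                         /* moved behind the scheduler's
                                          back: counters still claim a
                                          task on cpu0, none on cpu1    */
    printf("bad move:  cpu0=%d cpu1=%d, task a claims cpu%d\n",
           nr_tasks[0], nr_tasks[1], a.cpu);

    enqueue(&b, 0);
    migrate(&b, 1);                    /* proper path: the accounting
                                          follows the task              */
    printf("good move: cpu0=%d cpu1=%d, task b claims cpu%d\n",
           nr_tasks[0], nr_tasks[1], b.cpu);
    return 0;
}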