|
11 | 11 | #include <linux/vmalloc.h>
|
12 | 12 | #include <linux/sizes.h>
|
13 | 13 | #include <linux/kexec.h>
|
| 14 | +#include <linux/memory.h> |
| 15 | +#include <linux/cpuhotplug.h> |
14 | 16 |
|
15 | 17 | #include <asm/page.h>
|
16 | 18 | #include <asm/sections.h>
|
17 | 19 |
|
18 | 20 | #include <crypto/sha1.h>
|
19 | 21 |
|
20 | 22 | #include "kallsyms_internal.h"
|
| 23 | +#include "kexec_internal.h" |
21 | 24 |
|
22 | 25 | /* Per cpu memory for storing cpu states in case of system crash. */
|
23 | 26 | note_buf_t __percpu *crash_notes;
|
@@ -733,3 +736,142 @@ static int __init crash_notes_memory_init(void)
|
733 | 736 | return 0;
|
734 | 737 | }
|
735 | 738 | subsys_initcall(crash_notes_memory_init);
|
| 739 | + |
| 740 | +#ifdef CONFIG_CRASH_HOTPLUG |
| 741 | +#undef pr_fmt |
| 742 | +#define pr_fmt(fmt) "crash hp: " fmt |
| 743 | +/* |
| 744 | + * To accurately reflect hot un/plug changes of cpu and memory resources |
| 745 | + * (including onling and offlining of those resources), the elfcorehdr |
| 746 | + * (which is passed to the crash kernel via the elfcorehdr= parameter) |
| 747 | + * must be updated with the new list of CPUs and memories. |
| 748 | + * |
| 749 | + * In order to make changes to elfcorehdr, two conditions are needed: |
| 750 | + * First, the segment containing the elfcorehdr must be large enough |
| 751 | + * to permit a growing number of resources; the elfcorehdr memory size |
| 752 | + * is based on NR_CPUS_DEFAULT and CRASH_MAX_MEMORY_RANGES. |
| 753 | + * Second, purgatory must explicitly exclude the elfcorehdr from the |
| 754 | + * list of segments it checks (since the elfcorehdr changes and thus |
| 755 | + * would require an update to purgatory itself to update the digest). |
| 756 | + */ |
| 757 | +static void crash_handle_hotplug_event(unsigned int hp_action, unsigned int cpu) |
| 758 | +{ |
| 759 | + struct kimage *image; |
| 760 | + |
| 761 | + /* Obtain lock while changing crash information */ |
| 762 | + if (!kexec_trylock()) { |
| 763 | + pr_info("kexec_trylock() failed, elfcorehdr may be inaccurate\n"); |
| 764 | + return; |
| 765 | + } |
| 766 | + |
| 767 | + /* Check kdump is not loaded */ |
| 768 | + if (!kexec_crash_image) |
| 769 | + goto out; |
| 770 | + |
| 771 | + image = kexec_crash_image; |
| 772 | + |
| 773 | + if (hp_action == KEXEC_CRASH_HP_ADD_CPU || |
| 774 | + hp_action == KEXEC_CRASH_HP_REMOVE_CPU) |
| 775 | + pr_debug("hp_action %u, cpu %u\n", hp_action, cpu); |
| 776 | + else |
| 777 | + pr_debug("hp_action %u\n", hp_action); |
| 778 | + |
| 779 | + /* |
| 780 | + * The elfcorehdr_index is set to -1 when the struct kimage |
| 781 | + * is allocated. Find the segment containing the elfcorehdr, |
| 782 | + * if not already found. |
| 783 | + */ |
| 784 | + if (image->elfcorehdr_index < 0) { |
| 785 | + unsigned long mem; |
| 786 | + unsigned char *ptr; |
| 787 | + unsigned int n; |
| 788 | + |
| 789 | + for (n = 0; n < image->nr_segments; n++) { |
| 790 | + mem = image->segment[n].mem; |
| 791 | + ptr = kmap_local_page(pfn_to_page(mem >> PAGE_SHIFT)); |
| 792 | + if (ptr) { |
| 793 | + /* The segment containing elfcorehdr */ |
| 794 | + if (memcmp(ptr, ELFMAG, SELFMAG) == 0) |
| 795 | + image->elfcorehdr_index = (int)n; |
| 796 | + kunmap_local(ptr); |
| 797 | + } |
| 798 | + } |
| 799 | + } |
| 800 | + |
| 801 | + if (image->elfcorehdr_index < 0) { |
| 802 | + pr_err("unable to locate elfcorehdr segment"); |
| 803 | + goto out; |
| 804 | + } |
| 805 | + |
| 806 | + /* Needed in order for the segments to be updated */ |
| 807 | + arch_kexec_unprotect_crashkres(); |
| 808 | + |
| 809 | + /* Differentiate between normal load and hotplug update */ |
| 810 | + image->hp_action = hp_action; |
| 811 | + |
| 812 | + /* Now invoke arch-specific update handler */ |
| 813 | + arch_crash_handle_hotplug_event(image); |
| 814 | + |
| 815 | + /* No longer handling a hotplug event */ |
| 816 | + image->hp_action = KEXEC_CRASH_HP_NONE; |
| 817 | + image->elfcorehdr_updated = true; |
| 818 | + |
| 819 | + /* Change back to read-only */ |
| 820 | + arch_kexec_protect_crashkres(); |
| 821 | + |
| 822 | + /* Errors in the callback is not a reason to rollback state */ |
| 823 | +out: |
| 824 | + /* Release lock now that update complete */ |
| 825 | + kexec_unlock(); |
| 826 | +} |
| 827 | + |
| 828 | +static int crash_memhp_notifier(struct notifier_block *nb, unsigned long val, void *v) |
| 829 | +{ |
| 830 | + switch (val) { |
| 831 | + case MEM_ONLINE: |
| 832 | + crash_handle_hotplug_event(KEXEC_CRASH_HP_ADD_MEMORY, |
| 833 | + KEXEC_CRASH_HP_INVALID_CPU); |
| 834 | + break; |
| 835 | + |
| 836 | + case MEM_OFFLINE: |
| 837 | + crash_handle_hotplug_event(KEXEC_CRASH_HP_REMOVE_MEMORY, |
| 838 | + KEXEC_CRASH_HP_INVALID_CPU); |
| 839 | + break; |
| 840 | + } |
| 841 | + return NOTIFY_OK; |
| 842 | +} |
| 843 | + |
/* Memory-hotplug notifier; priority 0 gives default ordering in the chain. */
static struct notifier_block crash_memhp_nb = {
	.notifier_call = crash_memhp_notifier,
	.priority = 0
};
| 848 | + |
/*
 * CPU hotplug "startup" callback: refresh the crash elfcorehdr for the
 * newly arrived cpu. Always returns 0 — crash_handle_hotplug_event()
 * returns void, so errors there never veto the cpu transition.
 */
static int crash_cpuhp_online(unsigned int cpu)
{
	crash_handle_hotplug_event(KEXEC_CRASH_HP_ADD_CPU, cpu);
	return 0;
}
| 854 | + |
/*
 * CPU hotplug "teardown" callback: refresh the crash elfcorehdr for the
 * departing cpu. Always returns 0 so the offline transition is never
 * blocked by an elfcorehdr update failure.
 */
static int crash_cpuhp_offline(unsigned int cpu)
{
	crash_handle_hotplug_event(KEXEC_CRASH_HP_REMOVE_CPU, cpu);
	return 0;
}
| 860 | + |
| 861 | +static int __init crash_hotplug_init(void) |
| 862 | +{ |
| 863 | + int result = 0; |
| 864 | + |
| 865 | + if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG)) |
| 866 | + register_memory_notifier(&crash_memhp_nb); |
| 867 | + |
| 868 | + if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) { |
| 869 | + result = cpuhp_setup_state_nocalls(CPUHP_BP_PREPARE_DYN, |
| 870 | + "crash/cpuhp", crash_cpuhp_online, crash_cpuhp_offline); |
| 871 | + } |
| 872 | + |
| 873 | + return result; |
| 874 | +} |
| 875 | + |
| 876 | +subsys_initcall(crash_hotplug_init); |
| 877 | +#endif |
0 commit comments