diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index a91a072f2b2ce6baf432f09c3cfe74dbaec0033b..0a54ffac8c682ae5c940642c6b5d45cfb22cada5 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -355,7 +355,7 @@ static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
                if (unlikely(pfn_to_nid(start_pfn) != nid))
                        continue;
 
-               if (zone && zone != page_zone(pfn_to_page(start_pfn)))
+               if (zone != page_zone(pfn_to_page(start_pfn)))
                        continue;
 
                return start_pfn;
@@ -380,7 +380,7 @@ static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
                if (unlikely(pfn_to_nid(pfn) != nid))
                        continue;
 
-               if (zone && zone != page_zone(pfn_to_page(pfn)))
+               if (zone != page_zone(pfn_to_page(pfn)))
                        continue;
 
                return pfn;
@@ -392,14 +392,11 @@ static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
 static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
                             unsigned long end_pfn)
 {
-       unsigned long zone_start_pfn = zone->zone_start_pfn;
-       unsigned long z = zone_end_pfn(zone); /* zone_end_pfn namespace clash */
-       unsigned long zone_end_pfn = z;
        unsigned long pfn;
        int nid = zone_to_nid(zone);
 
        zone_span_writelock(zone);
-       if (zone_start_pfn == start_pfn) {
+       if (zone->zone_start_pfn == start_pfn) {
                /*
                 * If the section is smallest section in the zone, it need
                 * shrink zone->zone_start_pfn and zone->zone_spanned_pages.
@@ -407,50 +404,30 @@ static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
                 * for shrinking zone.
                 */
                pfn = find_smallest_section_pfn(nid, zone, end_pfn,
-                                               zone_end_pfn);
+                                               zone_end_pfn(zone));
                if (pfn) {
+                       zone->spanned_pages = zone_end_pfn(zone) - pfn;
                        zone->zone_start_pfn = pfn;
-                       zone->spanned_pages = zone_end_pfn - pfn;
+               } else {
+                       zone->zone_start_pfn = 0;
+                       zone->spanned_pages = 0;
                }
-       } else if (zone_end_pfn == end_pfn) {
+       } else if (zone_end_pfn(zone) == end_pfn) {
                /*
                 * If the section is biggest section in the zone, it need
                 * shrink zone->spanned_pages.
                 * In this case, we find second biggest valid mem_section for
                 * shrinking zone.
                 */
-               pfn = find_biggest_section_pfn(nid, zone, zone_start_pfn,
+               pfn = find_biggest_section_pfn(nid, zone, zone->zone_start_pfn,
                                               start_pfn);
                if (pfn)
-                       zone->spanned_pages = pfn - zone_start_pfn + 1;
-       }
-
-       /*
-        * The section is not biggest or smallest mem_section in the zone, it
-        * only creates a hole in the zone. So in this case, we need not
-        * change the zone. But perhaps, the zone has only hole data. Thus
-        * it check the zone has only hole or not.
-        */
-       pfn = zone_start_pfn;
-       for (; pfn < zone_end_pfn; pfn += PAGES_PER_SUBSECTION) {
-               if (unlikely(!pfn_to_online_page(pfn)))
-                       continue;
-
-               if (page_zone(pfn_to_page(pfn)) != zone)
-                       continue;
-
-               /* Skip range to be removed */
-               if (pfn >= start_pfn && pfn < end_pfn)
-                       continue;
-
-               /* If we find valid section, we have nothing to do */
-               zone_span_writeunlock(zone);
-               return;
+                       zone->spanned_pages = pfn - zone->zone_start_pfn + 1;
+               else {
+                       zone->zone_start_pfn = 0;
+                       zone->spanned_pages = 0;
+               }
        }
-
-       /* The zone has no valid section */
-       zone->zone_start_pfn = 0;
-       zone->spanned_pages = 0;
        zone_span_writeunlock(zone);
 }
 
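The rewritten shrink_zone_span() above no longer falls through to an "all holes" scan: when the removed range sits at either end of the zone and find_smallest_section_pfn()/find_biggest_section_pfn() find no other online section of this zone, the span is cleared on the spot. A minimal userspace sketch of that bookkeeping (toy struct and made-up pfn values, not kernel code):

#include <stdio.h>

/* Toy stand-in for struct zone's span fields. */
struct toy_zone {
	unsigned long zone_start_pfn;
	unsigned long spanned_pages;
};

/*
 * Mirrors the front-shrink branch above; next_online plays the role of
 * find_smallest_section_pfn()'s return value (0 means nothing is left).
 */
static void shrink_front(struct toy_zone *z, unsigned long next_online)
{
	unsigned long end_pfn = z->zone_start_pfn + z->spanned_pages;

	if (next_online) {
		z->spanned_pages = end_pfn - next_online;
		z->zone_start_pfn = next_online;
	} else {
		z->zone_start_pfn = 0;
		z->spanned_pages = 0;
	}
}

int main(void)
{
	struct toy_zone z = { .zone_start_pfn = 0x10000, .spanned_pages = 0x18000 };

	shrink_front(&z, 0x20000);	/* first online pfn moved up */
	printf("start=%#lx spanned=%#lx\n", z.zone_start_pfn, z.spanned_pages);

	shrink_front(&z, 0);		/* no online section of this zone remains */
	printf("start=%#lx spanned=%#lx\n", z.zone_start_pfn, z.spanned_pages);
	return 0;
}
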
@@ -490,6 +467,9 @@ void __ref remove_pfn_range_from_zone(struct zone *zone,
        struct pglist_data *pgdat = zone->zone_pgdat;
        unsigned long flags;
 
+       /* Poison struct pages because they are now uninitialized again. */
+       page_init_poison(pfn_to_page(start_pfn), sizeof(struct page) * nr_pages);
+
 #ifdef CONFIG_ZONE_DEVICE
        /*
         * Zone shrinking code cannot properly deal with ZONE_DEVICE. So
@@ -536,25 +516,20 @@ static void __remove_section(unsigned long pfn, unsigned long nr_pages,
 void __remove_pages(unsigned long pfn, unsigned long nr_pages,
                    struct vmem_altmap *altmap)
 {
+       const unsigned long end_pfn = pfn + nr_pages;
+       unsigned long cur_nr_pages;
        unsigned long map_offset = 0;
-       unsigned long nr, start_sec, end_sec;
 
        map_offset = vmem_altmap_offset(altmap);
 
        if (check_pfn_span(pfn, nr_pages, "remove"))
                return;
 
-       start_sec = pfn_to_section_nr(pfn);
-       end_sec = pfn_to_section_nr(pfn + nr_pages - 1);
-       for (nr = start_sec; nr <= end_sec; nr++) {
-               unsigned long pfns;
-
+       for (; pfn < end_pfn; pfn += cur_nr_pages) {
                cond_resched();
-               pfns = min(nr_pages, PAGES_PER_SECTION
-                               - (pfn & ~PAGE_SECTION_MASK));
-               __remove_section(pfn, pfns, map_offset, altmap);
-               pfn += pfns;
-               nr_pages -= pfns;
+               /* Select all remaining pages up to the next section boundary */
+               cur_nr_pages = min(end_pfn - pfn, -(pfn | PAGE_SECTION_MASK));
+               __remove_section(pfn, cur_nr_pages, map_offset, altmap);
                map_offset = 0;
        }
 }
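The "next section boundary" selection above leans on PAGE_SECTION_MASK being ~(PAGES_PER_SECTION - 1), so that in unsigned arithmetic -(pfn | PAGE_SECTION_MASK) is exactly the number of pages from pfn up to the next section boundary. A standalone check with assumed values (4 KiB pages, 128 MiB sections, arbitrary pfn):

#include <stdio.h>

/* Assumed kernel constants: PFN_SECTION_SHIFT == 15 on x86-64. */
#define PAGES_PER_SECTION	(1UL << 15)
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION - 1))

int main(void)
{
	unsigned long pfn = 0x12345;	/* arbitrary pfn inside a section */

	/* Both lines print 0x5cbb, i.e. 0x18000 - 0x12345. */
	printf("%#lx\n", -(pfn | PAGE_SECTION_MASK));
	printf("%#lx\n", PAGES_PER_SECTION - (pfn & ~PAGE_SECTION_MASK));
	return 0;
}
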
@@ -783,27 +758,18 @@ struct zone * zone_for_pfn_range(int online_type, int nid, unsigned start_pfn,
        return default_zone_for_pfn(nid, start_pfn, nr_pages);
 }
 
-int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_type)
+int __ref online_pages(unsigned long pfn, unsigned long nr_pages,
+                      int online_type, int nid)
 {
        unsigned long flags;
        unsigned long onlined_pages = 0;
        struct zone *zone;
        int need_zonelists_rebuild = 0;
-       int nid;
        int ret;
        struct memory_notify arg;
-       struct memory_block *mem;
 
        mem_hotplug_begin();
 
-       /*
-        * We can't use pfn_to_nid() because nid might be stored in struct page
-        * which is not yet initialized. Instead, we find nid from memory block.
-        */
-       mem = find_memory_block(__pfn_to_section(pfn));
-       nid = mem->nid;
-       put_device(&mem->dev);
-
        /* associate pfn range with the zone */
        zone = zone_for_pfn_range(online_type, nid, pfn, nr_pages);
        move_pfn_range_to_zone(zone, pfn, nr_pages, NULL);
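With nid now passed in by the caller, the node lookup that the hunk above removes from online_pages() has to happen on the caller's side instead. Purely as a hedged illustration (not the actual upstream caller), that lookup could reuse the very helpers deleted above:

/* Illustration only; mirrors the removed lookup, not the real caller. */
static int example_online_range(unsigned long start_pfn, unsigned long nr_pages,
				int online_type)
{
	struct memory_block *mem;
	int nid;

	/*
	 * As the deleted comment notes, pfn_to_nid() cannot be trusted here
	 * because the struct pages may not be initialized yet; the memory
	 * block device already knows its node.
	 */
	mem = find_memory_block(__pfn_to_section(start_pfn));
	nid = mem->nid;
	put_device(&mem->dev);

	return online_pages(start_pfn, nr_pages, online_type, nid);
}
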
@@ -1182,7 +1148,7 @@ static bool is_pageblock_removable_nolock(unsigned long pfn)
        if (!zone_spans_pfn(zone, pfn))
                return false;
 
-       return !has_unmovable_pages(zone, page, 0, MIGRATE_MOVABLE,
+       return !has_unmovable_pages(zone, page, MIGRATE_MOVABLE,
                                    MEMORY_OFFLINE);
 }
 
@@ -1206,14 +1172,13 @@ bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
 }
 
 /*
- * Confirm all pages in a range [start, end) belong to the same zone.
- * When true, return its valid [start, end).
+ * Confirm all pages in a range [start, end) belong to the same zone (skipping
+ * memory holes). When true, return the zone.
  */
-int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
-                        unsigned long *valid_start, unsigned long *valid_end)
+struct zone *test_pages_in_a_zone(unsigned long start_pfn,
+                                 unsigned long end_pfn)
 {
        unsigned long pfn, sec_end_pfn;
-       unsigned long start, end;
        struct zone *zone = NULL;
        struct page *page;
        int i;
@@ -1234,24 +1199,15 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
                                continue;
                        /* Check if we got outside of the zone */
                        if (zone && !zone_spans_pfn(zone, pfn + i))
-                               return 0;
+                               return NULL;
                        page = pfn_to_page(pfn + i);
                        if (zone && page_zone(page) != zone)
-                               return 0;
-                       if (!zone)
-                               start = pfn + i;
+                               return NULL;
                        zone = page_zone(page);
-                       end = pfn + MAX_ORDER_NR_PAGES;
                }
        }
 
-       if (zone) {
-               *valid_start = start;
-               *valid_end = min(end, end_pfn);
-               return 1;
-       } else {
-               return 0;
-       }
+       return zone;
 }
 
 /*
@@ -1496,7 +1452,6 @@ static int __ref __offline_pages(unsigned long start_pfn,
        unsigned long offlined_pages = 0;
        int ret, node, nr_isolate_pageblock;
        unsigned long flags;
-       unsigned long valid_start, valid_end;
        struct zone *zone;
        struct memory_notify arg;
        char *reason;
@@ -1521,14 +1476,12 @@ static int __ref __offline_pages(unsigned long start_pfn,
 
        /* This makes hotplug much easier...and readable.
           we assume this for now. .*/
-       if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start,
-                                 &valid_end)) {
+       zone = test_pages_in_a_zone(start_pfn, end_pfn);
+       if (!zone) {
                ret = -EINVAL;
                reason = "multizone range";
                goto failed_removal;
        }
-
-       zone = page_zone(pfn_to_page(valid_start));
        node = zone_to_nid(zone);
 
        /* set above range as isolated */
@@ -1764,8 +1717,6 @@ static int __ref try_remove_memory(int nid, u64 start, u64 size)
 
        BUG_ON(check_hotplug_memory_range(start, size));
 
-       mem_hotplug_begin();
-
        /*
         * All memory blocks must be offlined before removing memory.  Check
         * whether all memory blocks in question are offline and return error
@@ -1778,9 +1729,14 @@ static int __ref try_remove_memory(int nid, u64 start, u64 size)
        /* remove memmap entry */
        firmware_map_remove(start, start + size, "System RAM");
 
-       /* remove memory block devices before removing memory */
+       /*
+        * Memory block device removal under the device_hotplug_lock is
+        * a barrier against racing online attempts.
+        */
        remove_memory_block_devices(start, size);
 
+       mem_hotplug_begin();
+
        arch_remove_memory(nid, start, size, NULL);
        memblock_free(start, size);
        memblock_remove(start, size);