Skip to content

Commit c58ec70

Browse files
author
Fox Snowpatch
committed
1 parent 7170d5d commit c58ec70

61 files changed

Lines changed: 219 additions & 394 deletions

File tree

Some content is hidden

Large commits have some content hidden by default. Use the searchbox below for content that may be hidden.

Documentation/driver-api/cxl/linux/early-boot.rst

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -125,7 +125,7 @@ The contiguous memory allocator (CMA) enables reservation of contiguous memory
125125
regions on NUMA nodes during early boot. However, CMA cannot reserve memory
126126
on NUMA nodes that are not online during early boot. ::
127127

128-
void __init hugetlb_cma_reserve(int order) {
128+
void __init hugetlb_cma_reserve(void) {
129129
if (!node_online(nid))
130130
/* do not allow reservations */
131131
}

Documentation/mm/memory-model.rst

Lines changed: 0 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -97,9 +97,6 @@ sections:
9797
`mem_section` objects and the number of rows is calculated to fit
9898
all the memory sections.
9999

100-
The architecture setup code should call sparse_init() to
101-
initialize the memory sections and the memory maps.
102-
103100
With SPARSEMEM there are two possible ways to convert a PFN to the
104101
corresponding `struct page` - a "classic sparse" and "sparse
105102
vmemmap". The selection is made at build time and it is determined by

Documentation/translations/zh_CN/mm/memory-model.rst

Lines changed: 0 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -83,8 +83,6 @@ SPARSEMEM模型将物理内存显示为一个部分的集合。一个区段用me
8383
每一行包含价值 `PAGE_SIZE` 的 `mem_section` 对象,行数的计算是为了适应所有的
8484
内存区。
8585

86-
架构设置代码应该调用sparse_init()来初始化内存区和内存映射。
87-
8886
通过SPARSEMEM,有两种可能的方式将PFN转换为相应的 `struct page` --"classic sparse"和
8987
"sparse vmemmap"。选择是在构建时进行的,它由 `CONFIG_SPARSEMEM_VMEMMAP` 的
9088
值决定。

arch/alpha/kernel/setup.c

Lines changed: 0 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -607,7 +607,6 @@ setup_arch(char **cmdline_p)
607607
/* Find our memory. */
608608
setup_memory(kernel_end);
609609
memblock_set_bottom_up(true);
610-
sparse_init();
611610

612611
/* First guess at cpu cache sizes. Do this before init_arch. */
613612
determine_cpu_caches(cpu->type);

arch/alpha/mm/init.c

Lines changed: 7 additions & 9 deletions
Original file line number | Diff line number | Diff line change
@@ -208,24 +208,22 @@ callback_init(void * kernel_end)
208208
return kernel_end;
209209
}
210210

211-
/*
212-
* paging_init() sets up the memory map.
213-
*/
214-
void __init paging_init(void)
211+
void __init arch_zone_limits_init(unsigned long *max_zone_pfn)
215212
{
216-
unsigned long max_zone_pfn[MAX_NR_ZONES] = {0, };
217213
unsigned long dma_pfn;
218214

219215
dma_pfn = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
220216
max_pfn = max_low_pfn;
221217

222218
max_zone_pfn[ZONE_DMA] = dma_pfn;
223219
max_zone_pfn[ZONE_NORMAL] = max_pfn;
220+
}
224221

225-
/* Initialize mem_map[]. */
226-
free_area_init(max_zone_pfn);
227-
228-
/* Initialize the kernel's ZERO_PGE. */
222+
/*
223+
* paging_init() initializes the kernel's ZERO_PGE.
224+
*/
225+
void __init paging_init(void)
226+
{
229227
memset(absolute_pointer(ZERO_PGE), 0, PAGE_SIZE);
230228
}
231229

arch/arc/mm/init.c

Lines changed: 19 additions & 18 deletions
Original file line number | Diff line number | Diff line change
@@ -75,6 +75,25 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
7575
base, TO_MB(size), !in_use ? "Not used":"");
7676
}
7777

78+
void __init arch_zone_limits_init(unsigned long *max_zone_pfn)
79+
{
80+
/*----------------- node/zones setup --------------------------*/
81+
max_zone_pfn[ZONE_NORMAL] = max_low_pfn;
82+
83+
#ifdef CONFIG_HIGHMEM
84+
/*
85+
* max_high_pfn should be ok here for both HIGHMEM and HIGHMEM+PAE.
86+
* For HIGHMEM without PAE max_high_pfn should be less than
87+
* min_low_pfn to guarantee that these two regions don't overlap.
88+
* For PAE case highmem is greater than lowmem, so it is natural
89+
* to use max_high_pfn.
90+
*
91+
* In both cases, holes should be handled by pfn_valid().
92+
*/
93+
max_zone_pfn[ZONE_HIGHMEM] = max_high_pfn;
94+
#endif
95+
}
96+
7897
/*
7998
* First memory setup routine called from setup_arch()
8099
* 1. setup swapper's mm @init_mm
@@ -83,8 +102,6 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
83102
*/
84103
void __init setup_arch_memory(void)
85104
{
86-
unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
87-
88105
setup_initial_init_mm(_text, _etext, _edata, _end);
89106

90107
/* first page of system - kernel .vector starts here */
@@ -122,9 +139,6 @@ void __init setup_arch_memory(void)
122139

123140
memblock_dump_all();
124141

125-
/*----------------- node/zones setup --------------------------*/
126-
max_zone_pfn[ZONE_NORMAL] = max_low_pfn;
127-
128142
#ifdef CONFIG_HIGHMEM
129143
/*
130144
* On ARC (w/o PAE) HIGHMEM addresses are actually smaller (0 based)
@@ -139,22 +153,9 @@ void __init setup_arch_memory(void)
139153
min_high_pfn = PFN_DOWN(high_mem_start);
140154
max_high_pfn = PFN_DOWN(high_mem_start + high_mem_sz);
141155

142-
/*
143-
* max_high_pfn should be ok here for both HIGHMEM and HIGHMEM+PAE.
144-
* For HIGHMEM without PAE max_high_pfn should be less than
145-
* min_low_pfn to guarantee that these two regions don't overlap.
146-
* For PAE case highmem is greater than lowmem, so it is natural
147-
* to use max_high_pfn.
148-
*
149-
* In both cases, holes should be handled by pfn_valid().
150-
*/
151-
max_zone_pfn[ZONE_HIGHMEM] = max_high_pfn;
152-
153156
arch_pfn_offset = min(min_low_pfn, min_high_pfn);
154157
kmap_init();
155158
#endif /* CONFIG_HIGHMEM */
156-
157-
free_area_init(max_zone_pfn);
158159
}
159160

160161
void __init arch_mm_preinit(void)

arch/arm/mm/init.c

Lines changed: 4 additions & 21 deletions
Original file line number | Diff line number | Diff line change
@@ -107,19 +107,15 @@ void __init setup_dma_zone(const struct machine_desc *mdesc)
107107
#endif
108108
}
109109

110-
static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
111-
unsigned long max_high)
110+
void __init arch_zone_limits_init(unsigned long *max_zone_pfn)
112111
{
113-
unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
114-
115112
#ifdef CONFIG_ZONE_DMA
116-
max_zone_pfn[ZONE_DMA] = min(arm_dma_pfn_limit, max_low);
113+
max_zone_pfn[ZONE_DMA] = min(arm_dma_pfn_limit, max_low_pfn);
117114
#endif
118-
max_zone_pfn[ZONE_NORMAL] = max_low;
115+
max_zone_pfn[ZONE_NORMAL] = max_low_pfn;
119116
#ifdef CONFIG_HIGHMEM
120-
max_zone_pfn[ZONE_HIGHMEM] = max_high;
117+
max_zone_pfn[ZONE_HIGHMEM] = max_pfn;
121118
#endif
122-
free_area_init(max_zone_pfn);
123119
}
124120

125121
#ifdef CONFIG_HAVE_ARCH_PFN_VALID
@@ -211,19 +207,6 @@ void __init bootmem_init(void)
211207

212208
early_memtest((phys_addr_t)min_low_pfn << PAGE_SHIFT,
213209
(phys_addr_t)max_low_pfn << PAGE_SHIFT);
214-
215-
/*
216-
* sparse_init() tries to allocate memory from memblock, so must be
217-
* done after the fixed reservations
218-
*/
219-
sparse_init();
220-
221-
/*
222-
* Now free the memory - free_area_init needs
223-
* the sparse mem_map arrays initialized by sparse_init()
224-
* for memmap_init_zone(), otherwise all PFNs are invalid.
225-
*/
226-
zone_sizes_init(min_low_pfn, max_low_pfn, max_pfn);
227210
}
228211

229212
/*

arch/arm64/include/asm/hugetlb.h

Lines changed: 0 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -56,8 +56,6 @@ extern void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
5656
#define __HAVE_ARCH_HUGE_PTEP_GET
5757
extern pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
5858

59-
void __init arm64_hugetlb_cma_reserve(void);
60-
6159
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
6260
extern pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
6361
unsigned long addr, pte_t *ptep);

arch/arm64/mm/hugetlbpage.c

Lines changed: 3 additions & 7 deletions
Original file line number | Diff line number | Diff line change
@@ -36,16 +36,12 @@
3636
* huge pages could still be served from those areas.
3737
*/
3838
#ifdef CONFIG_CMA
39-
void __init arm64_hugetlb_cma_reserve(void)
39+
unsigned int arch_hugetlb_cma_order(void)
4040
{
41-
int order;
42-
4341
if (pud_sect_supported())
44-
order = PUD_SHIFT - PAGE_SHIFT;
45-
else
46-
order = CONT_PMD_SHIFT - PAGE_SHIFT;
42+
return PUD_SHIFT - PAGE_SHIFT;
4743

48-
hugetlb_cma_reserve(order);
44+
return CONT_PMD_SHIFT - PAGE_SHIFT;
4945
}
5046
#endif /* CONFIG_CMA */
5147

arch/arm64/mm/init.c

Lines changed: 16 additions & 23 deletions
Original file line number | Diff line number | Diff line change
@@ -118,9 +118,22 @@ static phys_addr_t __init max_zone_phys(phys_addr_t zone_limit)
118118
return min(zone_limit, memblock_end_of_DRAM() - 1) + 1;
119119
}
120120

121-
static void __init zone_sizes_init(void)
121+
void __init arch_zone_limits_init(unsigned long *max_zone_pfns)
122+
{
123+
phys_addr_t __maybe_unused dma32_phys_limit =
124+
max_zone_phys(DMA_BIT_MASK(32));
125+
126+
#ifdef CONFIG_ZONE_DMA
127+
max_zone_pfns[ZONE_DMA] = PFN_DOWN(max_zone_phys(zone_dma_limit));
128+
#endif
129+
#ifdef CONFIG_ZONE_DMA32
130+
max_zone_pfns[ZONE_DMA32] = PFN_DOWN(dma32_phys_limit);
131+
#endif
132+
max_zone_pfns[ZONE_NORMAL] = max_pfn;
133+
}
134+
135+
static void __init dma_limits_init(void)
122136
{
123-
unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};
124137
phys_addr_t __maybe_unused acpi_zone_dma_limit;
125138
phys_addr_t __maybe_unused dt_zone_dma_limit;
126139
phys_addr_t __maybe_unused dma32_phys_limit =
@@ -139,18 +152,13 @@ static void __init zone_sizes_init(void)
139152
if (memblock_start_of_DRAM() < U32_MAX)
140153
zone_dma_limit = min(zone_dma_limit, U32_MAX);
141154
arm64_dma_phys_limit = max_zone_phys(zone_dma_limit);
142-
max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit);
143155
#endif
144156
#ifdef CONFIG_ZONE_DMA32
145-
max_zone_pfns[ZONE_DMA32] = PFN_DOWN(dma32_phys_limit);
146157
if (!arm64_dma_phys_limit)
147158
arm64_dma_phys_limit = dma32_phys_limit;
148159
#endif
149160
if (!arm64_dma_phys_limit)
150161
arm64_dma_phys_limit = PHYS_MASK + 1;
151-
max_zone_pfns[ZONE_NORMAL] = max_pfn;
152-
153-
free_area_init(max_zone_pfns);
154162
}
155163

156164
int pfn_is_map_memory(unsigned long pfn)
@@ -303,23 +311,8 @@ void __init bootmem_init(void)
303311

304312
arch_numa_init();
305313

306-
/*
307-
* must be done after arch_numa_init() which calls numa_init() to
308-
* initialize node_online_map that gets used in hugetlb_cma_reserve()
309-
* while allocating required CMA size across online nodes.
310-
*/
311-
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
312-
arm64_hugetlb_cma_reserve();
313-
#endif
314-
315314
kvm_hyp_reserve();
316-
317-
/*
318-
* sparse_init() tries to allocate memory from memblock, so must be
319-
* done after the fixed reservations
320-
*/
321-
sparse_init();
322-
zone_sizes_init();
315+
dma_limits_init();
323316

324317
/*
325318
* Reserve the CMA area after arm64_dma_phys_limit was initialised.

0 commit comments

Comments
 (0)