linux-3.2.36 kernel boot 4 - memory initialization in setup_arch, part 3 (ARM platform, bootmem_init source analysis)
void __init bootmem_init(void)
{
    unsigned long min, max_low, max_high;

    max_low = max_high = 0;

    find_limits(&min, &max_low, &max_high);
static void __init find_limits(unsigned long *min, unsigned long *max_low,
    unsigned long *max_high)
{
    struct meminfo *mi = &meminfo;
    int i;

    *min = -1UL;
    *max_low = *max_high = 0;

    for_each_bank (i, mi) {
        struct membank *bank = &mi->bank[i];
        unsigned long start, end;

        start = bank_pfn_start(bank);
        end = bank_pfn_end(bank);

        if (*min > start)
            *min = start;
        if (*max_high < end)
            *max_high = end;
        if (bank->highmem)
            continue;
        if (*max_low < end)
            *max_low = end;
    }
}
There is no need to walk through this function line by line. You can see that min is the starting page frame number of memory, max_low is the ending page frame of lowmem, and max_high is the ending page frame of highmem.
Let's print them:
//wxl add
    printk(KERN_NOTICE "min = %lx max_low = %lx max_high = %lx\n",
        (unsigned long)min, (unsigned long)max_low, (unsigned long)max_high);
Output:
min = 30000 max_low = 34000 max_high = 34000
My board has no highmem.
arm_bootmem_init(min, max_low);
From the arguments alone we can tell that boot-time allocations come only from lowmem; highmem cannot be allocated from at this stage.
static void __init arm_bootmem_init(unsigned long start_pfn,
    unsigned long end_pfn)
{
    struct memblock_region *reg;
    unsigned int boot_pages;
    phys_addr_t bitmap;
    pg_data_t *pgdat;

    /*
     * Allocate the bootmem bitmap page.  This must be in a region
     * of memory which has already been mapped.
     */
    boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn); /* compute the bitmap size in pages */
A bitmap uses a single bit to mark the value of one element. Here one bit represents one page: there are 0x4000 page frames in total, and 0x4000 / 8 = 0x800 bytes, which is less than 4096, so a single page is enough, as the printout below confirms.
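As a sanity check, a tiny userspace sketch of the same arithmetic (assuming 4 KB pages, as on this board):

#include <stdio.h>

int main(void)
{
    unsigned long pages = 0x34000 - 0x30000;   /* 0x4000 page frames */
    unsigned long bytes = (pages + 7) / 8;     /* one bit per page */
    /* round up to whole 4 KB pages, like bootmem_bootmap_pages() */
    unsigned long map_pages = (bytes + 4096 - 1) / 4096;

    printf("pages=%#lx bytes=%#lx map_pages=%lu\n", pages, bytes, map_pages);
    /* prints: pages=0x4000 bytes=0x800 map_pages=1 */
    return 0;
}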
    bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
        __pfn_to_phys(end_pfn));
On my platform L1_CACHE_BYTES is 32.
The core of it is:
phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
    phys_addr_t found;

    /* We align the size to limit fragmentation. Without this, a lot of
     * small allocs quickly eat up the whole reserve array on sparc
     */
To avoid small fragments, the size is aligned first:
    size = memblock_align_up(size, align);
The round-up formula is (size + (align - 1)) & ~(align - 1), which simply rounds size up to the next multiple of align.
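A minimal sketch of that rounding formula (the macro name here is illustrative, not the kernel's; align must be a power of two):

#include <stdio.h>

/* round x up to the next multiple of a power-of-two align */
#define ALIGN_UP(x, align)  (((x) + (align) - 1) & ~((unsigned long)(align) - 1))

int main(void)
{
    printf("%#lx\n", ALIGN_UP(0x1000UL, 32)); /* already aligned: 0x1000 */
    printf("%#lx\n", ALIGN_UP(0x1001UL, 32)); /* rounds up to 0x1020 */
    return 0;
}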
    found = memblock_find_base(size, align, 0, max_addr);
static phys_addr_t __init_memblock memblock_find_base(phys_addr_t size,
    phys_addr_t align, phys_addr_t start, phys_addr_t end)
{
    long i;

    BUG_ON(0 == size);

    /* Pump up max_addr */
    if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
        end = memblock.current_limit;
#define MEMBLOCK_ALLOC_ACCESSIBLE  0

    /* We do a top-down search, this tends to limit memory
     * fragmentation by keeping early boot allocs near the
     * top of memory
     */
    for (i = memblock.memory.cnt - 1; i >= 0; i--) {
        phys_addr_t memblockbase = memblock.memory.regions[i].base;
        phys_addr_t memblocksize = memblock.memory.regions[i].size;
        phys_addr_t bottom, top, found;

        if (memblocksize < size) /* this block is too small, try the next */
            continue;
        if ((memblockbase + memblocksize) <= start) /* searching top-down, everything below is too low; stop */
            break;
        bottom = max(memblockbase, start);
        top = min(memblockbase + memblocksize, end);
        if (bottom >= top)
            continue;
        found = memblock_find_region(bottom, top, size, align);
memblock_find_region returns the base address of a candidate region, or MEMBLOCK_ERROR on failure. It also aligns addresses downward with the formula addr & ~(size - 1) (in that helper, "size" is the alignment parameter), and it guards against returning 0, because 0 indicates an error. The returned found is end - size, following the top-down principle.
        if (found != MEMBLOCK_ERROR)
            return found;
    }
    return MEMBLOCK_ERROR;
}
    if (found != MEMBLOCK_ERROR &&
        !memblock_add_region(&memblock.reserved, found, size))
        return found;
The block just found is added to memblock.reserved.
    return 0;
}
//wxl add
    printk(KERN_NOTICE "boot_pages = %lx bitmap = %lx\n",
        (unsigned long)boot_pages, (unsigned long)bitmap);
Output:
boot_pages = 1 bitmap = 33ffb000
A note on this result: going strictly top-down with size 0x1000, the allocation could have started at 0x33fff000. The reason it did not is early_alloc, which we have not analyzed before; it also calls __memblock_alloc_base, so the analysis here covers early_alloc as well. memblock.reserved records the space that has already been allocated, which is why the bitmap lands below those earlier reservations, as the sketch below illustrates.
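Putting the pieces together, a simplified userspace model of the top-down search (the region table and function name are made up for illustration; this is not the memblock API):

#include <stdio.h>

struct region { unsigned long base, size; };

/* simplified top-down fit, modeled on memblock_find_base() */
static unsigned long find_top_down(struct region *r, int cnt,
                                   unsigned long size, unsigned long align,
                                   unsigned long end)
{
    for (int i = cnt - 1; i >= 0; i--) {
        unsigned long top = r[i].base + r[i].size;
        if (top > end)
            top = end;
        if (top < r[i].base + size)   /* region too small */
            continue;
        /* place at the top, round the base down to align */
        return (top - size) & ~(align - 1);
    }
    return 0;   /* 0 stands in for MEMBLOCK_ERROR here */
}

int main(void)
{
    struct region mem[] = { { 0x30000000UL, 0x04000000UL } }; /* one 64 MB bank */

    printf("%#lx\n", find_top_down(mem, 1, 0x1000, 32, 0x34000000UL));
    /* prints 0x33fff000: the highest aligned 4 KB page below the limit */
    return 0;
}

The real boot log shows 0x33ffb000 rather than 0x33fff000 because the kernel also skips the regions already in memblock.reserved near the top; this sketch keeps no reserved list.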
    /*
     * Initialise the bootmem allocator, handing the
     * memory banks over to bootmem.
     */
Now the bootmem allocator gets initialized.
http://www.ibm.com/developerworks/cn/linux/l-numa/
http://blog.chinaunix.net/uid-7295895-id-3076420.html
These two links describe Linux's NUMA support; they are background reading, not required here.
NUMA (Non-Uniform Memory Access) is an architecture in which memory access cost depends on which node the memory belongs to; mainstream implementations include X86_64 (JASPER) and MIPS64 (XLP). ARM is not NUMA yet.
Linux describes physical memory with a three-level structure: storage nodes (Node), management zones (Zone), and pages (Page).
The Node level exists for NUMA.
Let's look at their data structures.
Page is typedef struct page mem_map_t. The structure has a _count field recording the page's usage; you must not access it directly but go through page_count(). 0 means the page is free, a positive value means it is in use.
Zone is struct zone (or zone_t). There are generally three zones:
- ZONE_DMA, dedicated to DMA (below 16 MB);
- ZONE_NORMAL, the regular zone (16 MB to 896 MB);
- ZONE_HIGHMEM, memory the kernel cannot map directly (above 896 MB).
The zone split has no physical meaning; it is just a logical grouping the kernel adopts to manage pages.
Node is typedef struct pglist_data pg_data_t.
The structure contains a node_next field, so any number of pglist_data structures can be chained into a singly linked list.
To point at its multiple zones, there is also a zonelist_t structure.
Linux describes the system's memory with struct pg_data_t; every node in the system is hooked onto the pgdat_list list. On UMA (Uniform Memory Access) architectures there is only one static pg_data_t, contig_page_data. For NUMA systems this scales easily: one node of a NUMA machine maps to one node in Linux's memory description.
    node_set_online(0);
My platform has only one node, so the node id is 0.
#define node_set_online(node)  node_set_state((node), N_ONLINE)
static inline void node_set_state(int node, enum node_states state)
{
    __node_set(node, &node_states[state]);
}
static inline void __node_set(int node, volatile nodemask_t *dstp)
{
    set_bit(node, dstp->bits);
}
Bit 0 of node_states[N_ONLINE] is set to 1, marking node 0 as online.
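The effect is just one bit set in a small bitmap; a plain-C userspace equivalent (no atomics, function name made up):

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

static void set_bit_simple(int nr, unsigned long *addr)
{
    addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

int main(void)
{
    unsigned long online_mask[1] = { 0 };

    set_bit_simple(0, online_mask);        /* node_set_online(0) */
    printf("mask=%#lx\n", online_mask[0]); /* prints mask=0x1 */
    return 0;
}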
    pgdat = NODE_DATA(0);
The NODE_DATA(i) macro returns node i's struct pglist_data; here that is node 0's.
    init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);
unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn,
    unsigned long startpfn, unsigned long endpfn)
{
    return init_bootmem_core(pgdat->bdata, freepfn, startpfn, endpfn);
}
pgdat->bdata holds the descriptive information for bootmem, a bootmem_data_t.
To prepare for the page management machinery, Linux uses a mechanism called the bootmem allocator. It is used only while the system boots, and it builds a page bitmap for physical memory, placed just above the end of the kernel image (_end). The bitmap manages the low region (e.g. below 896 MB): within 0 to 896 MB some pages may be reserved and there may be holes, so the point of the bitmap is to establish exactly which physical pages can be allocated dynamically. The data structure describing the bitmap is bootmem_data:
typedef struct bootmem_data {
    unsigned long node_min_pfn;
    unsigned long node_low_pfn;
    void *node_bootmem_map;
    unsigned long last_end_off;
    unsigned long hint_idx;
    struct list_head list;
} bootmem_data_t;
node_min_pfn is the node's first page frame number (here the start of RAM, as the printout below shows).
node_low_pfn is the top of physical memory, at most 896 MB.
node_bootmem_map points to the bootmem bitmap.
static unsigned long __init init_bootmem_core(bootmem_data_t *bdata,
    unsigned long mapstart, unsigned long start, unsigned long end)
{
    unsigned long mapsize;

    mminit_validate_memmodel_limits(&start, &end);
This limit check matters on some platforms; on mine it is an empty function.
    bdata->node_bootmem_map = phys_to_virt(PFN_PHYS(mapstart));
    bdata->node_min_pfn = start;
    bdata->node_low_pfn = end;
These three values follow from the earlier printout:
bdata->node_bootmem_map is 0xc3ffb000
bdata->node_min_pfn is 0x30000
bdata->node_low_pfn is 0x34000
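The map address follows from the direct lowmem mapping. On this board PAGE_OFFSET is 0xc0000000 and PHYS_OFFSET is 0x30000000, so phys_to_virt() is a constant offset; a sketch, not the kernel macro:

#include <stdio.h>

#define PAGE_OFFSET 0xc0000000UL  /* kernel virtual base on this board */
#define PHYS_OFFSET 0x30000000UL  /* start of SDRAM */

/* lowmem is mapped at a constant offset from its physical address */
static unsigned long phys_to_virt_sketch(unsigned long phys)
{
    return phys - PHYS_OFFSET + PAGE_OFFSET;
}

int main(void)
{
    printf("%#lx\n", phys_to_virt_sketch(0x33ffb000UL));
    /* prints 0xc3ffb000, matching node_bootmem_map above */
    return 0;
}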
    link_bootmem(bdata);
link_bootmem is simple enough to skip: it just links bdata->list into the global bdata_list.
    /*
     * Initially all pages are reserved - setup_arch() has to
     * register free RAM areas explicitly.
     */
So initially every page is marked reserved:
mapsize = bootmap_bytes(end - start);
static unsigned long __init bootmap_bytes(unsigned long pages)
{
    unsigned long bytes = (pages + 7) / 8;

    return ALIGN(bytes, sizeof(long));
}
The variable mapsize holds the size of the bitmap. (end - start) gives the number of pages; adding 7 rounds up before dividing by 8 to get the number of bytes needed (each byte maps 8 pages). The result is then aligned to sizeof(long), i.e. to the CPU word size.
    memset(bdata->node_bootmem_map, 0xff, mapsize);
0xff sets all eight bits of every byte. "Initializing the reserved pages" simply means marking every page reserved by setting all the bits in the map to 1.
bdebug("nid=%td start=%lx map=%lx end=%lx mapsize=%lx\n",
bdata - bootmem_node_data,start, mapstart, end, mapsize);
nid=0 start=30000 map=33ffb end=34000 mapsize=800
return mapsize;为2K
}
    /* Free the lowmem regions from memblock into bootmem. */
    for_each_memblock(memory, reg) {
        unsigned long start = memblock_region_memory_base_pfn(reg);
        unsigned long end = memblock_region_memory_end_pfn(reg);

        if (end >= end_pfn)
            end = end_pfn;
        if (start >= end)
            break;

        free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT);
This marks the page range as available.
void __init free_bootmem(unsigned long addr, unsigned long size)
{
    unsigned long start, end;

    kmemleak_free_part(__va(addr), size);
My platform does not select kmemleak, so this is an empty function too. It is enabled with CONFIG_DEBUG_KMEMLEAK under Kernel hacking. Kmemleak provides an optional kernel memory-leak detector, working along the lines of a tracing garbage collector.
http://blog.csdn.net/lishenglong666/article/details/8287783
See this article for how to use it.
    start = PFN_UP(addr);
    end = PFN_DOWN(addr + size);
#define PFN_UP(x)   (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
#define PFN_PHYS(x) ((x) << PAGE_SHIFT)
PFN_UP() and PFN_DOWN() both convert an address x to a page frame number (PFN, short for Page Frame Number). The difference: PFN_UP() rounds up to the first page frame at or above x, while PFN_DOWN() rounds down to the page frame containing x. PFN_PHYS() returns the physical address of page frame x. Note the rounding direction here: using PFN_UP for start and PFN_DOWN for end means only pages that lie entirely inside [addr, addr + size) get freed.
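A quick userspace check of the three macros (standalone redefinitions, assuming 4 KB pages):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
#define PFN_PHYS(x) ((x) << PAGE_SHIFT)

int main(void)
{
    unsigned long addr = 0x30000800UL;  /* not page-aligned */

    printf("PFN_UP   = %#lx\n", PFN_UP(addr));        /* 0x30001: next full page */
    printf("PFN_DOWN = %#lx\n", PFN_DOWN(addr));      /* 0x30000: containing page */
    printf("PFN_PHYS = %#lx\n", PFN_PHYS(0x30000UL)); /* back to 0x30000000 */
    return 0;
}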
    mark_bootmem(start, end, 0, 0);
static int __init mark_bootmem(unsigned long start, unsigned long end,
    int reserve, int flags)
{
    unsigned long pos;
    bootmem_data_t *bdata;

    pos = start;
    list_for_each_entry(bdata, &bdata_list, list) { /* remember link_bootmem above */
        int err;
        unsigned long max;

        if (pos < bdata->node_min_pfn ||
            pos >= bdata->node_low_pfn) { /* check whether pos lies inside this node */
            BUG_ON(pos != start);
            continue;
        }
        max = min(bdata->node_low_pfn, end); /* never go past the top of physical memory */
        err = mark_bootmem_node(bdata, pos, max, reserve, flags);
static int __init mark_bootmem_node(bootmem_data_t *bdata,
    unsigned long start, unsigned long end,
    int reserve, int flags)
{
    unsigned long sidx, eidx;

    bdebug("nid=%td start=%lx end=%lx reserve=%d flags=%x\n",
        bdata - bootmem_node_data, start, end, reserve, flags);

    BUG_ON(start < bdata->node_min_pfn);
    BUG_ON(end > bdata->node_low_pfn);

    sidx = start - bdata->node_min_pfn;
    eidx = end - bdata->node_min_pfn;
sidx and eidx are page indexes relative to bdata->node_min_pfn.
    if (reserve)
        return __reserve(bdata, sidx, eidx, flags);
    else
        __free(bdata, sidx, eidx);
__reserve and __free are simple enough that we skip their source:
__reserve essentially does test_and_set_bit(idx, bdata->node_bootmem_map) for idx from sidx to eidx;
__free essentially does test_and_clear_bit(idx, bdata->node_bootmem_map) for idx from sidx to eidx.
    return 0;
}
        if (reserve && err) {
            mark_bootmem(start, pos, 0, 0); /* reserve is 0 here: release what was reserved so far */
            return err;
        }
        if (max == end)
            return 0;
        pos = bdata->node_low_pfn;
    }
    BUG();
}
We can see that this simply clears the bitmap bits corresponding to the memory from start to end, marking it available.
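In the same spirit, a minimal userspace model of the __reserve()/__free() primitives over a byte-array bitmap (illustrative only; the kernel uses the atomic test_and_set_bit/test_and_clear_bit):

#include <stdio.h>
#include <string.h>

static unsigned char bitmap[0x800];  /* 1 bit per page: 0x4000 pages / 8 */

/* modeled on __reserve(): set bits [sidx, eidx) */
static void reserve_range(unsigned long sidx, unsigned long eidx)
{
    for (unsigned long i = sidx; i < eidx; i++)
        bitmap[i / 8] |= 1u << (i % 8);
}

/* modeled on __free(): clear bits [sidx, eidx) */
static void free_range(unsigned long sidx, unsigned long eidx)
{
    for (unsigned long i = sidx; i < eidx; i++)
        bitmap[i / 8] &= ~(1u << (i % 8));
}

int main(void)
{
    memset(bitmap, 0xff, sizeof(bitmap)); /* all pages start reserved */
    free_range(0, 0x4000);                /* free_bootmem() over the whole bank */
    reserve_range(0x3ffb, 0x3ffc);        /* re-reserve the bitmap's own page */

    printf("page 0 reserved: %d\n", !!(bitmap[0] & 1));    /* 0 */
    printf("page 0x3ffb reserved: %d\n",
           !!(bitmap[0x3ffb / 8] & (1u << (0x3ffb % 8)))); /* 1 */
    return 0;
}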
}
}
    /* Reserve the lowmem memblock reserved regions in bootmem. */
    for_each_memblock(reserved, reg) {
        unsigned long start = memblock_region_reserved_base_pfn(reg);
        unsigned long end = memblock_region_reserved_end_pfn(reg);

        if (end >= end_pfn)
            end = end_pfn;
        if (start >= end)
            break;

        reserve_bootmem(__pfn_to_phys(start),
            (end - start) << PAGE_SHIFT, BOOTMEM_DEFAULT);
int __init reserve_bootmem(unsigned long addr, unsigned long size,
    int flags)
{
    unsigned long start, end;

    start = PFN_DOWN(addr);
    end = PFN_UP(addr + size);

    return mark_bootmem(start, end, 1, flags);
}
As you can see, reserve_bootmem sets the page bits to mark pages unavailable, e.g. the pages holding the page tables and the memory occupied by the kernel itself.
}
}
    /*
     * Sparsemem tries to allocate bootmem in memory_present(),
     * so must be done after the fixed reservations
     */
    arm_memory_present();
CONFIG_SPARSEMEM is not set on my platform, so this is an empty function as well.
    /*
     * sparse_init() needs the bootmem allocator up and running.
     */
    sparse_init();
sparse_init does not really matter on my platform either, but it does exercise bootmem allocation. It ends up going through this:
#define alloc_bootmem(x) \
    __alloc_bootmem(x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT)
void * __init __alloc_bootmem(unsigned long size, unsigned long align,
    unsigned long goal)
{
    unsigned long limit = 0;

    return ___alloc_bootmem(size, align, goal, limit);
}
which ultimately comes down to:
___alloc_bootmem_nopanic(size, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT, 0);
static void * __init ___alloc_bootmem_nopanic(unsigned long size,
    unsigned long align,
    unsigned long goal,
    unsigned long limit)
{
    bootmem_data_t *bdata;
    void *region;

restart:
    region = alloc_arch_preferred_bootmem(NULL, size, align, goal, limit);
    if (region)
        return region;

    list_for_each_entry(bdata, &bdata_list, list) {
        if (goal && bdata->node_low_pfn <= PFN_DOWN(goal))
            continue;
        if (limit && bdata->node_min_pfn >= PFN_DOWN(limit))
            break;

        region = alloc_bootmem_core(bdata, size, align, goal, limit);
        if (region)
            return region;
    }

    if (goal) {
        goal = 0;
        goto restart;
    }

    return NULL;
}
The interesting part is alloc_bootmem_core(bdata, size, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT, 0).
SMP_CACHE_BYTES is 32.
#define BOOTMEM_LOW_LIMIT __pa(MAX_DMA_ADDRESS)
#define MAX_DMA_ADDRESS PAGE_OFFSET
So the goal is simply the start of memory.
static void * __init alloc_bootmem_core(struct bootmem_data *bdata,
    unsigned long size, unsigned long align,
    unsigned long goal, unsigned long limit)
{
    unsigned long fallback = 0;
    unsigned long min, max, start, sidx, midx, step;

    bdebug("nid=%td size=%lx [%lu pages] align=%lx goal=%lx limit=%lx\n",
        bdata - bootmem_node_data, size, PAGE_ALIGN(size) >> PAGE_SHIFT,
        align, goal, limit);

    BUG_ON(!size);
    BUG_ON(align & (align - 1));
    BUG_ON(limit && goal + size > limit);

    if (!bdata->node_bootmem_map)
        return NULL;

    min = bdata->node_min_pfn;
    max = bdata->node_low_pfn;

    goal >>= PAGE_SHIFT;  /* 0x30000 on this board */
    limit >>= PAGE_SHIFT; /* 0 */

    if (limit && max > limit) /* __alloc_bootmem passes no limit */
        max = limit;
    if (max <= min)
        return NULL;

    step = max(align >> PAGE_SHIFT, 1UL);

    if (goal && min < goal && goal < max)
        start = ALIGN(goal, step);
    else
        start = ALIGN(min, step);
start ends up as 0x30000 here.
    sidx = start - bdata->node_min_pfn;
    midx = max - bdata->node_min_pfn;
Remember these two lines: sidx and midx are indexes relative to node_min_pfn.
    if (bdata->hint_idx > sidx) {
bdata->hint_idx records the index already reached by earlier allocations, so sidx should not start below it.
        /*
         * Handle the valid case of sidx being zero and still
         * catch the fallback below.
         */
        fallback = sidx + 1;
        sidx = align_idx(bdata, bdata->hint_idx, step);
sidx is recomputed from bdata->hint_idx.
    }
    while (1) {
        int merge;
        void *region;
        unsigned long eidx, i, start_off, end_off;
find_block:
        sidx = find_next_zero_bit(bdata->node_bootmem_map, midx, sidx);
This finds the next page whose bitmap bit is 0.
        sidx = align_idx(bdata, sidx, step);
        eidx = sidx + PFN_UP(size);

        if (sidx >= midx || eidx > midx)
            break;

        for (i = sidx; i < eidx; i++)
This loop tests whether any page between sidx and eidx is unavailable.
            if (test_bit(i, bdata->node_bootmem_map)) {
                sidx = align_idx(bdata, i, step);
                if (sidx == i)
                    sidx += step;
An unavailable page was found, so advance past it and go back to searching for a zero bit.
                goto find_block;
            }
        if (bdata->last_end_off & (PAGE_SIZE - 1) &&
            PFN_DOWN(bdata->last_end_off) + 1 == sidx)
If the low bits of last_end_off are 0, the previous allocation ended exactly on a page boundary and left no internal fragment. Otherwise, the "+ 1" checks whether this request lands right after the previous one; if so, the two allocations are packed together (a worked example follows the function).
            start_off = align_off(bdata, bdata->last_end_off, align);
Note that start_off is then based on where the last allocation ended,
        else
            start_off = PFN_PHYS(sidx);

        merge = PFN_DOWN(start_off) < sidx; /* 1 means merge into the previous page */
        end_off = start_off + size;
        bdata->last_end_off = end_off;
        bdata->hint_idx = PFN_UP(end_off);
        /*
         * Reserve the area now:
         */
        if (__reserve(bdata, PFN_DOWN(start_off) + merge,
                PFN_UP(end_off), BOOTMEM_EXCLUSIVE))
            BUG();
__reserve() was analyzed above: it sets the bitmap bits of the allocated pages to 1.
        region = phys_to_virt(PFN_PHYS(bdata->node_min_pfn) +
                start_off); /* the starting virtual address of the allocation */
        memset(region, 0, size);
        /*
         * The min_count is set to 0 so that bootmem allocated blocks
         * are never reported as leaks.
         */
        kmemleak_alloc(region, size, 0, 0);
We leave the kmemleak_alloc tracking aside.
        return region;
    }
    if (fallback) {
        sidx = align_idx(bdata, fallback - 1, step);
        fallback = 0;
        goto find_block;
The allocation did not succeed; restore sidx to its original value and jump back to find_block to try again.
    }
    return NULL;
}
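To see the merge path concretely, here is a worked example with made-up numbers (a userspace sketch of the same arithmetic, not kernel code):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

int main(void)
{
    /* previous allocation ended 0x20 bytes into page 5 of this node */
    unsigned long last_end_off = (5UL << PAGE_SHIFT) + 0x20;
    unsigned long sidx = 6;    /* next free bitmap index found */
    unsigned long size = 0x40; /* new request, 32-byte aligned */

    if ((last_end_off & (PAGE_SIZE - 1)) && PFN_DOWN(last_end_off) + 1 == sidx) {
        /* pack the new block right after the old one, inside page 5 */
        unsigned long start_off = (last_end_off + 31) & ~31UL;
        int merge = PFN_DOWN(start_off) < sidx; /* 1: reuse page 5's tail */

        printf("start_off=%#lx merge=%d reserve pages [%lu,%lu)\n",
               start_off, merge,
               PFN_DOWN(start_off) + merge, PFN_UP(start_off + size));
        /* prints: start_off=0x5020 merge=1 reserve pages [6,6) */
    }
    return 0;
}

The reserve range comes out empty: the new block fits entirely in the tail of page 5, which is already reserved, and the "+ merge" is exactly what stops __reserve (called with BOOTMEM_EXCLUSIVE) from tripping over that already-set bit.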
    /*
     * Now free the memory - free_area_init_node needs
     * the sparsemem_map arrays initialized by sparse_init()
     * for memmap_init_zone(), otherwise all PFNs are invalid.
     */
arm_bootmem_free(min, max_low, max_high);
static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
    unsigned long max_high)
{
    unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
    struct memblock_region *reg;

    /*
     * initialise the zones.
     */
    memset(zone_size, 0, sizeof(zone_size));

    /*
     * The memory size has already been determined.  If we need
     * to do anything fancy with the allocation of this memory
     * to the zones, now is the time to do it.
     */
    zone_size[0] = max_low - min; /* size of lowmem */
#ifdef CONFIG_HIGHMEM
    zone_size[ZONE_HIGHMEM] = max_high - max_low;
#endif
    /*
     * Calculate the size of the holes.
     *  holes = node_size - sum(bank_sizes)
     */
The hole size is the node size minus the sum of the bank sizes.
    memcpy(zhole_size, zone_size, sizeof(zhole_size));
    for_each_memblock(memory, reg) {
        unsigned long start = memblock_region_memory_base_pfn(reg);
        unsigned long end = memblock_region_memory_end_pfn(reg);

        if (start < max_low) {
            unsigned long low_end = min(end, max_low);
            zhole_size[0] -= low_end - start;
        }
#ifdef CONFIG_HIGHMEM
        if (end > max_low) {
            unsigned long high_start = max(start, max_low);
            zhole_size[ZONE_HIGHMEM] -= end - high_start;
        }
#endif
    }
The loop above implements holes = node_size - sum(bank_sizes), computing the lowmem and highmem hole sizes separately and recording them in zhole_size.
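For instance, with a hypothetical bank that covers only part of the node (on my board the single bank covers the whole node, so the hole is 0):

#include <stdio.h>

int main(void)
{
    unsigned long min = 0x30000, max_low = 0x34000;
    unsigned long zone_size0  = max_low - min; /* 0x4000 pages spanned */
    unsigned long zhole_size0 = zone_size0;

    /* a made-up bank covering only three quarters of the node */
    unsigned long bank_start = 0x30000, bank_end = 0x33000;
    zhole_size0 -= bank_end - bank_start;

    printf("spanned=%#lx hole=%#lx\n", zone_size0, zhole_size0);
    /* prints: spanned=0x4000 hole=0x1000 */
    return 0;
}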
#ifdef CONFIG_ZONE_DMA
    /*
     * Adjust the sizes according to any special requirements for
     * this machine type.
     */
    if (arm_dma_zone_size) {
        arm_adjust_dma_zone(zone_size, zhole_size,
                    arm_dma_zone_size >> PAGE_SHIFT);
static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
    unsigned long dma_size)
{
    if (size[0] <= dma_size)
        return;

    size[ZONE_NORMAL] = size[0] - dma_size;
    size[ZONE_DMA] = dma_size;
    hole[ZONE_NORMAL] = hole[0];
    hole[ZONE_DMA] = 0;
}
This splits the zone sizes into ZONE_DMA and ZONE_NORMAL.
arm_dma_limit is the maximum bus address reachable by the DMA mask used for GFP_DMA allocations. For example, the function that checks whether a device's DMA is supported:
int dma_supported(struct device *dev, u64 mask)
{
    if (mask < (u64)arm_dma_limit)
        return 0;
    return 1;
}
For a device with 24-bit bus addressing, mask is 0x00ffffff. If arm_dma_zone_size is 0, arm_dma_limit is set to 0xffffffff below, so such a device is definitely not supported (a concrete check follows after the #endif).
arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
} else
arm_dma_limit = 0xffffffff;
#endif
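To make the limit concrete, a check for a hypothetical device under the setting my board ends up with (arm_dma_zone_size unset):

#include <stdio.h>

typedef unsigned long long u64;

/* same test as dma_supported() above */
static int supported(u64 mask, u64 arm_dma_limit)
{
    return mask >= arm_dma_limit;
}

int main(void)
{
    /* arm_dma_zone_size is 0 on this board, so arm_dma_limit = 0xffffffff */
    u64 arm_dma_limit = 0xffffffffULL;

    printf("%d\n", supported(0x00ffffffULL, arm_dma_limit)); /* 0: 24-bit device */
    printf("%d\n", supported(0xffffffffULL, arm_dma_limit)); /* 1: full 32-bit mask */
    return 0;
}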
    free_area_init_node(0, zone_size, min, zhole_size);
void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
    unsigned long node_start_pfn, unsigned long *zholes_size)
{
    pg_data_t *pgdat = NODE_DATA(nid);
pgdat is node 0's pg_data_t.
    pgdat->node_id = nid;                   /* 0 */
    pgdat->node_start_pfn = node_start_pfn; /* the min from above */

    calculate_node_totalpages(pgdat, zones_size, zholes_size);
static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
    unsigned long *zones_size, unsigned long *zholes_size)
{
    unsigned long realtotalpages, totalpages = 0;
    enum zone_type i;

    for (i = 0; i < MAX_NR_ZONES; i++)
        totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
                            zones_size);
This returns the number of pages a zone spans within the node, holes included. There are several variants of this function; on my platform it is:
static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
    unsigned long zone_type,
    unsigned long *zones_size)
{
    return zones_size[zone_type];
}
i.e. just the zone size, holes not subtracted. So totalpages is the sum of pages over zones 0 to MAX_NR_ZONES, holes included.
    pgdat->node_spanned_pages = totalpages;
    realtotalpages = totalpages;

    for (i = 0; i < MAX_NR_ZONES; i++)
        realtotalpages -=
            zone_absent_pages_in_node(pgdat->node_id, i,
                            zholes_size);
static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
    unsigned long zone_type,
    unsigned long *zholes_size)
{
    if (!zholes_size)
        return 0;

    return zholes_size[zone_type];
}
From the function it is clear that realtotalpages is the page total excluding holes.
    pgdat->node_present_pages = realtotalpages;
    printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
        realtotalpages);
Output:
On node 0 totalpages: 16384
That is 64 MB; my board has no holes.
}
After calculate_node_totalpages:
pgdat->node_spanned_pages is node 0's page total including holes;
pgdat->node_present_pages is node 0's page total excluding holes.
    alloc_node_mem_map(pgdat);
static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
{
    /* Skip empty nodes */
    if (!pgdat->node_spanned_pages)
        return;

#ifdef CONFIG_FLAT_NODE_MEM_MAP
    /* ia64 gets its own node_mem_map, before this, without bootmem */
    if (!pgdat->node_mem_map) {
        unsigned long size, start, end;
        struct page *map;

        /*
         * The zone's endpoints aren't required to be MAX_ORDER
         * aligned but the node_mem_map endpoints must be in order
         * for the buddy allocator to function correctly.
         */
As the comment says, the zone endpoints need not be MAX_ORDER-aligned, but the node_mem_map endpoints must be, for the buddy allocator to work correctly.
        start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
        end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
        end = ALIGN(end, MAX_ORDER_NR_PAGES);
Note the alignment to MAX_ORDER_NR_PAGES above.
        size = (end - start) * sizeof(struct page);
        map = alloc_remap(pgdat->node_id, size);
alloc_remap is implemented on some platforms but not on ARM, so the allocation ultimately falls to alloc_bootmem_node_nopanic:
        if (!map)
            map = alloc_bootmem_node_nopanic(pgdat, size);
which first tries kzalloc and then alloc_bootmem_core; going into more detail would drag in a pile of slab material.
        pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
    }
#ifndef CONFIG_NEED_MULTIPLE_NODES
    /*
     * With no DISCONTIG, the global mem_map is just set as node 0's
     */
    if (pgdat == NODE_DATA(0)) {
        mem_map = NODE_DATA(0)->node_mem_map;
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP /* not set on my platform, skipped */
        if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
            mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
    }
#endif
#endif /* CONFIG_FLAT_NODE_MEM_MAP */
}
#ifdef CONFIG_FLAT_NODE_MEM_MAP
    printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
        nid, (unsigned long)pgdat,
        (unsigned long)pgdat->node_mem_map);
#endif
Output:
free_area_init_node: node 0, pgdat c0335e40, node_mem_map c0368000
    free_area_init_core(pgdat, zones_size, zholes_size);
This marks all pages reserved, marks all memory lists empty, and clears the memory bitmaps:
static void __paginginit free_area_init_core(struct pglist_data *pgdat,
    unsigned long *zones_size, unsigned long *zholes_size)
{
    enum zone_type j;
    int nid = pgdat->node_id;
    unsigned long zone_start_pfn = pgdat->node_start_pfn;
    int ret;

    pgdat_resize_init(pgdat);
This is just spin_lock_init(&pgdat->node_size_lock);
    pgdat->nr_zones = 0;
    init_waitqueue_head(&pgdat->kswapd_wait);
    pgdat->kswapd_max_order = 0;
kswapd is the page swap-out daemon.
    pgdat_page_cgroup_init(pgdat); /* empty function */
    for (j = 0; j < MAX_NR_ZONES; j++) {
        struct zone *zone = pgdat->node_zones + j;
        unsigned long size, realsize, memmap_pages;
        enum lru_list l;

        size = zone_spanned_pages_in_node(nid, j, zones_size);
        realsize = size - zone_absent_pages_in_node(nid, j,
                                zholes_size);
As discussed above, realsize is the size with the holes subtracted.
        /*
         * Adjust realsize so that it accounts for how much memory
         * is used by this zone for memmap. This affects the watermark
         * and per-cpu initialisations
         */
        memmap_pages =
            PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
memmap_pages is the number of pages used to store the struct page array just discussed.
        if (realsize >= memmap_pages) {
            realsize -= memmap_pages;
            if (memmap_pages)
                printk(KERN_DEBUG
                       "  %s zone: %lu pages used for memmap\n",
                       zone_names[j], memmap_pages);
Output: Normal zone: 128 pages used for memmap (that is 512 KB).
        } else
            printk(KERN_WARNING
                   "  %s zone: %lu pages exceeds realsize %lu\n",
                   zone_names[j], memmap_pages, realsize);

        /* Account for reserved pages */
        if (j == 0 && realsize > dma_reserve) {
            realsize -= dma_reserve;
            printk(KERN_DEBUG "  %s zone: %lu pages reserved\n",
                   zone_names[0], dma_reserve);
Output: Normal zone: 0 pages reserved
        }
So realsize = size - holes - memmap pages - DMA reserve.
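The memmap figure checks out (assuming sizeof(struct page) is 32 bytes on this 32-bit build):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
    unsigned long size = 16384;           /* pages in the Normal zone */
    unsigned long struct_page_bytes = 32; /* assumed for this build */
    unsigned long memmap_pages =
        PAGE_ALIGN(size * struct_page_bytes) >> PAGE_SHIFT;

    printf("memmap_pages=%lu (%lu KB)\n",
           memmap_pages, memmap_pages * PAGE_SIZE / 1024);
    /* prints: memmap_pages=128 (512 KB), matching the boot log */
    return 0;
}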
        if (!is_highmem_idx(j)) /* if this is not a highmem index, count it as kernel pages */
            nr_kernel_pages += realsize;
        nr_all_pages += realsize;

        zone->spanned_pages = size;
        zone->present_pages = realsize;
#ifdef CONFIG_NUMA /* not set, skipped */
        zone->node = nid;
        zone->min_unmapped_pages = (realsize * sysctl_min_unmapped_ratio)
                        / 100;
        zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
#endif
        zone->name = zone_names[j]; /* "DMA", "Normal", "HighMem" */
        spin_lock_init(&zone->lock);
        spin_lock_init(&zone->lru_lock);
        zone_seqlock_init(zone);
This is just seqlock_init(&zone->span_seqlock);. Seqlocks are probably less familiar, so here is a definition:
A sequence lock is an optimization of the reader-writer lock. Its defining feature is that a reader may keep reading while a writer writes the shared data the seqlock protects; the reader does not wait for the writer to finish, and the writer does not wait for all readers to finish before writing. Writers, however, exclude each other. If a write happens while a reader is inside its read section, the reader must reread the data. The probability of a read and a write actually overlapping is small.
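For illustration, the usual access pattern around a seqlock (a kernel-style sketch using the real <linux/seqlock.h> helpers; the lock and variable names are made up):

#include <linux/seqlock.h>

static DEFINE_SEQLOCK(span_lock);
static unsigned long span_start, span_pages;

void writer(unsigned long start, unsigned long pages)
{
    write_seqlock(&span_lock);  /* writers exclude each other */
    span_start = start;
    span_pages = pages;
    write_sequnlock(&span_lock);
}

unsigned long reader_total(void)
{
    unsigned seq;
    unsigned long start, pages;

    do {
        seq = read_seqbegin(&span_lock); /* snapshot the sequence count */
        start = span_start;
        pages = span_pages;
    } while (read_seqretry(&span_lock, seq)); /* retry if a write raced */

    return start + pages;
}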
        zone->zone_pgdat = pgdat;
        zone_pcp_init(zone);
static __meminit void zone_pcp_init(struct zone *zone)
{
    /*
     * per cpu subsystem is not up at this point. The following code
     * relies on the ability of the linker to provide the
     * offset of a (static) per cpu variable into the per cpu area.
     */
    zone->pageset = &boot_pageset;
At this point in boot the per_cpu machinery is not yet initialized, and the space for dynamically allocated per_cpu variables has not been set aside; so a static per_cpu variable, boot_pageset, is defined to manage the zone's per_cpu caches for the time being.
http://chxxxyg.blog.163.com/blog/static/15028119320121176315845/
This article explains per_cpu rather clearly.
    if (zone->present_pages)
        printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%u\n",
            zone->name, zone->present_pages,
            zone_batchsize(zone));
Output: Normal zone: 16256 pages, LIFO batch:3
}
        for_each_lru(l)
            INIT_LIST_HEAD(&zone->lru[l].list);
This initializes the LRU lists:
#define for_each_lru(l) for (l = 0; l < NR_LRU_LISTS; l++)
#define LRU_BASE 0
#define LRU_ACTIVE 1
#define LRU_FILE 2
enum lru_list {
    LRU_INACTIVE_ANON = LRU_BASE,
    LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
    LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
    LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
    LRU_UNEVICTABLE,
    NR_LRU_LISTS
};
So each of the lists above gets its head initialized. Memory management (page reclaim) uses these later.
        zone->reclaim_stat.recent_rotated[0] = 0;
        zone->reclaim_stat.recent_rotated[1] = 0;
        zone->reclaim_stat.recent_scanned[0] = 0;
        zone->reclaim_stat.recent_scanned[1] = 0;
        zap_zone_vm_stats(zone);
zap_zone_vm_stats is just memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
        zone->flags = 0;
        if (!size)
            continue;

        set_pageblock_order(pageblock_default_order());
        setup_usemap(pgdat, zone, size);
Both of the above are empty functions on my platform.
        ret = init_currently_empty_zone(zone, zone_start_pfn,
                        size, MEMMAP_EARLY);
        BUG_ON(ret);
        memmap_init(size, nid, j, zone_start_pfn); /* this initializes all pages as reserved */
        zone_start_pfn += size;
    }
}
}
}
    high_memory = __va(((phys_addr_t)max_low << PAGE_SHIFT) - 1) + 1;
This is the virtual address where highmem would begin (the end of lowmem).
    /*
     * This doesn't seem to be used by the Linux memory manager any
     * more, but is used by ll_rw_block.  If we can get rid of it, we
     * also get rid of some of the stuff above as well.
     *
     * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
     * the system, not the maximum PFN.
     */
Note the comment: max_low_pfn and max_pfn count the pages in the system, not the maximum PFN.
    max_low_pfn = max_low - PHYS_PFN_OFFSET;
    max_pfn = max_high - PHYS_PFN_OFFSET;
}
Later in the boot process, mem_init calls free_all_bootmem to release all of lowmem to the buddy allocator; from then on, bootmem is no longer used.