二、虚拟地址空间布局
ARM64处理器不支持完整的64位虚拟地址。在ARM64架构的Linux内核中,内核虚拟地址和用户虚拟地址通常都是48位(高16位不参与寻址,仅作符号扩展)。所有进程共享同一段内核虚拟地址空间:ffff 0000 0000 0000 ~ ffff ffff ffff ffff;每个进程拥有独立的用户虚拟地址空间:0000 0000 0000 0000 ~ 0000 ffff ffff ffff。同一进程内线程组的所有线程共享用户虚拟地址空间;内核线程没有用户态的虚拟地址空间。
1、用户虚拟地址的划分
进程的用户虚拟空间的起始地址是0, 长度是TASK_SIZE,由每种处理器架构定义自己的宏TASK_SIZE。ARM64架构定义的宏如下:
源码位置(Linux 4.12):arch/arm64/include/asm/memory.h
32位用户空间程序:TASK_SIZE == TASK_SIZE_32 == 0x100000000等于4GB
64位用户空间程序:TASK_SIZE == TASK_SIZE_64,即2^VA_BITS字节。一般情况是编译的时候配置VA_BITS的值。
aston@ubuntu$ grep -rn --colour 'CONFIG_ARM64_VA_BITS' . --include=*
./arch/arm64/configs/defconfig:74:CONFIG_ARM64_VA_BITS_48=y
aston@ubuntu$ grep -rnw --colour 'CONFIG_ARM64_VA_BITS' . --include=*
./arch/arm64/include/asm/memory.h:66:#define VA_BITS			(CONFIG_ARM64_VA_BITS)
./arch/arm64/Makefile:90:			(0xffffffff & (-1 << ($(CONFIG_ARM64_VA_BITS) - 32))) \
./arch/arm64/Makefile:91:			+ (1 << ($(CONFIG_ARM64_VA_BITS) - 32 - 3)) \
aston@ubuntu:/mnt/hgfs/share/025-linux-4.12/Linux-4.12$
2、内核地址空间布局
3、内存描述结构
/*
 * Memory descriptor (Linux 4.12, include/linux/mm_types.h): the kernel's
 * per-process view of the user virtual address space. All threads in one
 * thread group share a single mm_struct; kernel threads have no mm of
 * their own (their mm pointer is NULL).
 */
struct mm_struct {
struct vm_area_struct *mmap;/* linked list of VMAs — every process keeps its virtual memory areas here (list of VMAs) */
struct rb_root mm_rb;//red-black tree of VMAs for fast address -> VMA lookup
u32 vmacache_seqnum;/* per-thread vmacache */
#ifdef CONFIG_MMU //hook: find a free (unmapped) region in the memory-mapping area
unsigned long (*get_unmapped_area) (struct file *filp,
unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags);
#endif
unsigned long mmap_base;/*start address of the memory-mapping region (base of mmap area) */
unsigned long mmap_legacy_base;/* base of mmap area in bottom-up allocations */
#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
/* Base addresses for compatible mmap() */
unsigned long mmap_compat_base;
unsigned long mmap_compat_legacy_base;
#endif
unsigned long task_size;/*length of the user virtual address space (size of task vm space) */
unsigned long highest_vm_end; /* highest vma end address */
pgd_t * pgd;//points to the page global directory, i.e. the top-level page table
/**
 * @mm_users: The number of users including userspace.
 *
 * Use mmget()/mmget_not_zero()/mmput() to modify. When this drops
 * to 0 (i.e. when the task exits and there are no other temporary
 * reference holders), we also release a reference on @mm_count
 * (which may then free the &struct mm_struct if @mm_count also
 * drops to 0).
 */
atomic_t mm_users;//number of threads sharing this user address space, i.e. the number of threads in the thread group
/**
 * @mm_count: The number of references to &struct mm_struct
 * (@mm_users count as 1).
 *
 * Use mmgrab()/mmdrop() to modify. When this drops to 0, the
 * &struct mm_struct is freed.
 */
atomic_t mm_count;//reference count of the memory descriptor itself
atomic_long_t nr_ptes; /* PTE page table pages */
#if CONFIG_PGTABLE_LEVELS > 2
atomic_long_t nr_pmds; /* PMD page table pages */
#endif
int map_count; /* number of VMAs */
spinlock_t page_table_lock; /* Protects page tables and some counters */
struct rw_semaphore mmap_sem;
struct list_head mmlist; /* List of maybe swapped mm's. These are globally strung
 * together off init_mm.mmlist, and are protected
 * by mmlist_lock
 */
unsigned long hiwater_rss; /* High-watermark of RSS usage — the maximum number of page frames the process has ever owned */
unsigned long hiwater_vm; /* High-water virtual memory usage — the maximum number of pages ever mapped */
unsigned long total_vm; /* Total pages mapped — size of the process address space in pages */
unsigned long locked_vm; /* Pages that have PG_mlocked set — locked in memory, cannot be swapped out */
unsigned long pinned_vm; /* Refcount permanently increased */
unsigned long data_vm; /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
unsigned long stack_vm; /* VM_STACK */
unsigned long def_flags;
/*start/end addresses of the code segment; start/end addresses of the data segment*/
unsigned long start_code, end_code, start_data, end_data;
/*start/end addresses of the heap; start address of the stack*/
unsigned long start_brk, brk, start_stack;
/*start/end addresses of the argument strings; start/end addresses of the environment variables*/
unsigned long arg_start, arg_end, env_start, env_end;
/* auxiliary vector saved at exec time */
unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
/*
 * Special counters, in some configurations protected by the
 * page_table_lock, in other configurations by being atomic.
 */
struct mm_rss_stat rss_stat;
struct linux_binfmt *binfmt;
cpumask_var_t cpu_vm_mask_var;
/* Architecture-specific MM context (processor-specific memory-management state) */
mm_context_t context;
unsigned long flags; /* Must use atomic bitops to access the bits */
struct core_state *core_state; /* coredumping support */
#ifdef CONFIG_AIO
spinlock_t ioctx_lock;
struct kioctx_table __rcu *ioctx_table;
#endif
#ifdef CONFIG_MEMCG
/*
 * "owner" points to a task that is regarded as the canonical
 * user/owner of this mm. All of the following must be true in
 * order for it to be changed:
 *
 * current == mm->owner
 * current->mm != mm
 * new_owner->mm == mm
 * new_owner->alloc_lock is held
 */
struct task_struct __rcu *owner;
#endif
struct user_namespace *user_ns;
/* store ref to file /proc/<pid>/exe symlink points to */
struct file __rcu *exe_file;
#ifdef CONFIG_MMU_NOTIFIER
struct mmu_notifier_mm *mmu_notifier_mm;
#endif
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
pgtable_t pmd_huge_pte; /* protected by page_table_lock */
#endif
#ifdef CONFIG_CPUMASK_OFFSTACK
struct cpumask cpumask_allocation;
#endif
#ifdef CONFIG_NUMA_BALANCING
/*
 * numa_next_scan is the next time that the PTEs will be marked
 * pte_numa. NUMA hinting faults will gather statistics and migrate
 * pages to new nodes if necessary.
 */
unsigned long numa_next_scan;
/* Restart point for scanning and setting pte_numa */
unsigned long numa_scan_offset;
/* numa_scan_seq prevents two threads setting pte_numa */
int numa_scan_seq;
#endif
#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
/*
 * An operation with batched TLB flushing is going on. Anything that
 * can move process memory needs to flush the TLB when moving a
 * PROT_NONE or PROT_NUMA mapped page.
 */
bool tlb_flush_pending;
#endif
struct uprobes_state uprobes_state;
#ifdef CONFIG_HUGETLB_PAGE
atomic_long_t hugetlb_usage;
#endif
struct work_struct async_put_work;
};