前往小程序,Get更优阅读体验!
立即前往
首页
学习
活动
专区
工具
TVP
发布
社区首页 >专栏 >Linux | 内存管理之mmap函数

Linux | 内存管理之mmap函数

作者头像
heidsoft
发布2023-03-18 17:17:27
1.6K0
发布2023-03-18 17:17:27
举报

~/Downloads/research/linux-5.15.4/include/linux/mm_types.h

代码语言:c
复制
struct page {
  unsigned long flags;    /* Atomic flags, some possibly
           * updated asynchronously */
  /*
   * Five words (20/40 bytes) are available in this union.
   * WARNING: bit 0 of the first word is used for PageTail(). That
   * means the other users of this union MUST NOT use the bit to
   * avoid collision and false-positive PageTail().
   */
  union {
    struct {  /* Page cache and anonymous pages */
      /**
       * @lru: Pageout list, eg. active_list protected by
       * lruvec->lru_lock.  Sometimes used as a generic list
       * by the page owner.
       */
      struct list_head lru;
      /* See page-flags.h for PAGE_MAPPING_FLAGS */
      struct address_space *mapping;
      pgoff_t index;    /* Our offset within mapping. */
      /**
       * @private: Mapping-private opaque data.
       * Usually used for buffer_heads if PagePrivate.
       * Used for swp_entry_t if PageSwapCache.
       * Indicates order in the buddy system if PageBuddy.
       */
      unsigned long private;
    };
    struct {  /* page_pool used by netstack */
      /**
       * @pp_magic: magic value to avoid recycling non
       * page_pool allocated pages.
       */
      unsigned long pp_magic;
      struct page_pool *pp;
      unsigned long _pp_mapping_pad;
      unsigned long dma_addr;
      union {
        /**
         * dma_addr_upper: might require a 64-bit
         * value on 32-bit architectures.
         */
        unsigned long dma_addr_upper;
        /**
         * For frag page support, not supported in
         * 32-bit architectures with 64-bit DMA.
         */
        atomic_long_t pp_frag_count;
      };
    };
    struct {  /* slab, slob and slub */
      union {
        struct list_head slab_list;
        struct {  /* Partial pages */
          struct page *next;
#ifdef CONFIG_64BIT
          int pages;  /* Nr of pages left */
          int pobjects;  /* Approximate count */
#else
          short int pages;
          short int pobjects;
#endif
        };
      };
      struct kmem_cache *slab_cache; /* not slob */
      /* Double-word boundary */
      void *freelist;    /* first free object */
      union {
        void *s_mem;  /* slab: first object */
        unsigned long counters;    /* SLUB */
        struct {      /* SLUB */
          unsigned inuse:16;
          unsigned objects:15;
          unsigned frozen:1;
        };
      };
    };
    struct {  /* Tail pages of compound page */
      unsigned long compound_head;  /* Bit zero is set */

      /* First tail page only */
      unsigned char compound_dtor;
      unsigned char compound_order;
      atomic_t compound_mapcount;
      unsigned int compound_nr; /* 1 << compound_order */
    };
    struct {  /* Second tail page of compound page */
      unsigned long _compound_pad_1;  /* compound_head */
      atomic_t hpage_pinned_refcount;
      /* For both global and memcg */
      struct list_head deferred_list;
    };
    struct {  /* Page table pages */
      unsigned long _pt_pad_1;  /* compound_head */
      pgtable_t pmd_huge_pte; /* protected by page->ptl */
      unsigned long _pt_pad_2;  /* mapping */
      union {
        struct mm_struct *pt_mm; /* x86 pgds only */
        atomic_t pt_frag_refcount; /* powerpc */
      };
#if ALLOC_SPLIT_PTLOCKS
      spinlock_t *ptl;
#else
      spinlock_t ptl;
#endif
    };
    struct {  /* ZONE_DEVICE pages */
      /** @pgmap: Points to the hosting device page map. */
      struct dev_pagemap *pgmap;
      void *zone_device_data;
      /*
       * ZONE_DEVICE private pages are counted as being
       * mapped so the next 3 words hold the mapping, index,
       * and private fields from the source anonymous or
       * page cache page while the page is migrated to device
       * private memory.
       * ZONE_DEVICE MEMORY_DEVICE_FS_DAX pages also
       * use the mapping, index, and private fields when
       * pmem backed DAX files are mapped.
       */
    };

    /** @rcu_head: You can use this to free a page by RCU. */
    struct rcu_head rcu_head;
  };

  union {    /* This union is 4 bytes in size. */
    /*
     * If the page can be mapped to userspace, encodes the number
     * of times this page is referenced by a page table.
     */
    atomic_t _mapcount;

    /*
     * If the page is neither PageSlab nor mappable to userspace,
     * the value stored here may help determine what this page
     * is used for.  See page-flags.h for a list of page types
     * which are currently stored here.
     */
    unsigned int page_type;

    unsigned int active;    /* SLAB */
    int units;      /* SLOB */
  };

  /* Usage count. *DO NOT USE DIRECTLY*. See page_ref.h */
  atomic_t _refcount;

#ifdef CONFIG_MEMCG
  unsigned long memcg_data;
#endif

  /*
   * On machines where all RAM is mapped into kernel address space,
   * we can simply calculate the virtual address. On machines with
   * highmem some memory is mapped into kernel virtual memory
   * dynamically, so we need a place to store that address.
   * Note that this field could be 16 bits on x86 ... ;)
   *
   * Architectures with slow multiplication can define
   * WANT_PAGE_VIRTUAL in asm/page.h
   */
#if defined(WANT_PAGE_VIRTUAL)
  void *virtual;      /* Kernel virtual address (NULL if
             not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */

#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
  int _last_cpupid;
#endif
} _struct_page_alignment;
本文参与 腾讯云自媒体分享计划,分享自微信公众号。
原始发表:2022-11-23,如有侵权请联系 cloudcommunity@tencent.com 删除

本文分享自 云数智圈 微信公众号,前往查看

如有侵权,请联系 cloudcommunity@tencent.com 删除。

本文参与 腾讯云自媒体分享计划  ,欢迎热爱写作的你一起参与!

评论
登录后参与评论
0 条评论
热度
最新
推荐阅读
领券
问题归档专栏文章快讯文章归档关键词归档开发者手册归档开发者手册 Section 归档