kernel_xiaomi_sm8250/mm/mempolicy.c

/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a per-process
 *                counter is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to memory nodes instead
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case NUMA_NO_NODE here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *                in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem, kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
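
/*
 * Illustration (editor's addition, not part of the original source): the
 * modes above are requested from userspace through the set_mempolicy(2)
 * and mbind(2) system calls, e.g. via the <numaif.h> wrappers from
 * libnuma. Here addr/length are assumed to refer to an existing mapping:
 *
 *      #include <numaif.h>
 *
 *      unsigned long mask = (1UL << 0) | (1UL << 1);   // nodes 0 and 1
 *
 *      // process policy: interleave new allocations across nodes 0-1
 *      set_mempolicy(MPOL_INTERLEAVE, &mask, sizeof(mask) * 8);
 *
 *      // VMA policy: restrict an existing mapping to node 0 only
 *      unsigned long bind_mask = 1UL << 0;
 *      mbind(addr, length, MPOL_BIND, &bind_mask, sizeof(bind_mask) * 8, 0);
 */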

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/ptrace.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>
#include <linux/swapops.h>

#include <asm/tlbflush.h>
#include <linux/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
        .refcnt = ATOMIC_INIT(1), /* never free it */
        .mode = MPOL_PREFERRED,
        .flags = MPOL_F_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

struct mempolicy *get_task_policy(struct task_struct *p)
{
        struct mempolicy *pol = p->mempolicy;
        int node;

        if (pol)
                return pol;

        node = numa_node_id();
        if (node != NUMA_NO_NODE) {
                pol = &preferred_node_policy[node];
                /* preferred_node_policy is not initialised early in boot */
                if (pol->mode)
                        return pol;
        }

        return &default_policy;
}

static const struct mempolicy_operations {
        int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
        void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
} mpol_ops[MPOL_MAX];

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
        return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
                                   const nodemask_t *rel)
{
        nodemask_t tmp;

        nodes_fold(tmp, *orig, nodes_weight(*rel));
        nodes_onto(*ret, tmp, *rel);
}
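
/*
 * Worked example (editor's note, assuming the bitmap_fold()/bitmap_onto()
 * semantics documented in lib/bitmap.c): for a user mask of {0, 2} and an
 * allowed set of {4, 5, 6}, nodes_fold() wraps the user mask modulo the
 * weight 3, leaving {0, 2}, and nodes_onto() then maps bit 0 to the 0th
 * allowed node (4) and bit 2 to the 2nd allowed node (6), so *ret is {4, 6}.
 */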

static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
        if (nodes_empty(*nodes))
                return -EINVAL;
        pol->v.nodes = *nodes;
        return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
        if (!nodes)
                pol->flags |= MPOL_F_LOCAL;	/* local allocation */
        else if (nodes_empty(*nodes))
                return -EINVAL;			/* no allowed nodes */
        else
                pol->v.preferred_node = first_node(*nodes);
        return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
        if (nodes_empty(*nodes))
                return -EINVAL;
        pol->v.nodes = *nodes;
        return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy. mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags. But, we need to
 * handle an empty nodemask with MPOL_PREFERRED here.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy. May also be called holding the mmap_semaphore for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
                     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
        int ret;

        /* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
        if (pol == NULL)
                return 0;
        /* Check N_MEMORY */
        nodes_and(nsc->mask1,
                  cpuset_current_mems_allowed, node_states[N_MEMORY]);

        VM_BUG_ON(!nodes);
        if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
                nodes = NULL;	/* explicit local allocation */
        else {
                if (pol->flags & MPOL_F_RELATIVE_NODES)
                        mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
                else
                        nodes_and(nsc->mask2, *nodes, nsc->mask1);

                if (mpol_store_user_nodemask(pol))
                        pol->w.user_nodemask = *nodes;
                else
                        pol->w.cpuset_mems_allowed =
                                                cpuset_current_mems_allowed;
        }

        if (nodes)
                ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
        else
                ret = mpol_ops[pol->mode].create(pol, NULL);
        return ret;
}

/*
 * This function just creates a new policy, does some check and simple
 * initialization. You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
                                  nodemask_t *nodes)
{
        struct mempolicy *policy;

        pr_debug("setting mode %d flags %d nodes[0] %lx\n",
                 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);

        if (mode == MPOL_DEFAULT) {
                if (nodes && !nodes_empty(*nodes))
                        return ERR_PTR(-EINVAL);
                return NULL;
        }
        VM_BUG_ON(!nodes);

        /*
         * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
         * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
         * All other modes require a valid pointer to a non-empty nodemask.
         */
        if (mode == MPOL_PREFERRED) {
                if (nodes_empty(*nodes)) {
                        if (((flags & MPOL_F_STATIC_NODES) ||
                             (flags & MPOL_F_RELATIVE_NODES)))
                                return ERR_PTR(-EINVAL);
                }
        } else if (mode == MPOL_LOCAL) {
                if (!nodes_empty(*nodes) ||
                    (flags & MPOL_F_STATIC_NODES) ||
                    (flags & MPOL_F_RELATIVE_NODES))
                        return ERR_PTR(-EINVAL);
                mode = MPOL_PREFERRED;
        } else if (nodes_empty(*nodes))
                return ERR_PTR(-EINVAL);
        policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
        if (!policy)
                return ERR_PTR(-ENOMEM);
        atomic_set(&policy->refcnt, 1);
        policy->mode = mode;
        policy->flags = flags;

        return policy;
}

/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
        if (!atomic_dec_and_test(&p->refcnt))
                return;
        kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
{
}

static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
{
        nodemask_t tmp;

        if (pol->flags & MPOL_F_STATIC_NODES)
                nodes_and(tmp, pol->w.user_nodemask, *nodes);
        else if (pol->flags & MPOL_F_RELATIVE_NODES)
                mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
        else {
                nodes_remap(tmp, pol->v.nodes, pol->w.cpuset_mems_allowed,
                            *nodes);
                pol->w.cpuset_mems_allowed = *nodes;
        }

        if (nodes_empty(tmp))
                tmp = *nodes;

        pol->v.nodes = tmp;
}

static void mpol_rebind_preferred(struct mempolicy *pol,
                                  const nodemask_t *nodes)
{
        nodemask_t tmp;

        if (pol->flags & MPOL_F_STATIC_NODES) {
                int node = first_node(pol->w.user_nodemask);

                if (node_isset(node, *nodes)) {
                        pol->v.preferred_node = node;
                        pol->flags &= ~MPOL_F_LOCAL;
                } else
                        pol->flags |= MPOL_F_LOCAL;
        } else if (pol->flags & MPOL_F_RELATIVE_NODES) {
                mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
                pol->v.preferred_node = first_node(tmp);
        } else if (!(pol->flags & MPOL_F_LOCAL)) {
                pol->v.preferred_node = node_remap(pol->v.preferred_node,
                                                   pol->w.cpuset_mems_allowed,
                                                   *nodes);
                pol->w.cpuset_mems_allowed = *nodes;
        }
}
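
/*
 * Example (editor's note): an MPOL_PREFERRED policy created with
 * MPOL_F_STATIC_NODES on node 2 keeps preferring node 2 after a cpuset
 * migration as long as node 2 is still allowed, and falls back to local
 * allocation otherwise. Without either MPOL_F_*_NODES flag the preferred
 * node is remapped by ordinal position instead: node 2, the 0th node of
 * an old mems_allowed of {2,3}, becomes node 4, the 0th node of a new
 * mems_allowed of {4,5}.
 */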

/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * Per-vma policies are protected by mmap_sem. Allocations using per-task
 * policies are protected by task->mems_allowed_seq to prevent a premature
 * OOM/allocation failure due to parallel nodemask modification.
 */
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
{
        if (!pol)
                return;
        if (!mpol_store_user_nodemask(pol) && !(pol->flags & MPOL_F_LOCAL) &&
            nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
                return;

        mpol_ops[pol->mode].rebind(pol, newmask);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 *
 * Called with task's alloc_lock held.
 */
void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
{
        mpol_rebind_policy(tsk->mempolicy, new);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm. Takes mm->mmap_sem during call.
 */
void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
        struct vm_area_struct *vma;

        down_write(&mm->mmap_sem);
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                vm_write_begin(vma);
                mpol_rebind_policy(vma->vm_policy, new);
                vm_write_end(vma);
        }
        up_write(&mm->mmap_sem);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
        [MPOL_DEFAULT] = {
                .rebind = mpol_rebind_default,
        },
        [MPOL_INTERLEAVE] = {
                .create = mpol_new_interleave,
                .rebind = mpol_rebind_nodemask,
        },
        [MPOL_PREFERRED] = {
                .create = mpol_new_preferred,
                .rebind = mpol_rebind_preferred,
        },
        [MPOL_BIND] = {
                .create = mpol_new_bind,
                .rebind = mpol_rebind_nodemask,
        },
};

static int migrate_page_add(struct page *page, struct list_head *pagelist,
                                unsigned long flags);

struct queue_pages {
        struct list_head *pagelist;
        unsigned long flags;
        nodemask_t *nmask;
        struct vm_area_struct *prev;
};

/*
 * Check if the page's nid is in qp->nmask.
 *
 * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
 * in the invert of qp->nmask.
 */
static inline bool queue_pages_required(struct page *page,
                                        struct queue_pages *qp)
{
        int nid = page_to_nid(page);
        unsigned long flags = qp->flags;

        return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
}
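
/*
 * For instance (editor's note): with qp->nmask = {1} and MPOL_MF_INVERT
 * clear, a page on node 1 passes and a page on node 0 does not; setting
 * MPOL_MF_INVERT flips both answers.
 */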

/*
 * queue_pages_pmd() has four possible return values:
 * 0 - pages are placed on the right node or queued successfully.
 * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * 2 - THP was split.
 * -EIO - the PMD is a migration entry, or only MPOL_MF_STRICT was
 *        specified and an existing page was already on a node that
 *        does not follow the policy.
 */
static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
                                unsigned long end, struct mm_walk *walk)
{
        int ret = 0;
        struct page *page;
        struct queue_pages *qp = walk->private;
        unsigned long flags;

        if (unlikely(is_pmd_migration_entry(*pmd))) {
                ret = -EIO;
                goto unlock;
        }
        page = pmd_page(*pmd);
        if (is_huge_zero_page(page)) {
                spin_unlock(ptl);
                __split_huge_pmd(walk->vma, pmd, addr, false, NULL);
                ret = 2;
                goto out;
        }
        if (!queue_pages_required(page, qp))
                goto unlock;

        flags = qp->flags;
        /* go to thp migration */
        if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
                if (!vma_migratable(walk->vma) ||
                    migrate_page_add(page, qp->pagelist, flags)) {
                        ret = 1;
                        goto unlock;
                }
        } else
                ret = -EIO;
unlock:
        spin_unlock(ptl);
out:
        return ret;
}

/*
 * Scan through pages checking if pages follow certain conditions,
 * and move them to the pagelist if they do.
 *
 * queue_pages_pte_range() has three possible return values:
 * 0 - pages are placed on the right node or queued successfully.
 * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
 *        on a node that does not follow the policy.
 */
static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
                        unsigned long end, struct mm_walk *walk)
{
        struct vm_area_struct *vma = walk->vma;
        struct page *page;
        struct queue_pages *qp = walk->private;
        unsigned long flags = qp->flags;
        int ret;
        bool has_unmovable = false;
        pte_t *pte, *mapped_pte;
        spinlock_t *ptl;

        ptl = pmd_trans_huge_lock(pmd, vma);
        if (ptl) {
                ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
                if (ret != 2)
                        return ret;
        }
        /* THP was split, fall through to pte walk */

        if (pmd_trans_unstable(pmd))
                return 0;

        mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
        for (; addr != end; pte++, addr += PAGE_SIZE) {
                if (!pte_present(*pte))
                        continue;
                page = vm_normal_page(vma, addr, *pte);
                if (!page)
                        continue;
                /*
                 * vm_normal_page() filters out zero pages, but there might
                 * still be PageReserved pages to skip, perhaps in a VDSO.
                 */
                if (PageReserved(page))
                        continue;
                if (!queue_pages_required(page, qp))
                        continue;
                if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
                        /* MPOL_MF_STRICT must be specified if we get here */
                        if (!vma_migratable(vma)) {
                                has_unmovable = true;
                                break;
                        }

                        /*
                         * Do not abort immediately since there may be
                         * temporary off LRU pages in the range. Still
                         * need migrate other LRU pages.
                         */
                        if (migrate_page_add(page, qp->pagelist, flags))
                                has_unmovable = true;
                } else
                        break;
        }
        pte_unmap_unlock(mapped_pte, ptl);
        cond_resched();

        if (has_unmovable)
                return 1;

        return addr != end ? -EIO : 0;
}

static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
                               unsigned long addr, unsigned long end,
                               struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
        struct queue_pages *qp = walk->private;
        unsigned long flags = qp->flags;
        struct page *page;
        spinlock_t *ptl;
        pte_t entry;

        ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
        entry = huge_ptep_get(pte);
        if (!pte_present(entry))
                goto unlock;
        page = pte_page(entry);
        if (!queue_pages_required(page, qp))
                goto unlock;
        /* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
        if (flags & (MPOL_MF_MOVE_ALL) ||
            (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
                isolate_huge_page(page, qp->pagelist);
unlock:
        spin_unlock(ptl);
#else
        BUG();
#endif
        return 0;
}
#ifdef CONFIG_NUMA_BALANCING
/*
* This is used to mark a range of virtual addresses to be inaccessible.
* These are later cleared by a NUMA hinting fault. Depending on these
* faults, pages may be migrated for better NUMA placement.
*
* This is assuming that NUMA faults are handled using PROT_NONE. If
* an architecture makes a different choice, it will need further
* changes to the core.
*/
unsigned long change_prot_numa(struct vm_area_struct *vma,
unsigned long addr, unsigned long end)
{
int nr_updated;
vm_write_begin(vma);
nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
if (nr_updated)
count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
vm_write_end(vma);
return nr_updated;
}
#else
static unsigned long change_prot_numa(struct vm_area_struct *vma,
unsigned long addr, unsigned long end)
{
return 0;
}
#endif /* CONFIG_NUMA_BALANCING */
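/*
 * walk_page_range() test_walk callback: decide whether to walk over the
 * current vma. Returns 0 to walk the vma's page tables, 1 to skip the
 * vma, or -EFAULT when the range contains a hole and MPOL_MF_DISCONTIG_OK
 * was not specified.
 */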
static int queue_pages_test_walk(unsigned long start, unsigned long end,
struct mm_walk *walk)
{
struct vm_area_struct *vma = walk->vma;
struct queue_pages *qp = walk->private;
unsigned long endvma = vma->vm_end;
unsigned long flags = qp->flags;
/*
	 * Even for an unmigratable vma we must check MPOL_MF_STRICT,
	 * so that -EIO can be returned when required.
*/
if (!vma_migratable(vma) &&
!(flags & MPOL_MF_STRICT))
return 1;
if (endvma > end)
endvma = end;
if (vma->vm_start > start)
start = vma->vm_start;
if (!(flags & MPOL_MF_DISCONTIG_OK)) {
if (!vma->vm_next && vma->vm_end < end)
return -EFAULT;
if (qp->prev && qp->prev->vm_end < vma->vm_start)
return -EFAULT;
}
qp->prev = vma;
if (flags & MPOL_MF_LAZY) {
/* Similar to task_numa_work, skip inaccessible VMAs */
if (!is_vm_hugetlb_page(vma) &&
(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) &&
!(vma->vm_flags & VM_MIXEDMAP))
change_prot_numa(vma, start, endvma);
return 1;
}
/* queue pages from current vma */
if (flags & MPOL_MF_VALID)
return 0;
return 1;
}
/*
* Walk through page tables and collect pages to be migrated.
*
 * If pages found in a given range are on a set of nodes (determined by
 * @nodes and @flags), they are isolated and queued to the pagelist which
 * is passed via @private.
 *
 * queue_pages_range() has three possible return values:
 * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
* 0 - queue pages successfully or no misplaced page.
* errno - i.e. misplaced pages with MPOL_MF_STRICT specified (-EIO) or
* memory range specified by nodemask and maxnode points outside
* your accessible address space (-EFAULT)
*/
static int
queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
nodemask_t *nodes, unsigned long flags,
struct list_head *pagelist)
{
struct queue_pages qp = {
.pagelist = pagelist,
.flags = flags,
.nmask = nodes,
.prev = NULL,
};
struct mm_walk queue_pages_walk = {
.hugetlb_entry = queue_pages_hugetlb,
.pmd_entry = queue_pages_pte_range,
.test_walk = queue_pages_test_walk,
.mm = mm,
.private = &qp,
};
return walk_page_range(start, end, &queue_pages_walk);
}
/*
* Apply policy to a single VMA
* This must be called with the mmap_sem held for writing.
*/
static int vma_replace_policy(struct vm_area_struct *vma,
struct mempolicy *pol)
{
int err;
struct mempolicy *old;
struct mempolicy *new;
pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
vma->vm_start, vma->vm_end, vma->vm_pgoff,
vma->vm_ops, vma->vm_file,
vma->vm_ops ? vma->vm_ops->set_policy : NULL);
new = mpol_dup(pol);
if (IS_ERR(new))
return PTR_ERR(new);
vm_write_begin(vma);
if (vma->vm_ops && vma->vm_ops->set_policy) {
err = vma->vm_ops->set_policy(vma, new);
if (err)
goto err_out;
}
old = vma->vm_policy;
/*
* The speculative page fault handler accesses this field without
	 * holding the mmap_sem.
*/
WRITE_ONCE(vma->vm_policy, new);
vm_write_end(vma);
mpol_put(old);
return 0;
err_out:
vm_write_end(vma);
mpol_put(new);
return err;
}
/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct mm_struct *mm, unsigned long start,
unsigned long end, struct mempolicy *new_pol)
{
struct vm_area_struct *next;
struct vm_area_struct *prev;
struct vm_area_struct *vma;
int err = 0;
pgoff_t pgoff;
unsigned long vmstart;
unsigned long vmend;
vma = find_vma(mm, start);
if (!vma || vma->vm_start > start)
return -EFAULT;
prev = vma->vm_prev;
if (start > vma->vm_start)
prev = vma;
for (; vma && vma->vm_start < end; prev = vma, vma = next) {
next = vma->vm_next;
vmstart = max(start, vma->vm_start);
vmend = min(end, vma->vm_end);
if (mpol_equal(vma_policy(vma), new_pol))
continue;
pgoff = vma->vm_pgoff +
((vmstart - vma->vm_start) >> PAGE_SHIFT);
prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
vma->anon_vma, vma->vm_file, pgoff,
new_pol, vma->vm_userfaultfd_ctx,
vma_get_anon_name(vma));
if (prev) {
vma = prev;
next = vma->vm_next;
if (mpol_equal(vma_policy(vma), new_pol))
continue;
/* vma_merge() joined vma && vma->next, case 8 */
goto replace;
}
if (vma->vm_start != vmstart) {
err = split_vma(vma->vm_mm, vma, vmstart, 1);
if (err)
goto out;
}
if (vma->vm_end != vmend) {
err = split_vma(vma->vm_mm, vma, vmend, 0);
if (err)
goto out;
}
replace:
err = vma_replace_policy(vma, new_pol);
if (err)
goto out;
}
out:
return err;
}
/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
nodemask_t *nodes)
{
struct mempolicy *new, *old;
NODEMASK_SCRATCH(scratch);
int ret;
if (!scratch)
return -ENOMEM;
new = mpol_new(mode, flags, nodes);
if (IS_ERR(new)) {
ret = PTR_ERR(new);
goto out;
}
task_lock(current);
ret = mpol_set_nodemask(new, nodes, scratch);
if (ret) {
task_unlock(current);
mpol_put(new);
goto out;
}
old = current->mempolicy;
current->mempolicy = new;
if (new && new->mode == MPOL_INTERLEAVE)
current->il_prev = MAX_NUMNODES-1;
task_unlock(current);
mpol_put(old);
ret = 0;
out:
NODEMASK_SCRATCH_FREE(scratch);
return ret;
}
/*
* Return nodemask for policy for get_mempolicy() query
*
* Called with task's alloc_lock held
*/
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
nodes_clear(*nodes);
if (p == &default_policy)
return;
switch (p->mode) {
case MPOL_BIND:
/* Fall through */
case MPOL_INTERLEAVE:
*nodes = p->v.nodes;
break;
case MPOL_PREFERRED:
if (!(p->flags & MPOL_F_LOCAL))
node_set(p->v.preferred_node, *nodes);
/* else return empty node mask for local allocation */
break;
default:
BUG();
}
}
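/*
 * Return the node id of the page mapped at @addr in the current task's
 * address space, or a negative errno if the page could not be pinned.
 */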
static int lookup_node(unsigned long addr)
{
struct page *p;
int err;
err = get_user_pages(addr & PAGE_MASK, 1, 0, &p, NULL);
if (err >= 0) {
err = page_to_nid(p);
put_page(p);
}
return err;
}
/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
unsigned long addr, unsigned long flags)
{
int err;
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma = NULL;
struct mempolicy *pol = current->mempolicy;
if (flags &
~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
return -EINVAL;
if (flags & MPOL_F_MEMS_ALLOWED) {
if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
return -EINVAL;
*policy = 0; /* just so it's initialized */
task_lock(current);
*nmask = cpuset_current_mems_allowed;
task_unlock(current);
return 0;
}
if (flags & MPOL_F_ADDR) {
/*
* Do NOT fall back to task policy if the
* vma/shared policy at addr is NULL. We
* want to return MPOL_DEFAULT in this case.
*/
down_read(&mm->mmap_sem);
vma = find_vma_intersection(mm, addr, addr+1);
if (!vma) {
up_read(&mm->mmap_sem);
return -EFAULT;
}
if (vma->vm_ops && vma->vm_ops->get_policy)
pol = vma->vm_ops->get_policy(vma, addr);
else
pol = vma->vm_policy;
} else if (addr)
return -EINVAL;
if (!pol)
pol = &default_policy; /* indicates default behavior */
if (flags & MPOL_F_NODE) {
if (flags & MPOL_F_ADDR) {
err = lookup_node(addr);
if (err < 0)
goto out;
*policy = err;
} else if (pol == current->mempolicy &&
pol->mode == MPOL_INTERLEAVE) {
*policy = next_node_in(current->il_prev, pol->v.nodes);
} else {
err = -EINVAL;
goto out;
}
} else {
*policy = pol == &default_policy ? MPOL_DEFAULT :
pol->mode;
/*
* Internal mempolicy flags must be masked off before exposing
* the policy to userspace.
*/
*policy |= (pol->flags & MPOL_MODE_FLAGS);
}
err = 0;
if (nmask) {
if (mpol_store_user_nodemask(pol)) {
*nmask = pol->w.user_nodemask;
} else {
task_lock(current);
get_policy_nodemask(pol, nmask);
task_unlock(current);
}
}
out:
mpol_cond_put(pol);
if (vma)
up_read(&current->mm->mmap_sem);
return err;
}
#ifdef CONFIG_MIGRATION
/*
 * page migration; thp tail pages can be passed.
*/
static int migrate_page_add(struct page *page, struct list_head *pagelist,
unsigned long flags)
{
struct page *head = compound_head(page);
/*
* Avoid migrating a page that is shared with others.
*/
if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
if (!isolate_lru_page(head)) {
list_add_tail(&head->lru, pagelist);
mod_node_page_state(page_pgdat(head),
NR_ISOLATED_ANON + page_is_file_cache(head),
hpage_nr_pages(head));
} else if (flags & MPOL_MF_STRICT) {
/*
			 * A non-movable page may reach here. Also, there may
			 * be pages that are temporarily off the LRU, or
			 * non-LRU movable pages. Treat them as unmovable
			 * pages since they can't be isolated, so they can't
			 * be moved at the moment. We should return -EIO for
			 * this case too.
*/
return -EIO;
}
}
return 0;
}
/* page allocation callback for NUMA node migration */
struct page *alloc_new_node_page(struct page *page, unsigned long node)
{
if (PageHuge(page))
return alloc_huge_page_node(page_hstate(compound_head(page)),
node);
else if (PageTransHuge(page)) {
struct page *thp;
thp = alloc_pages_node(node,
(GFP_TRANSHUGE | __GFP_THISNODE),
HPAGE_PMD_ORDER);
if (!thp)
return NULL;
prep_transhuge_page(thp);
return thp;
} else
return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
__GFP_THISNODE, 0);
}
/*
* Migrate pages from one node to a target node.
* Returns error or the number of pages not migrated.
*/
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
int flags)
{
nodemask_t nmask;
LIST_HEAD(pagelist);
int err = 0;
nodes_clear(nmask);
node_set(source, nmask);
/*
* This does not "check" the range but isolates all pages that
* need migration. Between passing in the full user address
	 * space range and MPOL_MF_DISCONTIG_OK, this call cannot fail.
*/
VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
flags | MPOL_MF_DISCONTIG_OK, &pagelist);
if (!list_empty(&pagelist)) {
err = migrate_pages(&pagelist, alloc_new_node_page, NULL, dest,
MIGRATE_SYNC, MR_SYSCALL);
if (err)
putback_movable_pages(&pagelist);
}
return err;
}
/*
* Move pages between the two nodesets so as to preserve the physical
* layout as much as possible.
*
 * Returns the number of pages that could not be moved.
*/
int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
const nodemask_t *to, int flags)
{
int busy = 0;
int err;
nodemask_t tmp;
err = migrate_prep();
if (err)
return err;
down_read(&mm->mmap_sem);
/*
* Find a 'source' bit set in 'tmp' whose corresponding 'dest'
* bit in 'to' is not also set in 'tmp'. Clear the found 'source'
* bit in 'tmp', and return that <source, dest> pair for migration.
* The pair of nodemasks 'to' and 'from' define the map.
*
* If no pair of bits is found that way, fallback to picking some
* pair of 'source' and 'dest' bits that are not the same. If the
* 'source' and 'dest' bits are the same, this represents a node
* that will be migrating to itself, so no pages need move.
*
* If no bits are left in 'tmp', or if all remaining bits left
* in 'tmp' correspond to the same bit in 'to', return false
* (nothing left to migrate).
*
* This lets us pick a pair of nodes to migrate between, such that
* if possible the dest node is not already occupied by some other
* source node, minimizing the risk of overloading the memory on a
 * node that would happen if we migrated incoming memory to a node
 * before migrating outgoing memory sourced from that same node.
*
* A single scan of tmp is sufficient. As we go, we remember the
* most recent <s, d> pair that moved (s != d). If we find a pair
* that not only moved, but what's better, moved to an empty slot
* (d is not set in tmp), then we break out then, with that pair.
 * Otherwise when we finish scanning from tmp, we at least have the
* most recent <s, d> pair that moved. If we get all the way through
* the scan of tmp without finding any node that moved, much less
* moved to an empty node, then there is nothing left worth migrating.
*/
tmp = *from;
while (!nodes_empty(tmp)) {
int s,d;
int source = NUMA_NO_NODE;
int dest = 0;
for_each_node_mask(s, tmp) {
/*
* do_migrate_pages() tries to maintain the relative
* node relationship of the pages established between
* threads and memory areas.
*
* However if the number of source nodes is not equal to
* the number of destination nodes we can not preserve
* this node relative relationship. In that case, skip
* copying memory from a node that is in the destination
* mask.
*
* Example: [2,3,4] -> [3,4,5] moves everything.
			 *          [0-7] -> [3,4,5] moves only 0,1,2,6,7.
*/
if ((nodes_weight(*from) != nodes_weight(*to)) &&
(node_isset(s, *to)))
continue;
d = node_remap(s, *from, *to);
if (s == d)
continue;
source = s; /* Node moved. Memorize */
dest = d;
/* dest not in remaining from nodes? */
if (!node_isset(dest, tmp))
break;
}
if (source == NUMA_NO_NODE)
break;
node_clear(source, tmp);
err = migrate_to_node(mm, source, dest, flags);
if (err > 0)
busy += err;
if (err < 0)
break;
}
up_read(&mm->mmap_sem);
if (err < 0)
return err;
return busy;
}
/*
* Allocate a new page for page migration based on vma policy.
* Start by assuming the page is mapped by the same vma as contains @start.
* Search forward from there, if not. N.B., this assumes that the
* list of pages handed to migrate_pages()--which is how we get here--
* is in virtual address order.
*/
static struct page *new_page(struct page *page, unsigned long start)
{
struct vm_area_struct *vma;
unsigned long uninitialized_var(address);
vma = find_vma(current->mm, start);
while (vma) {
address = page_address_in_vma(page, vma);
if (address != -EFAULT)
break;
vma = vma->vm_next;
}
if (PageHuge(page)) {
return alloc_huge_page_vma(page_hstate(compound_head(page)),
vma, address);
} else if (PageTransHuge(page)) {
struct page *thp;
thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
HPAGE_PMD_ORDER);
if (!thp)
return NULL;
prep_transhuge_page(thp);
return thp;
}
/*
* if !vma, alloc_page_vma() will use task or system default policy
*/
return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL,
vma, address);
}
#else
static int migrate_page_add(struct page *page, struct list_head *pagelist,
unsigned long flags)
{
return -EIO;
}
int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
const nodemask_t *to, int flags)
{
return -ENOSYS;
}
static struct page *new_page(struct page *page, unsigned long start)
{
return NULL;
}
#endif
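/*
 * Apply the policy described by @mode/@mode_flags/@nmask to the range
 * [start, start + len) and, depending on @flags, migrate the pages that
 * do not conform to it. Takes mmap_sem for writing.
 */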
static long do_mbind(unsigned long start, unsigned long len,
unsigned short mode, unsigned short mode_flags,
nodemask_t *nmask, unsigned long flags)
{
struct mm_struct *mm = current->mm;
struct mempolicy *new;
unsigned long end;
int err;
int ret;
LIST_HEAD(pagelist);
if (flags & ~(unsigned long)MPOL_MF_VALID)
return -EINVAL;
if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
return -EPERM;
if (start & ~PAGE_MASK)
return -EINVAL;
if (mode == MPOL_DEFAULT)
flags &= ~MPOL_MF_STRICT;
len = (len + PAGE_SIZE - 1) & PAGE_MASK;
end = start + len;
if (end < start)
return -EINVAL;
if (end == start)
return 0;
new = mpol_new(mode, mode_flags, nmask);
if (IS_ERR(new))
return PTR_ERR(new);
if (flags & MPOL_MF_LAZY)
new->flags |= MPOL_F_MOF;
/*
* If we are using the default policy then operation
* on discontinuous address spaces is okay after all
*/
if (!new)
flags |= MPOL_MF_DISCONTIG_OK;
pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
start, start + len, mode, mode_flags,
nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
err = migrate_prep();
if (err)
goto mpol_out;
}
{
NODEMASK_SCRATCH(scratch);
if (scratch) {
down_write(&mm->mmap_sem);
task_lock(current);
err = mpol_set_nodemask(new, nmask, scratch);
task_unlock(current);
if (err)
up_write(&mm->mmap_sem);
} else
err = -ENOMEM;
NODEMASK_SCRATCH_FREE(scratch);
}
if (err)
goto mpol_out;
ret = queue_pages_range(mm, start, end, nmask,
flags | MPOL_MF_INVERT, &pagelist);
if (ret < 0) {
err = ret;
goto up_out;
}
err = mbind_range(mm, start, end, new);
if (!err) {
int nr_failed = 0;
if (!list_empty(&pagelist)) {
WARN_ON_ONCE(flags & MPOL_MF_LAZY);
nr_failed = migrate_pages(&pagelist, new_page, NULL,
start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
if (nr_failed)
putback_movable_pages(&pagelist);
}
if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
err = -EIO;
} else {
up_out:
if (!list_empty(&pagelist))
putback_movable_pages(&pagelist);
}
up_write(&mm->mmap_sem);
mpol_out:
mpol_put(new);
return err;
}
/*
* User space interface with variable sized bitmaps for nodelists.
*/
/* Copy a node mask from user space. */
static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
unsigned long maxnode)
{
unsigned long k;
unsigned long t;
unsigned long nlongs;
unsigned long endmask;
--maxnode;
nodes_clear(*nodes);
if (maxnode == 0 || !nmask)
return 0;
if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
return -EINVAL;
nlongs = BITS_TO_LONGS(maxnode);
if ((maxnode % BITS_PER_LONG) == 0)
endmask = ~0UL;
else
endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
/*
* When the user specified more nodes than supported just check
* if the non supported part is all zero.
*
	 * If maxnode needs more longs than MAX_NUMNODES, check the bits in
	 * that extra area first, then go through the remaining bits at or
	 * above MAX_NUMNODES. Otherwise, just check bits [MAX_NUMNODES,
	 * maxnode).
*/
if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
if (get_user(t, nmask + k))
return -EFAULT;
if (k == nlongs - 1) {
if (t & endmask)
return -EINVAL;
} else if (t)
return -EINVAL;
}
nlongs = BITS_TO_LONGS(MAX_NUMNODES);
endmask = ~0UL;
}
if (maxnode > MAX_NUMNODES && MAX_NUMNODES % BITS_PER_LONG != 0) {
unsigned long valid_mask = endmask;
valid_mask &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
if (get_user(t, nmask + nlongs - 1))
return -EFAULT;
if (t & valid_mask)
return -EINVAL;
}
if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
return -EFAULT;
nodes_addr(*nodes)[nlongs-1] &= endmask;
return 0;
}
/* Copy a kernel node mask to user space */
static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
nodemask_t *nodes)
{
unsigned long copy = ALIGN(maxnode-1, 64) / 8;
unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
if (copy > nbytes) {
if (copy > PAGE_SIZE)
return -EINVAL;
if (clear_user((char __user *)mask + nbytes, copy - nbytes))
return -EFAULT;
copy = nbytes;
}
return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
}
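/*
 * Split the mode flags off @mode, copy the node mask in from user space
 * and hand off to do_mbind().
 */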
static long kernel_mbind(unsigned long start, unsigned long len,
unsigned long mode, const unsigned long __user *nmask,
unsigned long maxnode, unsigned int flags)
{
nodemask_t nodes;
int err;
unsigned short mode_flags;
start = untagged_addr(start);
mode_flags = mode & MPOL_MODE_FLAGS;
mode &= ~MPOL_MODE_FLAGS;
if (mode >= MPOL_MAX)
return -EINVAL;
if ((mode_flags & MPOL_F_STATIC_NODES) &&
(mode_flags & MPOL_F_RELATIVE_NODES))
return -EINVAL;
err = get_nodes(&nodes, nmask, maxnode);
if (err)
return err;
return do_mbind(start, len, mode, mode_flags, &nodes, flags);
}
SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
unsigned long, mode, const unsigned long __user *, nmask,
unsigned long, maxnode, unsigned int, flags)
{
return kernel_mbind(start, len, mode, nmask, maxnode, flags);
}
/* Set the process memory policy */
static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
unsigned long maxnode)
{
int err;
nodemask_t nodes;
unsigned short flags;
flags = mode & MPOL_MODE_FLAGS;
mode &= ~MPOL_MODE_FLAGS;
if ((unsigned int)mode >= MPOL_MAX)
return -EINVAL;
if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
return -EINVAL;
err = get_nodes(&nodes, nmask, maxnode);
if (err)
return err;
return do_set_mempolicy(mode, flags, &nodes);
}
SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
unsigned long, maxnode)
{
return kernel_set_mempolicy(mode, nmask, maxnode);
}
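/*
 * Move pages of process @pid from @old_nodes to @new_nodes, after the
 * ptrace, cpuset and LSM permission checks below.
 */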
static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
const unsigned long __user *old_nodes,
const unsigned long __user *new_nodes)
{
struct mm_struct *mm = NULL;
struct task_struct *task;
nodemask_t task_nodes;
int err;
nodemask_t *old;
nodemask_t *new;
NODEMASK_SCRATCH(scratch);
if (!scratch)
return -ENOMEM;
old = &scratch->mask1;
new = &scratch->mask2;
err = get_nodes(old, old_nodes, maxnode);
if (err)
goto out;
err = get_nodes(new, new_nodes, maxnode);
if (err)
goto out;
/* Find the mm_struct */
rcu_read_lock();
task = pid ? find_task_by_vpid(pid) : current;
if (!task) {
rcu_read_unlock();
err = -ESRCH;
goto out;
}
get_task_struct(task);
err = -EINVAL;
/*
* Check if this process has the right to modify the specified process.
* Use the regular "ptrace_may_access()" checks.
*/
if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
rcu_read_unlock();
err = -EPERM;
goto out_put;
}
rcu_read_unlock();
task_nodes = cpuset_mems_allowed(task);
/* Is the user allowed to access the target nodes? */
if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
err = -EPERM;
goto out_put;
}
task_nodes = cpuset_mems_allowed(current);
nodes_and(*new, *new, task_nodes);
if (nodes_empty(*new))
goto out_put;
nodes_and(*new, *new, node_states[N_MEMORY]);
if (nodes_empty(*new))
goto out_put;
err = security_task_movememory(task);
if (err)
goto out_put;
mm = get_task_mm(task);
put_task_struct(task);
if (!mm) {
err = -EINVAL;
goto out;
}
err = do_migrate_pages(mm, old, new,
capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
mmput(mm);
out:
NODEMASK_SCRATCH_FREE(scratch);
return err;
out_put:
put_task_struct(task);
goto out;
}
SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
const unsigned long __user *, old_nodes,
const unsigned long __user *, new_nodes)
{
return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
}
/* Retrieve NUMA policy */
static int kernel_get_mempolicy(int __user *policy,
unsigned long __user *nmask,
unsigned long maxnode,
unsigned long addr,
unsigned long flags)
{
int err;
int uninitialized_var(pval);
nodemask_t nodes;
addr = untagged_addr(addr);
if (nmask != NULL && maxnode < nr_node_ids)
return -EINVAL;
err = do_get_mempolicy(&pval, &nodes, addr, flags);
if (err)
return err;
if (policy && put_user(pval, policy))
return -EFAULT;
if (nmask)
err = copy_nodes_to_user(nmask, maxnode, &nodes);
return err;
}
SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
unsigned long __user *, nmask, unsigned long, maxnode,
unsigned long, addr, unsigned long, flags)
{
return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
compat_ulong_t __user *, nmask,
compat_ulong_t, maxnode,
compat_ulong_t, addr, compat_ulong_t, flags)
{
long err;
unsigned long __user *nm = NULL;
unsigned long nr_bits, alloc_size;
DECLARE_BITMAP(bm, MAX_NUMNODES);
nr_bits = min_t(unsigned long, maxnode-1, nr_node_ids);
alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
if (nmask)
nm = compat_alloc_user_space(alloc_size);
err = kernel_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
if (!err && nmask) {
unsigned long copy_size;
copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
err = copy_from_user(bm, nm, copy_size);
/* ensure entire bitmap is zeroed */
err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
err |= compat_put_bitmap(nmask, bm, nr_bits);
}
return err;
}
COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
compat_ulong_t, maxnode)
{
unsigned long __user *nm = NULL;
unsigned long nr_bits, alloc_size;
DECLARE_BITMAP(bm, MAX_NUMNODES);
nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
if (nmask) {
if (compat_get_bitmap(bm, nmask, nr_bits))
return -EFAULT;
nm = compat_alloc_user_space(alloc_size);
if (copy_to_user(nm, bm, alloc_size))
return -EFAULT;
}
return kernel_set_mempolicy(mode, nm, nr_bits+1);
}
COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
compat_ulong_t, mode, compat_ulong_t __user *, nmask,
compat_ulong_t, maxnode, compat_ulong_t, flags)
{
unsigned long __user *nm = NULL;
unsigned long nr_bits, alloc_size;
nodemask_t bm;
nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
if (nmask) {
if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
return -EFAULT;
nm = compat_alloc_user_space(alloc_size);
if (copy_to_user(nm, nodes_addr(bm), alloc_size))
return -EFAULT;
}
return kernel_mbind(start, len, mode, nm, nr_bits+1, flags);
}
COMPAT_SYSCALL_DEFINE4(migrate_pages, compat_pid_t, pid,
compat_ulong_t, maxnode,
const compat_ulong_t __user *, old_nodes,
const compat_ulong_t __user *, new_nodes)
{
unsigned long __user *old = NULL;
unsigned long __user *new = NULL;
nodemask_t tmp_mask;
unsigned long nr_bits;
unsigned long size;
nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES);
size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
if (old_nodes) {
if (compat_get_bitmap(nodes_addr(tmp_mask), old_nodes, nr_bits))
return -EFAULT;
old = compat_alloc_user_space(new_nodes ? size * 2 : size);
if (new_nodes)
new = old + size / sizeof(unsigned long);
if (copy_to_user(old, nodes_addr(tmp_mask), size))
return -EFAULT;
}
if (new_nodes) {
if (compat_get_bitmap(nodes_addr(tmp_mask), new_nodes, nr_bits))
return -EFAULT;
if (new == NULL)
new = compat_alloc_user_space(size);
if (copy_to_user(new, nodes_addr(tmp_mask), size))
return -EFAULT;
}
return kernel_migrate_pages(pid, nr_bits + 1, old, new);
}
#endif /* CONFIG_COMPAT */
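/*
 * Return the policy of @vma without falling back to the task or system
 * default policy. Policies returned by a get_policy() vm_op, or marked
 * MPOL_F_SHARED, carry an extra reference that the caller must drop with
 * mpol_cond_put().
 */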
struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
unsigned long addr)
{
struct mempolicy *pol;
if (!vma)
return NULL;
if (vma->vm_ops && vma->vm_ops->get_policy)
return vma->vm_ops->get_policy(vma, addr);
/*
* This could be called without holding the mmap_sem in the
* speculative page fault handler's path.
*/
pol = READ_ONCE(vma->vm_policy);
if (pol) {
/*
* shmem_alloc_page() passes MPOL_F_SHARED policy with
* a pseudo vma whose vma->vm_ops=NULL. Take a reference
* count on these policies which will be dropped by
* mpol_cond_put() later
*/
if (mpol_needs_cond_ref(pol))
mpol_get(pol);
}
return pol;
}
/*
* get_vma_policy(@vma, @addr)
* @vma: virtual memory area whose policy is sought
* @addr: address in @vma for shared policy lookup
*
* Returns effective policy for a VMA at specified address.
* Falls back to current->mempolicy or system default policy, as necessary.
* Shared policies [those marked as MPOL_F_SHARED] require an extra reference
* count--added by the get_policy() vm_op, as appropriate--to protect against
* freeing by another task. It is the caller's responsibility to free the
* extra reference for shared policies.
*/
static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
unsigned long addr)
{
struct mempolicy *pol = __get_vma_policy(vma, addr);
if (!pol)
pol = get_task_policy(current);
return pol;
}
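/* Return true if the effective policy for @vma has MPOL_F_MOF set. */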
bool vma_policy_mof(struct vm_area_struct *vma)
{
struct mempolicy *pol;
if (vma->vm_ops && vma->vm_ops->get_policy) {
bool ret = false;
pol = vma->vm_ops->get_policy(vma, vma->vm_start);
if (pol && (pol->flags & MPOL_F_MOF))
ret = true;
mpol_cond_put(pol);
return ret;
}
pol = vma->vm_policy;
if (!pol)
pol = get_task_policy(current);
return pol->flags & MPOL_F_MOF;
}
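/*
 * Return true if allocations from @zone should have @policy's nodemask
 * applied; see the comment inside for the movable-memory-only case.
 */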
static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
{
enum zone_type dynamic_policy_zone = policy_zone;
BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
/*
	 * If policy->v.nodes has movable memory only, we apply the policy
	 * only when gfp_zone(gfp) == ZONE_MOVABLE.
	 *
	 * policy->v.nodes intersects with node_states[N_MEMORY], so if the
	 * following test fails, it implies policy->v.nodes has movable
	 * memory only.
*/
if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
dynamic_policy_zone = ZONE_MOVABLE;
return zone >= dynamic_policy_zone;
}
/*
* Return a nodemask representing a mempolicy for filtering nodes for
* page allocation
*/
static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
{
/* Lower zones don't get a nodemask applied for MPOL_BIND */
if (unlikely(policy->mode == MPOL_BIND) &&
apply_policy_zone(policy, gfp_zone(gfp)) &&
cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
return &policy->v.nodes;
return NULL;
}
/* Return the node id preferred by the given mempolicy, or the given id */
static int policy_node(gfp_t gfp, struct mempolicy *policy,
int nd)
{
if (policy->mode == MPOL_PREFERRED && !(policy->flags & MPOL_F_LOCAL))
nd = policy->v.preferred_node;
else {
/*
* __GFP_THISNODE shouldn't even be used with the bind policy
* because we might easily break the expectation to stay on the
* requested node and not break the policy.
*/
WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
}
return nd;
}
/* Do dynamic interleaving for a process */
static unsigned interleave_nodes(struct mempolicy *policy)
{
unsigned next;
struct task_struct *me = current;
next = next_node_in(me->il_prev, policy->v.nodes);
if (next < MAX_NUMNODES)
me->il_prev = next;
return next;
}
/*
* Depending on the memory policy provide a node from which to allocate the
* next slab entry.
*/
unsigned int mempolicy_slab_node(void)
{
struct mempolicy *policy;
int node = numa_mem_id();
if (in_interrupt())
return node;
policy = current->mempolicy;
if (!policy || policy->flags & MPOL_F_LOCAL)
return node;
switch (policy->mode) {
case MPOL_PREFERRED:
/*
* handled MPOL_F_LOCAL above
*/
return policy->v.preferred_node;
case MPOL_INTERLEAVE:
return interleave_nodes(policy);
case MPOL_BIND: {
struct zoneref *z;
/*
* Follow bind policy behavior and start allocation at the
* first node.
*/
struct zonelist *zonelist;
enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
z = first_zones_zonelist(zonelist, highest_zoneidx,
&policy->v.nodes);
return z->zone ? zone_to_nid(z->zone) : node;
}
default:
BUG();
}
}
/*
* Do static interleaving for a VMA with known offset @n. Returns the n'th
* node in pol->v.nodes (starting from n=0), wrapping around if n exceeds the
* number of present nodes.
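 * E.g. with pol->v.nodes == {0,2,5} and n == 4, nnodes == 3, so
 * target == 1 and node 2 is returned.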
*/
static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
{
unsigned nnodes = nodes_weight(pol->v.nodes);
unsigned target;
int i;
int nid;
if (!nnodes)
return numa_node_id();
target = (unsigned int)n % nnodes;
nid = first_node(pol->v.nodes);
for (i = 0; i < target; i++)
nid = next_node(nid, pol->v.nodes);
return nid;
}
/* Determine a node number for interleave */
static inline unsigned interleave_nid(struct mempolicy *pol,
struct vm_area_struct *vma, unsigned long addr, int shift)
{
if (vma) {
unsigned long off;
/*
* for small pages, there is no difference between
* shift and PAGE_SHIFT, so the bit-shift is safe.
* for huge pages, since vm_pgoff is in units of small
* pages, we need to shift off the always 0 bits to get
* a useful offset.
*/
BUG_ON(shift < PAGE_SHIFT);
off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
off += (addr - vma->vm_start) >> shift;
return offset_il_node(pol, off);
} else
return interleave_nodes(pol);
}
#ifdef CONFIG_HUGETLBFS
/*
 * huge_node(@vma, @addr, @gfp_flags, @mpol, @nodemask)
* @vma: virtual memory area whose policy is sought
* @addr: address in @vma for shared policy lookup and interleave policy
* @gfp_flags: for requested zone
* @mpol: pointer to mempolicy pointer for reference counted mempolicy
* @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
*
* Returns a nid suitable for a huge page allocation and a pointer
* to the struct mempolicy for conditional unref after allocation.
 * If the effective policy is 'BIND', returns a pointer to the mempolicy's
* @nodemask for filtering the zonelist.
*
* Must be protected by read_mems_allowed_begin()
*/
int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
struct mempolicy **mpol, nodemask_t **nodemask)
{
int nid;
*mpol = get_vma_policy(vma, addr);
*nodemask = NULL; /* assume !MPOL_BIND */
if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
nid = interleave_nid(*mpol, vma, addr,
huge_page_shift(hstate_vma(vma)));
} else {
nid = policy_node(gfp_flags, *mpol, numa_node_id());
if ((*mpol)->mode == MPOL_BIND)
*nodemask = &(*mpol)->v.nodes;
}
return nid;
}
/*
* init_nodemask_of_mempolicy
*
* If the current task's mempolicy is "default" [NULL], return 'false'
* to indicate default policy. Otherwise, extract the policy nodemask
* for 'bind' or 'interleave' policy into the argument nodemask, or
* initialize the argument nodemask to contain the single node for
* 'preferred' or 'local' policy and return 'true' to indicate presence
* of non-default mempolicy.
*
* We don't bother with reference counting the mempolicy [mpol_get/put]
 * because the current task is examining its own mempolicy and a task's
* mempolicy is only ever changed by the task itself.
*
* N.B., it is the caller's responsibility to free a returned nodemask.
*/
bool init_nodemask_of_mempolicy(nodemask_t *mask)
{
struct mempolicy *mempolicy;
int nid;
if (!(mask && current->mempolicy))
return false;
task_lock(current);
mempolicy = current->mempolicy;
switch (mempolicy->mode) {
case MPOL_PREFERRED:
if (mempolicy->flags & MPOL_F_LOCAL)
nid = numa_node_id();
else
nid = mempolicy->v.preferred_node;
init_nodemask_of_node(mask, nid);
break;
case MPOL_BIND:
/* Fall through */
case MPOL_INTERLEAVE:
*mask = mempolicy->v.nodes;
break;
default:
BUG();
}
task_unlock(current);
return true;
}
#endif
/*
* mempolicy_nodemask_intersects
*
* If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
* policy. Otherwise, check for intersection between mask and the policy
 * nodemask for 'bind' or 'interleave' policy. For 'preferred' or 'local'
* policy, always return true since it may allocate elsewhere on fallback.
*
* Takes task_lock(tsk) to prevent freeing of its mempolicy.
*/
bool mempolicy_nodemask_intersects(struct task_struct *tsk,
const nodemask_t *mask)
{
struct mempolicy *mempolicy;
bool ret = true;
if (!mask)
return ret;
task_lock(tsk);
mempolicy = tsk->mempolicy;
if (!mempolicy)
goto out;
switch (mempolicy->mode) {
case MPOL_PREFERRED:
/*
		 * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to
		 * allocate from; they may fall back to other nodes when OOM.
* Thus, it's possible for tsk to have allocated memory from
* nodes in mask.
*/
break;
case MPOL_BIND:
case MPOL_INTERLEAVE:
ret = nodes_intersects(mempolicy->v.nodes, *mask);
break;
default:
BUG();
}
out:
task_unlock(tsk);
return ret;
}
/* Allocate a page in interleaved policy.
Own path because it needs to do special accounting. */
static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
unsigned nid)
{
struct page *page;
page = __alloc_pages(gfp, order, nid);
/* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
if (!static_branch_likely(&vm_numa_stat_key))
return page;
if (page && page_to_nid(page) == nid) {
preempt_disable();
__inc_numa_state(page_zone(page), NUMA_INTERLEAVE_HIT);
preempt_enable();
}
return page;
}
/**
* alloc_pages_vma - Allocate a page for a VMA.
*
* @gfp:
* %GFP_USER user allocation.
* %GFP_KERNEL kernel allocations,
* %GFP_HIGHMEM highmem/user allocations,
* %GFP_FS allocation should not call back into a file system.
* %GFP_ATOMIC don't sleep.
*
 * @order: Order of the GFP allocation.
* @vma: Pointer to VMA or NULL if not available.
* @addr: Virtual Address of the allocation. Must be inside the VMA.
* @node: Which node to prefer for allocation (modulo policy).
* @hugepage: for hugepages try only the preferred node if possible
*
* This function allocates a page from the kernel page pool and applies
* a NUMA policy associated with the VMA or the current process.
* When VMA is not NULL caller must hold down_read on the mmap_sem of the
* mm_struct of the VMA to prevent it from going away. Should be used for
* all allocations for pages that will be mapped into user space. Returns
* NULL when no page can be allocated.
*/
struct page *
alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
unsigned long addr, int node, bool hugepage)
{
struct mempolicy *pol;
struct page *page;
int preferred_nid;
nodemask_t *nmask;
pol = get_vma_policy(vma, addr);
if (pol->mode == MPOL_INTERLEAVE) {
unsigned nid;
nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
mpol_cond_put(pol);
page = alloc_page_interleave(gfp, order, nid);
goto out;
}
if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
int hpage_node = node;
/*
* For hugepage allocation and non-interleave policy which
* allows the current node (or other explicitly preferred
* node) we only try to allocate from the current/preferred
* node and don't fall back to other nodes, as the cost of
* remote accesses would likely offset THP benefits.
*
* If the policy is interleave, or does not allow the current
* node in its nodemask, we allocate the standard way.
*/
if (pol->mode == MPOL_PREFERRED &&
!(pol->flags & MPOL_F_LOCAL))
hpage_node = pol->v.preferred_node;
nmask = policy_nodemask(gfp, pol);
if (!nmask || node_isset(hpage_node, *nmask)) {
mpol_cond_put(pol);
/*
			 * We cannot invoke reclaim if __GFP_THISNODE
			 * is set. Invoking reclaim with
			 * __GFP_THISNODE set would cause THP
			 * allocations to trigger heavy swapping even
			 * though there may be tons of free memory
			 * (including potentially plenty of THP
			 * already available in the buddy) on all the
			 * other NUMA nodes.
			 *
			 * At most we could invoke compaction when
			 * __GFP_THISNODE is set (but we would need to
			 * refrain from invoking reclaim even if
			 * compaction returned COMPACT_SKIPPED because
			 * there wasn't enough memory for compaction
			 * to succeed). For now just avoid
			 * __GFP_THISNODE instead of limiting the
			 * allocation path to a strict and single
			 * compaction invocation.
			 *
			 * Supposedly if direct reclaim was enabled by
			 * the caller, the app prefers THP regardless
			 * of the node it comes from, so this would be
			 * more desirable behavior than only
			 * providing THP originated from the local
			 * node in such a case.
*/
if (!(gfp & __GFP_DIRECT_RECLAIM))
gfp |= __GFP_THISNODE;
page = __alloc_pages_node(hpage_node, gfp, order);
goto out;
}
}
nmask = policy_nodemask(gfp, pol);
preferred_nid = policy_node(gfp, pol, node);
page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
mpol_cond_put(pol);
out:
return page;
}
/**
* alloc_pages_current - Allocate pages.
*
* @gfp:
* %GFP_USER user allocation,
* %GFP_KERNEL kernel allocation,
* %GFP_HIGHMEM highmem allocation,
* %GFP_FS don't call back into a file system.
* %GFP_ATOMIC don't sleep.
* @order: Power of two of allocation size in pages. 0 is a single page.
*
 * Allocate a page from the kernel page pool. When not in
 * interrupt context, apply the current process's NUMA policy.
* Returns NULL when no page can be allocated.
*/
struct page *alloc_pages_current(gfp_t gfp, unsigned order)
{
struct mempolicy *pol = &default_policy;
struct page *page;
if (!in_interrupt() && !(gfp & __GFP_THISNODE))
pol = get_task_policy(current);
/*
* No reference counting needed for current->mempolicy
* nor system default_policy
*/
if (pol->mode == MPOL_INTERLEAVE)
page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
else
page = __alloc_pages_nodemask(gfp, order,
policy_node(gfp, pol, numa_node_id()),
policy_nodemask(gfp, pol));
return page;
}
EXPORT_SYMBOL(alloc_pages_current);
int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
{
struct mempolicy *pol = mpol_dup(vma_policy(src));
if (IS_ERR(pol))
return PTR_ERR(pol);
dst->vm_policy = pol;
return 0;
}
/*
* If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
 * rebinds the mempolicy it's copying by calling mpol_rebind_policy()
* with the mems_allowed returned by cpuset_mems_allowed(). This
* keeps mempolicies cpuset relative after its cpuset moves. See
* further kernel/cpuset.c update_nodemask().
*
 * current's mempolicy may be rebound by another task (the task that changes
 * the cpuset's mems), so we needn't do rebind work for the current task.
*/
/* Slow path of a mempolicy duplicate */
struct mempolicy *__mpol_dup(struct mempolicy *old)
{
struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
if (!new)
return ERR_PTR(-ENOMEM);
/* task's mempolicy is protected by alloc_lock */
if (old == current->mempolicy) {
task_lock(current);
*new = *old;
task_unlock(current);
} else
*new = *old;
if (current_cpuset_is_being_rebound()) {
nodemask_t mems = cpuset_mems_allowed(current);
mpol_rebind_policy(new, &mems);
}
atomic_set(&new->refcnt, 1);
return new;
}
/* Slow path of a mempolicy comparison */
bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
if (!a || !b)
return false;
if (a->mode != b->mode)
return false;
if (a->flags != b->flags)
return false;
if (mpol_store_user_nodemask(a))
if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
return false;
switch (a->mode) {
case MPOL_BIND:
/* Fall through */
case MPOL_INTERLEAVE:
return !!nodes_equal(a->v.nodes, b->v.nodes);
case MPOL_PREFERRED:
/* a's ->flags is the same as b's */
if (a->flags & MPOL_F_LOCAL)
return true;
return a->v.preferred_node == b->v.preferred_node;
default:
BUG();
return false;
}
}
/*
* Shared memory backing store policy support.
*
* Remember policies even when nobody has shared memory mapped.
* The policies are kept in Red-Black tree linked from the inode.
* They are protected by the sp->lock rwlock, which should be held
* for any accesses to the tree.
*/
/*
* lookup first element intersecting start-end. Caller holds sp->lock for
* reading or for writing
*/
static struct sp_node *
sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
{
struct rb_node *n = sp->root.rb_node;
while (n) {
struct sp_node *p = rb_entry(n, struct sp_node, nd);
if (start >= p->end)
n = n->rb_right;
else if (end <= p->start)
n = n->rb_left;
else
break;
}
if (!n)
return NULL;
for (;;) {
struct sp_node *w = NULL;
struct rb_node *prev = rb_prev(n);
if (!prev)
break;
w = rb_entry(prev, struct sp_node, nd);
if (w->end <= start)
break;
n = prev;
}
return rb_entry(n, struct sp_node, nd);
}
/*
* Insert a new shared policy into the list. Caller holds sp->lock for
* writing.
*/
static void sp_insert(struct shared_policy *sp, struct sp_node *new)
{
struct rb_node **p = &sp->root.rb_node;
struct rb_node *parent = NULL;
struct sp_node *nd;
while (*p) {
parent = *p;
nd = rb_entry(parent, struct sp_node, nd);
if (new->start < nd->start)
p = &(*p)->rb_left;
else if (new->end > nd->end)
p = &(*p)->rb_right;
else
BUG();
}
rb_link_node(&new->nd, parent, p);
rb_insert_color(&new->nd, &sp->root);
pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
new->policy ? new->policy->mode : 0);
}
/* Find shared policy intersecting idx */
struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
struct mempolicy *pol = NULL;
struct sp_node *sn;
if (!sp->root.rb_node)
return NULL;
read_lock(&sp->lock);
sn = sp_lookup(sp, idx, idx+1);
if (sn) {
mpol_get(sn->policy);
pol = sn->policy;
}
read_unlock(&sp->lock);
return pol;
}
static void sp_free(struct sp_node *n)
{
mpol_put(n->policy);
kmem_cache_free(sn_cache, n);
}
/**
* mpol_misplaced - check whether current page node is valid in policy
*
* @page: page to be checked
* @vma: vm area where page mapped
* @addr: virtual address where page mapped
*
 * Look up the current policy node id for vma,addr and compare it to the
 * page's node id.
*
* Returns:
* -1 - not misplaced, page is in the right node
* node - node id where the page should be
*
* Policy determination "mimics" alloc_page_vma().
* Called from fault path where we know the vma and faulting address.
*/
int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
{
struct mempolicy *pol;
struct zoneref *z;
int curnid = page_to_nid(page);
unsigned long pgoff;
int thiscpu = raw_smp_processor_id();
int thisnid = cpu_to_node(thiscpu);
int polnid = -1;
int ret = -1;
pol = get_vma_policy(vma, addr);
if (!(pol->flags & MPOL_F_MOF))
goto out;
switch (pol->mode) {
case MPOL_INTERLEAVE:
pgoff = vma->vm_pgoff;
pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
polnid = offset_il_node(pol, pgoff);
break;
case MPOL_PREFERRED:
if (pol->flags & MPOL_F_LOCAL)
polnid = numa_node_id();
else
polnid = pol->v.preferred_node;
break;
case MPOL_BIND:
/*
		 * MPOL_BIND allows binding to multiple nodes.
		 * Use the current page's node if it is in the policy
		 * nodemask, else select the nearest allowed node, if any.
		 * If there are no allowed nodes, use the current node
		 * [!misplaced].
*/
if (node_isset(curnid, pol->v.nodes))
goto out;
z = first_zones_zonelist(
node_zonelist(numa_node_id(), GFP_HIGHUSER),
gfp_zone(GFP_HIGHUSER),
&pol->v.nodes);
polnid = zone_to_nid(z->zone);
break;
default:
BUG();
}
/* Migrate the page towards the node whose CPU is referencing it */
if (pol->flags & MPOL_F_MORON) {
polnid = thisnid;
if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
goto out;
}
if (curnid != polnid)
ret = polnid;
out:
mpol_cond_put(pol);
return ret;
}
/*
* Drop the (possibly final) reference to task->mempolicy. It needs to be
* dropped after task->mempolicy is set to NULL so that any allocation done as
* part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
* policy.
*/
void mpol_put_task_policy(struct task_struct *task)
{
struct mempolicy *pol;
task_lock(task);
pol = task->mempolicy;
task->mempolicy = NULL;
task_unlock(task);
mpol_put(pol);
}
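/*
 * Remove @n from the shared policy tree and free it. Caller holds
 * sp->lock for writing.
 */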
static void sp_delete(struct shared_policy *sp, struct sp_node *n)
{
pr_debug("deleting %lx-l%lx\n", n->start, n->end);
rb_erase(&n->nd, &sp->root);
sp_free(n);
}
static void sp_node_init(struct sp_node *node, unsigned long start,
unsigned long end, struct mempolicy *pol)
{
node->start = start;
node->end = end;
node->policy = pol;
}
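/*
 * Allocate an sp_node for [start, end) carrying a duplicate of @pol
 * marked MPOL_F_SHARED. Returns NULL if either allocation fails.
 */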
static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
struct mempolicy *pol)
{
struct sp_node *n;
struct mempolicy *newpol;
n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
if (!n)
return NULL;
newpol = mpol_dup(pol);
if (IS_ERR(newpol)) {
kmem_cache_free(sn_cache, n);
return NULL;
}
newpol->flags |= MPOL_F_SHARED;
sp_node_init(n, start, end, newpol);
return n;
}
/* Replace a policy range. */
static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
unsigned long end, struct sp_node *new)
{
struct sp_node *n;
struct sp_node *n_new = NULL;
struct mempolicy *mpol_new = NULL;
int ret = 0;
restart:
write_lock(&sp->lock);
n = sp_lookup(sp, start, end);
/* Take care of old policies in the same range. */
while (n && n->start < end) {
struct rb_node *next = rb_next(&n->nd);
if (n->start >= start) {
if (n->end <= end)
sp_delete(sp, n);
else
n->start = end;
} else {
/* Old policy spanning whole new range. */
if (n->end > end) {
if (!n_new)
goto alloc_new;
*mpol_new = *n->policy;
atomic_set(&mpol_new->refcnt, 1);
sp_node_init(n_new, end, n->end, mpol_new);
n->end = start;
sp_insert(sp, n_new);
n_new = NULL;
mpol_new = NULL;
break;
} else
n->end = start;
}
if (!next)
break;
n = rb_entry(next, struct sp_node, nd);
}
if (new)
sp_insert(sp, new);
write_unlock(&sp->lock);
ret = 0;
err_out:
if (mpol_new)
mpol_put(mpol_new);
if (n_new)
kmem_cache_free(sn_cache, n_new);
return ret;
alloc_new:
write_unlock(&sp->lock);
ret = -ENOMEM;
n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
if (!n_new)
goto err_out;
mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
if (!mpol_new)
goto err_out;
goto restart;
}
/**
* mpol_shared_policy_init - initialize shared policy for inode
* @sp: pointer to inode shared policy
* @mpol: struct mempolicy to install
*
* Install non-NULL @mpol in inode's shared policy rb-tree.
* On entry, the current task has a reference on a non-NULL @mpol.
* This must be released on exit.
 * This is called during get_inode(), so we can use GFP_KERNEL.
*/
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
{
int ret;
sp->root = RB_ROOT; /* empty tree == default mempolicy */
rwlock_init(&sp->lock);
if (mpol) {
struct vm_area_struct pvma;
struct mempolicy *new;
NODEMASK_SCRATCH(scratch);
if (!scratch)
goto put_mpol;
/* contextualize the tmpfs mount point mempolicy */
new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
if (IS_ERR(new))
goto free_scratch; /* no valid nodemask intersection */
task_lock(current);
ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
task_unlock(current);
if (ret)
goto put_new;
/* Create pseudo-vma that contains just the policy */
vma_init(&pvma, NULL);
pvma.vm_end = TASK_SIZE; /* policy covers entire file */
mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
put_new:
mpol_put(new); /* drop initial ref */
free_scratch:
NODEMASK_SCRATCH_FREE(scratch);
put_mpol:
mpol_put(mpol); /* drop our incoming ref on sb mpol */
}
}
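/*
 * Install @npol for the range covered by pseudo-vma @vma in the shared
 * policy tree @info; a NULL @npol removes any policy from that range.
 */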
int mpol_set_shared_policy(struct shared_policy *info,
struct vm_area_struct *vma, struct mempolicy *npol)
{
int err;
struct sp_node *new = NULL;
unsigned long sz = vma_pages(vma);
pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
vma->vm_pgoff,
sz, npol ? npol->mode : -1,
npol ? npol->flags : -1,
npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
if (npol) {
new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
if (!new)
return -ENOMEM;
}
err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
if (err && new)
sp_free(new);
return err;
}
/* Free a backing policy store on inode delete. */
void mpol_free_shared_policy(struct shared_policy *p)
{
struct sp_node *n;
struct rb_node *next;
if (!p->root.rb_node)
return;
write_lock(&p->lock);
next = rb_first(&p->root);
while (next) {
n = rb_entry(next, struct sp_node, nd);
next = rb_next(&n->nd);
sp_delete(p, n);
}
write_unlock(&p->lock);
}
#ifdef CONFIG_NUMA_BALANCING
static int __initdata numabalancing_override;
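/*
 * Apply the numa_balancing= command line override if one was given;
 * otherwise fall back to the Kconfig default when more than one node
 * is online.
 */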
static void __init check_numabalancing_enable(void)
{
bool numabalancing_default = false;
if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
numabalancing_default = true;
/* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
if (numabalancing_override)
set_numabalancing_state(numabalancing_override == 1);
if (num_online_nodes() > 1 && !numabalancing_override) {
pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
numabalancing_default ? "Enabling" : "Disabling");
set_numabalancing_state(numabalancing_default);
}
}
static int __init setup_numabalancing(char *str)
{
int ret = 0;
if (!str)
goto out;
if (!strcmp(str, "enable")) {
numabalancing_override = 1;
ret = 1;
} else if (!strcmp(str, "disable")) {
numabalancing_override = -1;
ret = 1;
}
out:
if (!ret)
pr_warn("Unable to parse numa_balancing=\n");
return ret;
}
__setup("numa_balancing=", setup_numabalancing);
#else
static inline void __init check_numabalancing_enable(void)
{
}
#endif /* CONFIG_NUMA_BALANCING */
/* assumes fs == KERNEL_DS */
void __init numa_policy_init(void)
{
nodemask_t interleave_nodes;
unsigned long largest = 0;
int nid, prefer = 0;
policy_cache = kmem_cache_create("numa_policy",
sizeof(struct mempolicy),
0, SLAB_PANIC, NULL);
sn_cache = kmem_cache_create("shared_policy_node",
sizeof(struct sp_node),
0, SLAB_PANIC, NULL);
for_each_node(nid) {
preferred_node_policy[nid] = (struct mempolicy) {
.refcnt = ATOMIC_INIT(1),
.mode = MPOL_PREFERRED,
.flags = MPOL_F_MOF | MPOL_F_MORON,
.v = { .preferred_node = nid, },
};
}
/*
* Set interleaving policy for system init. Interleaving is only
* enabled across suitably sized nodes (default is >= 16MB), or
* fall back to the largest node if they're all smaller.
*/
nodes_clear(interleave_nodes);
for_each_node_state(nid, N_MEMORY) {
unsigned long total_pages = node_present_pages(nid);
/* Preserve the largest node */
if (largest < total_pages) {
largest = total_pages;
prefer = nid;
}
/* Interleave this node? */
if ((total_pages << PAGE_SHIFT) >= (16 << 20))
node_set(nid, interleave_nodes);
}
/* All too small, use the largest */
if (unlikely(nodes_empty(interleave_nodes)))
node_set(prefer, interleave_nodes);
if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
pr_err("%s: interleaving failed\n", __func__);
check_numabalancing_enable();
}
/* Reset policy of current process to default */
void numa_default_policy(void)
{
do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
}
/*
* Parse and format mempolicy from/to strings
*/
/*
* "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag.
*/
static const char * const policy_modes[] =
{
[MPOL_DEFAULT] = "default",
[MPOL_PREFERRED] = "prefer",
[MPOL_BIND] = "bind",
[MPOL_INTERLEAVE] = "interleave",
[MPOL_LOCAL] = "local",
};
#ifdef CONFIG_TMPFS
/**
* mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
* @str: string containing mempolicy to parse
* @mpol: pointer to struct mempolicy pointer, returned on success.
*
* Format of input:
* <mode>[=<flags>][:<nodelist>]
*
* On success, returns 0, else 1
*/
int mpol_parse_str(char *str, struct mempolicy **mpol)
{
struct mempolicy *new = NULL;
unsigned short mode;
unsigned short mode_flags;
nodemask_t nodes;
char *nodelist = strchr(str, ':');
char *flags = strchr(str, '=');
int err = 1;
if (flags)
*flags++ = '\0'; /* terminate mode string */
if (nodelist) {
/* NUL-terminate mode or flags string */
*nodelist++ = '\0';
if (nodelist_parse(nodelist, nodes))
goto out;
if (!nodes_subset(nodes, node_states[N_MEMORY]))
goto out;
} else
nodes_clear(nodes);
for (mode = 0; mode < MPOL_MAX; mode++) {
if (!strcmp(str, policy_modes[mode])) {
break;
}
}
if (mode >= MPOL_MAX)
goto out;
switch (mode) {
case MPOL_PREFERRED:
/*
* Insist on a nodelist of one node only, although later
* we use first_node(nodes) to grab a single node, so here
* nodelist (or nodes) cannot be empty.
*/
if (nodelist) {
char *rest = nodelist;
while (isdigit(*rest))
rest++;
if (*rest)
goto out;
if (nodes_empty(nodes))
goto out;
}
break;
case MPOL_INTERLEAVE:
/*
* Default to online nodes with memory if no nodelist
*/
if (!nodelist)
nodes = node_states[N_MEMORY];
break;
case MPOL_LOCAL:
/*
* Don't allow a nodelist; mpol_new() checks flags
*/
if (nodelist)
goto out;
mode = MPOL_PREFERRED;
break;
case MPOL_DEFAULT:
/*
		 * Insist on an empty nodelist
*/
if (!nodelist)
err = 0;
goto out;
case MPOL_BIND:
/*
* Insist on a nodelist
*/
if (!nodelist)
goto out;
}
mode_flags = 0;
if (flags) {
/*
* Currently, we only support two mutually exclusive
* mode flags.
*/
if (!strcmp(flags, "static"))
mode_flags |= MPOL_F_STATIC_NODES;
else if (!strcmp(flags, "relative"))
mode_flags |= MPOL_F_RELATIVE_NODES;
else
goto out;
}
new = mpol_new(mode, mode_flags, &nodes);
if (IS_ERR(new))
goto out;
/*
* Save nodes for mpol_to_str() to show the tmpfs mount options
* for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
*/
if (mode != MPOL_PREFERRED)
new->v.nodes = nodes;
else if (nodelist)
new->v.preferred_node = first_node(nodes);
else
new->flags |= MPOL_F_LOCAL;
/*
* Save nodes for contextualization: this will be used to "clone"
* the mempolicy in a specific context [cpuset] at a later time.
*/
new->w.user_nodemask = nodes;
err = 0;
out:
/* Restore string for error message */
if (nodelist)
*--nodelist = ':';
if (flags)
*--flags = '=';
if (!err)
*mpol = new;
return err;
}
#endif /* CONFIG_TMPFS */
/**
* mpol_to_str - format a mempolicy structure for printing
* @buffer: to contain formatted mempolicy string
* @maxlen: length of @buffer
* @pol: pointer to mempolicy to be formatted
*
* Convert @pol into a string. If @buffer is too short, truncate the string.
* Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
* longest flag, "relative", and to display at least a few node ids.
*/
void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
{
char *p = buffer;
nodemask_t nodes = NODE_MASK_NONE;
unsigned short mode = MPOL_DEFAULT;
unsigned short flags = 0;
if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
mode = pol->mode;
flags = pol->flags;
}
switch (mode) {
case MPOL_DEFAULT:
break;
case MPOL_PREFERRED:
if (flags & MPOL_F_LOCAL)
mode = MPOL_LOCAL;
else
node_set(pol->v.preferred_node, nodes);
break;
case MPOL_BIND:
case MPOL_INTERLEAVE:
nodes = pol->v.nodes;
break;
default:
WARN_ON_ONCE(1);
snprintf(p, maxlen, "unknown");
return;
}
p += snprintf(p, maxlen, "%s", policy_modes[mode]);
if (flags & MPOL_MODE_FLAGS) {
p += snprintf(p, buffer + maxlen - p, "=");
/*
* Currently, the only defined flags are mutually exclusive
*/
if (flags & MPOL_F_STATIC_NODES)
p += snprintf(p, buffer + maxlen - p, "static");
else if (flags & MPOL_F_RELATIVE_NODES)
p += snprintf(p, buffer + maxlen - p, "relative");
}
if (!nodes_empty(nodes))
p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
nodemask_pr_args(&nodes));
}