diff --git a/BUILD.bazel b/BUILD.bazel
index 93c4e7303ce5..a395ead50527 100644
--- a/BUILD.bazel
+++ b/BUILD.bazel
@@ -35,6 +35,8 @@ _aarch64_additional_kmi_symbol_lists = [
"android/abi_gki_aarch64_fips140",
"android/abi_gki_aarch64_generic",
"android/abi_gki_aarch64_hikey960",
+ "android/abi_gki_aarch64_rockchip",
+ "android/abi_gki_aarch64_type_visibility",
"android/abi_gki_aarch64_virtual_device",
]
diff --git a/android/abi_gki_aarch64.xml b/android/abi_gki_aarch64.xml
index 9d4b5cd90e25..c3a7ec57372e 100644
--- a/android/abi_gki_aarch64.xml
+++ b/android/abi_gki_aarch64.xml
[Hunks for android/abi_gki_aarch64.xml not reproduced: the added and removed <elf-symbol>/<type> XML entries were stripped when this diff was captured, leaving only hunk headers and bare +/- markers, so the individual symbol changes are not recoverable here.]
diff --git a/android/abi_gki_aarch64_generic b/android/abi_gki_aarch64_generic
index 3a76d3602f24..7b0d8f603f8b 100644
--- a/android/abi_gki_aarch64_generic
+++ b/android/abi_gki_aarch64_generic
@@ -2247,6 +2247,8 @@
__traceiter_android_rvh_util_est_update
__traceiter_android_vh_arch_set_freq_scale
__traceiter_android_vh_bh_lru_install
+ __traceiter_android_vh_binder_restore_priority
+ __traceiter_android_vh_binder_set_priority
__traceiter_android_vh_cma_alloc_adjust
__traceiter_android_vh_cma_alloc_finish
__traceiter_android_vh_cma_alloc_start
@@ -2312,6 +2314,8 @@
__traceiter_dwc3_readl
__traceiter_dwc3_writel
__traceiter_gpu_mem_total
+ __traceiter_irq_handler_entry
+ __traceiter_irq_handler_exit
__traceiter_mm_vmscan_direct_reclaim_begin
__traceiter_mm_vmscan_direct_reclaim_end
__traceiter_pelt_cfs_tp
@@ -2327,6 +2331,8 @@
__traceiter_sched_switch
__traceiter_sched_util_est_cfs_tp
__traceiter_sched_util_est_se_tp
+ __traceiter_softirq_entry
+ __traceiter_softirq_exit
__traceiter_suspend_resume
trace_output_call
__tracepoint_android_rvh_arm64_serror_panic
@@ -2376,6 +2382,8 @@
__tracepoint_android_rvh_util_est_update
__tracepoint_android_vh_arch_set_freq_scale
__tracepoint_android_vh_bh_lru_install
+ __tracepoint_android_vh_binder_restore_priority
+ __tracepoint_android_vh_binder_set_priority
__tracepoint_android_vh_cma_alloc_adjust
__tracepoint_android_vh_cma_alloc_finish
__tracepoint_android_vh_cma_alloc_start
@@ -2441,6 +2449,8 @@
__tracepoint_dwc3_readl
__tracepoint_dwc3_writel
__tracepoint_gpu_mem_total
+ __tracepoint_irq_handler_entry
+ __tracepoint_irq_handler_exit
__tracepoint_mm_vmscan_direct_reclaim_begin
__tracepoint_mm_vmscan_direct_reclaim_end
__tracepoint_pelt_cfs_tp
@@ -2458,6 +2468,8 @@
__tracepoint_sched_switch
__tracepoint_sched_util_est_cfs_tp
__tracepoint_sched_util_est_se_tp
+ __tracepoint_softirq_entry
+ __tracepoint_softirq_exit
__tracepoint_suspend_resume
trace_print_array_seq
trace_print_bitmask_seq
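The new entries come in __traceiter_/__tracepoint_ pairs because a vendor module that attaches a probe (or calls the trace_* helper) ends up referencing both exported symbols, so both have to stay on the KMI list. Below is a minimal sketch of a module that would pull in the irq_handler_entry/exit symbols added above; the module and probe names are made up for illustration, only the register_trace_* calls follow the standard tracepoint API.

// SPDX-License-Identifier: GPL-2.0
/* Illustrative vendor module: counts hardirq handler invocations. */
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/interrupt.h>
#include <trace/events/irq.h>

static atomic_t irq_entries = ATOMIC_INIT(0);

static void probe_irq_entry(void *data, int irq, struct irqaction *action)
{
        atomic_inc(&irq_entries);
}

static void probe_irq_exit(void *data, int irq, struct irqaction *action,
                           int ret)
{
        /* Registered only to show the paired entry/exit symbols in use. */
}

static int __init irq_count_init(void)
{
        int err;

        err = register_trace_irq_handler_entry(probe_irq_entry, NULL);
        if (err)
                return err;

        err = register_trace_irq_handler_exit(probe_irq_exit, NULL);
        if (err)
                unregister_trace_irq_handler_entry(probe_irq_entry, NULL);
        return err;
}

static void __exit irq_count_exit(void)
{
        unregister_trace_irq_handler_exit(probe_irq_exit, NULL);
        unregister_trace_irq_handler_entry(probe_irq_entry, NULL);
        pr_info("saw %d hardirq entries\n", atomic_read(&irq_entries));
}

module_init(irq_count_init);
module_exit(irq_count_exit);
MODULE_LICENSE("GPL");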
diff --git a/android/abi_gki_aarch64_rockchip b/android/abi_gki_aarch64_rockchip
index e69de29bb2d1..07fd8de4b52f 100644
--- a/android/abi_gki_aarch64_rockchip
+++ b/android/abi_gki_aarch64_rockchip
@@ -0,0 +1,434 @@
+[abi_symbol_list]
+# commonly used symbols
+ atomic_notifier_chain_register
+ cancel_delayed_work_sync
+ __cfi_slowpath
+ clk_bulk_disable
+ clk_bulk_enable
+ clk_bulk_prepare
+ clk_bulk_unprepare
+ clk_disable
+ clk_enable
+ clk_gate_ops
+ clk_get
+ __clk_get_name
+ clk_get_rate
+ clk_notifier_register
+ clk_prepare
+ clk_put
+ clk_register
+ clk_set_rate
+ clk_unprepare
+ __const_udelay
+ cpumask_next
+ __cpu_online_mask
+ __cpu_possible_mask
+ delayed_work_timer_fn
+ _dev_err
+ devfreq_recommended_opp
+ device_init_wakeup
+ _dev_info
+ devm_clk_bulk_get_all
+ devm_clk_get
+ devm_clk_hw_register
+ devm_clk_register
+ devm_ioremap_resource
+ devm_kfree
+ devm_kmalloc
+ devm_nvmem_register
+ devm_platform_ioremap_resource
+ devm_regulator_get
+ devm_regulator_get_optional
+ devm_request_threaded_irq
+ devm_reset_control_array_get
+ dev_pm_opp_find_freq_ceil
+ dev_pm_opp_get_opp_count
+ dev_pm_opp_get_opp_table
+ dev_pm_opp_get_voltage
+ dev_pm_opp_put
+ dev_pm_opp_put_opp_table
+ devres_add
+ devres_alloc_node
+ devres_free
+ _dev_warn
+ down_read
+ down_write
+ init_timer_key
+ iounmap
+ irq_set_irq_wake
+ irq_to_desc
+ kfree
+ __kmalloc
+ kmalloc_caches
+ kmem_cache_alloc_trace
+ ktime_get
+ __list_add_valid
+ __log_post_read_mmio
+ __log_read_mmio
+ __log_write_mmio
+ memcpy
+ module_layout
+ __msecs_to_jiffies
+ msleep
+ __mutex_init
+ mutex_lock
+ mutex_unlock
+ nr_cpu_ids
+ nvmem_cell_put
+ nvmem_cell_read
+ of_clk_add_provider
+ of_clk_del_provider
+ of_clk_get
+ of_clk_get_by_name
+ of_clk_get_parent_count
+ of_clk_src_simple_get
+ of_count_phandle_with_args
+ of_device_get_match_data
+ of_device_is_available
+ of_find_matching_node_and_match
+ of_find_property
+ of_get_next_available_child
+ of_get_next_child
+ of_iomap
+ of_match_device
+ of_match_node
+ of_nvmem_cell_get
+ of_parse_phandle
+ of_property_count_elems_of_size
+ of_property_read_string
+ of_property_read_string_helper
+ of_property_read_u32_index
+ of_property_read_variable_u32_array
+ panic_notifier_list
+ __platform_driver_probe
+ __platform_driver_register
+ platform_driver_unregister
+ platform_get_irq
+ platform_get_resource
+ pm_clk_create
+ pm_clk_destroy
+ print_hex_dump
+ printk
+ __put_task_struct
+ queue_delayed_work_on
+ regmap_read
+ regmap_update_bits_base
+ regmap_write
+ regulator_disable
+ regulator_enable
+ regulator_get_voltage
+ regulator_put
+ regulator_set_voltage
+ reset_control_assert
+ reset_control_deassert
+ snprintf
+ sprintf
+ __stack_chk_fail
+ strcmp
+ syscon_node_to_regmap
+ syscon_regmap_lookup_by_phandle
+ sysfs_create_file_ns
+ system_freezable_wq
+ system_wq
+ thermal_zone_get_zone_by_name
+ __traceiter_rwmmio_post_read
+ __traceiter_rwmmio_read
+ __traceiter_rwmmio_write
+ __tracepoint_rwmmio_post_read
+ __tracepoint_rwmmio_read
+ __tracepoint_rwmmio_write
+ __udelay
+ update_devfreq
+ up_read
+ up_write
+ usleep_range
+
+# required by clk-link.ko
+ pm_clk_add
+ pm_clk_resume
+ pm_clk_suspend
+ __pm_runtime_disable
+ pm_runtime_enable
+
+# required by clk-pwm.ko
+ devm_pwm_get
+ of_clk_add_hw_provider
+ of_clk_hw_simple_get
+ pwm_apply_state
+
+# required by clk-rockchip.ko
+ clk_divider_ops
+ clk_divider_ro_ops
+ clk_fixed_factor_ops
+ clk_fractional_divider_ops
+ __clk_get_hw
+ clk_get_parent
+ clk_hw_get_flags
+ clk_hw_get_name
+ clk_hw_get_parent
+ clk_hw_get_rate
+ clk_hw_register_composite
+ clk_hw_round_rate
+ __clk_mux_determine_rate
+ clk_mux_ops
+ clk_mux_ro_ops
+ clk_notifier_unregister
+ clk_register_divider_table
+ clk_register_fixed_factor
+ clk_register_gate
+ clk_register_mux_table
+ clk_unregister
+ gcd
+ kmemdup
+ match_string
+ of_clk_src_onecell_get
+ rational_best_approximation
+ _raw_spin_lock_irqsave
+ _raw_spin_unlock_irqrestore
+ register_restart_handler
+ reset_controller_register
+ __warn_printk
+
+# required by clk-scmi.ko
+ clk_hw_set_rate_range
+ devm_of_clk_add_hw_provider
+ of_clk_hw_onecell_get
+ scmi_driver_register
+ scmi_driver_unregister
+
+# required by industrialio-triggered-buffer.ko
+ iio_alloc_pollfunc
+ iio_dealloc_pollfunc
+ iio_device_attach_buffer
+
+# required by io-domain.ko
+ _dev_crit
+ regulator_register_notifier
+ regulator_unregister_notifier
+
+# required by kfifo_buf.ko
+ iio_buffer_init
+ iio_buffer_put
+ __kfifo_alloc
+ __kfifo_free
+ __kfifo_in
+ __kfifo_to_user
+ mutex_lock_interruptible
+
+# required by nvmem-rockchip-otp.ko
+ devm_clk_bulk_get
+ param_ops_uint
+
+# required by pm_domains.ko
+ clk_bulk_put
+ of_genpd_add_provider_onecell
+ panic
+ param_ops_bool
+ pm_clk_add_clk
+ pm_genpd_add_subdomain
+ pm_genpd_init
+ pm_genpd_remove
+ strrchr
+
+# required by rockchip-cpuinfo.ko
+ nvmem_cell_get
+
+# required by rockchip-dfi.ko
+ devm_devfreq_event_add_edev
+ gic_nonsecure_priorities
+
+# required by rockchip.ko
+ __genphy_config_aneg
+ genphy_resume
+ genphy_soft_reset
+ genphy_suspend
+ mdiobus_read
+ mdiobus_write
+ phy_drivers_register
+ phy_drivers_unregister
+
+# required by rockchip_bus.ko
+ cpufreq_register_notifier
+ cpu_topology
+
+# required by rockchip_debug.ko
+ atomic_notifier_chain_unregister
+ nr_irqs
+ __per_cpu_offset
+
+# required by rockchip_dmc.ko
+ cpufreq_cpu_get
+ cpufreq_cpu_put
+ __cpufreq_driver_target
+ cpufreq_quick_get
+ cpu_latency_qos_add_request
+ cpu_latency_qos_update_request
+ cpu_number
+ cpus_read_lock
+ cpus_read_unlock
+ devfreq_add_governor
+ devfreq_event_disable_edev
+ devfreq_event_enable_edev
+ devfreq_event_get_edev_by_phandle
+ devfreq_event_get_edev_count
+ devfreq_event_get_event
+ devfreq_monitor_resume
+ devfreq_monitor_start
+ devfreq_monitor_stop
+ devfreq_monitor_suspend
+ devfreq_resume_device
+ devfreq_suspend_device
+ devfreq_update_interval
+ devm_devfreq_add_device
+ devm_devfreq_register_opp_notifier
+ _dev_notice
+ dev_pm_opp_put_regulators
+ dev_pm_opp_register_set_opp_helper
+ dev_pm_opp_set_rate
+ dev_pm_opp_set_regulators
+ dev_pm_opp_unregister_set_opp_helper
+ disable_irq
+ enable_irq
+ finish_wait
+ init_wait_entry
+ __init_waitqueue_head
+ input_close_device
+ input_open_device
+ input_register_handle
+ input_register_handler
+ input_unregister_handle
+ kstrtouint
+ __memset_io
+ of_devfreq_cooling_register_power
+ of_get_child_by_name
+ platform_get_irq_byname
+ prepare_to_wait_event
+ queue_work_on
+ ___ratelimit
+ schedule_timeout
+ __wake_up
+
+# required by rockchip_dmc_common.ko
+ down_write_trylock
+
+# required by rockchip_opp_select.ko
+ dev_pm_opp_disable
+ dev_pm_opp_of_add_table
+ dev_pm_opp_set_prop_name
+ of_find_node_opts_by_path
+ of_machine_is_compatible
+ regulator_get_linear_step
+ regulator_get_optional
+
+# required by rockchip_pm_config.ko
+ of_find_node_by_name
+ of_get_named_gpio_flags
+
+# required by rockchip_pvtm.ko
+ debugfs_create_dir
+ debugfs_create_file
+ debugfs_remove
+ seq_lseek
+ seq_printf
+ seq_puts
+ seq_read
+ single_open
+ single_release
+
+# required by rockchip_pwm_remotectl.ko
+ arm64_const_caps_ready
+ cpu_hwcap_keys
+ devm_input_allocate_device
+ input_event
+ input_register_device
+ input_set_capability
+ irq_set_affinity_hint
+ jiffies
+ jiffies_to_msecs
+ mod_timer
+ of_get_property
+ param_ops_int
+ __pm_relax
+ pm_wakeup_ws_event
+ __tasklet_hi_schedule
+ tasklet_init
+ wakeup_source_add
+ wakeup_source_remove
+
+# required by rockchip_saradc.ko
+ complete
+ devm_add_action
+ devm_iio_device_alloc
+ __devm_iio_device_register
+ __devm_reset_control_get
+ find_next_bit
+ iio_get_time_ns
+ iio_push_to_buffers
+ iio_trigger_notify_done
+ __init_swait_queue_head
+ wait_for_completion_timeout
+
+# required by rockchip_sip.ko
+ arm64_use_ng_mappings
+ __arm_smccc_smc
+ __ioremap
+ memstart_addr
+ pfn_valid
+ sched_clock
+ vmap
+
+# required by rockchip_system_monitor.ko
+ add_cpu
+ bitmap_parselist
+ blocking_notifier_call_chain
+ blocking_notifier_chain_register
+ blocking_notifier_chain_unregister
+ dev_pm_opp_find_freq_floor
+ dev_pm_qos_add_request
+ dev_pm_qos_remove_request
+ dev_pm_qos_update_request
+ freq_qos_add_request
+ freq_qos_remove_request
+ freq_qos_update_request
+ kobject_create_and_add
+ kstrdup
+ kstrtoull
+ __list_del_entry_valid
+ memset
+ mod_delayed_work_on
+ register_pm_notifier
+ register_reboot_notifier
+ regulator_get
+ remove_cpu
+ strchr
+ strsep
+ strstr
+ thermal_zone_get_temp
+
+# required by rockchip_thermal.ko
+ devm_pinctrl_get
+ devm_thermal_zone_of_sensor_register
+ pinctrl_lookup_state
+ pinctrl_select_state
+ thermal_zone_device_disable
+ thermal_zone_device_enable
+ thermal_zone_device_update
+
+# required by rtc-hym8563.ko
+ _bcd2bin
+ _bin2bcd
+ device_property_present
+ devm_rtc_device_register
+ i2c_del_driver
+ i2c_register_driver
+ i2c_smbus_read_byte_data
+ i2c_smbus_read_i2c_block_data
+ i2c_smbus_write_byte_data
+ i2c_smbus_write_i2c_block_data
+ rtc_valid_tm
+
+# required by timer-rockchip.ko
+ clockevents_config_and_register
+ irq_of_parse_and_map
+ of_device_is_compatible
+ request_threaded_irq
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index ef1cf41ea394..edca12fd852e 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -75,6 +75,7 @@ enum __kvm_host_smccc_func {
__KVM_HOST_SMCCC_FUNC___vgic_v3_save_vmcr_aprs,
__KVM_HOST_SMCCC_FUNC___vgic_v3_restore_vmcr_aprs,
__KVM_HOST_SMCCC_FUNC___pkvm_init_shadow,
+ __KVM_HOST_SMCCC_FUNC___pkvm_init_shadow_vcpu,
__KVM_HOST_SMCCC_FUNC___pkvm_teardown_shadow,
__KVM_HOST_SMCCC_FUNC___pkvm_vcpu_load,
__KVM_HOST_SMCCC_FUNC___pkvm_vcpu_put,
diff --git a/arch/arm64/kvm/hyp/include/nvhe/pkvm.h b/arch/arm64/kvm/hyp/include/nvhe/pkvm.h
index 80ecc831fc31..de7f607dde58 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/pkvm.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/pkvm.h
@@ -42,14 +42,20 @@ struct kvm_shadow_vm {
/* The total size of the donated shadow area. */
size_t shadow_area_size;
+ /*
+ * The number of vcpus initialized and ready to run in the shadow vm.
+ * Modifying this is protected by shadow_lock.
+ */
+ unsigned int nr_vcpus;
+
struct kvm_arch arch;
struct kvm_pgtable pgt;
struct kvm_pgtable_mm_ops mm_ops;
struct hyp_pool pool;
hyp_spinlock_t lock;
- /* Array of the shadow state per vcpu. */
- struct shadow_vcpu_state shadow_vcpus[0];
+ /* Array of the shadow state pointers per vcpu. */
+ struct shadow_vcpu_state *shadow_vcpus[0];
};
static inline bool vcpu_is_protected(struct kvm_vcpu *vcpu)
@@ -65,6 +71,9 @@ extern phys_addr_t pvmfw_size;
void hyp_shadow_table_init(void *tbl);
int __pkvm_init_shadow(struct kvm *kvm, void *shadow_va, size_t size, void *pgd);
+int __pkvm_init_shadow_vcpu(unsigned int shadow_handle,
+ struct kvm_vcpu *host_vcpu,
+ void *shadow_vcpu_hva);
int __pkvm_teardown_shadow(int shadow_handle);
struct kvm_vcpu *get_shadow_vcpu(int shadow_handle, unsigned int vcpu_idx);
void put_shadow_vcpu(struct kvm_vcpu *vcpu);
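With shadow_vcpus[] now holding pointers, the per-VM donation only has to cover kvm_shadow_vm plus one pointer per vcpu, and each vcpu's shadow state becomes its own page-aligned donation (see pkvm_get_shadow_size() and __pkvm_init_shadow_vcpu() further down). A rough userspace sketch of the size arithmetic follows; the stand-in struct sizes are made up, only the PAGE_ALIGN relationships mirror the patch.

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* Stand-ins for the hyp structures; real sizes differ. */
struct shadow_vcpu_state { char state[5000]; };
struct kvm_shadow_vm {
        char header[1024];
        struct shadow_vcpu_state *shadow_vcpus[]; /* was an inline array of states */
};

int main(void)
{
        unsigned long nr_vcpus = 4;

        /* Old layout: one donation holding the vm and every vcpu state inline. */
        unsigned long old_sz = PAGE_ALIGN(sizeof(struct kvm_shadow_vm) +
                                          sizeof(struct shadow_vcpu_state) * nr_vcpus);

        /* New layout: the vm area stores only pointers... */
        unsigned long vm_sz = PAGE_ALIGN(sizeof(struct kvm_shadow_vm) +
                                         sizeof(struct shadow_vcpu_state *) * nr_vcpus);
        /* ...and each vcpu state is donated separately, page aligned. */
        unsigned long vcpu_sz = PAGE_ALIGN(sizeof(struct shadow_vcpu_state));

        printf("old: %lu bytes; new: %lu + %lu x %lu bytes\n",
               old_sz, vm_sz, nr_vcpus, vcpu_sz);
        return 0;
}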
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index 14f9da9f87b9..9901e102f330 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -1006,6 +1006,17 @@ static void handle___pkvm_init_shadow(struct kvm_cpu_context *host_ctxt)
shadow_size, pgd);
}
+static void handle___pkvm_init_shadow_vcpu(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(unsigned int, shadow_handle, host_ctxt, 1);
+ DECLARE_REG(struct kvm_vcpu *, host_vcpu, host_ctxt, 2);
+ DECLARE_REG(void *, shadow_vcpu_hva, host_ctxt, 3);
+
+ cpu_reg(host_ctxt, 1) = __pkvm_init_shadow_vcpu(shadow_handle,
+ host_vcpu,
+ shadow_vcpu_hva);
+}
+
static void handle___pkvm_teardown_shadow(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(int, shadow_handle, host_ctxt, 1);
@@ -1079,6 +1090,7 @@ static const hcall_t host_hcall[] = {
HANDLE_FUNC(__vgic_v3_save_vmcr_aprs),
HANDLE_FUNC(__vgic_v3_restore_vmcr_aprs),
HANDLE_FUNC(__pkvm_init_shadow),
+ HANDLE_FUNC(__pkvm_init_shadow_vcpu),
HANDLE_FUNC(__pkvm_teardown_shadow),
HANDLE_FUNC(__pkvm_vcpu_load),
HANDLE_FUNC(__pkvm_vcpu_put),
diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c
index b9e6337852dd..d43ccfd72e04 100644
--- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
+++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
@@ -276,9 +276,9 @@ struct kvm_vcpu *get_shadow_vcpu(int shadow_handle, unsigned int vcpu_idx)
hyp_spin_lock(&shadow_lock);
vm = find_shadow_by_handle(shadow_handle);
- if (!vm || vm->created_vcpus <= vcpu_idx)
+ if (!vm || vm->nr_vcpus <= vcpu_idx)
goto unlock;
- vcpu = &vm->shadow_vcpus[vcpu_idx].vcpu;
+ vcpu = &vm->shadow_vcpus[vcpu_idx]->vcpu;
/* Ensure vcpu isn't loaded on more than one cpu simultaneously. */
if (unlikely(vcpu->arch.pkvm.loaded_on_cpu)) {
@@ -370,48 +370,28 @@ static int copy_features(struct kvm_vcpu *shadow_vcpu, struct kvm_vcpu *host_vcp
return 0;
}
-static void unpin_host_vcpus(struct shadow_vcpu_state *shadow_vcpus, int nr_vcpus)
+static void unpin_host_vcpu(struct shadow_vcpu_state *shadow_vcpu)
{
- int i;
+ struct kvm_vcpu *host_vcpu = shadow_vcpu->vcpu.arch.pkvm.host_vcpu;
+ size_t sve_state_size;
+ void *sve_state = shadow_vcpu->vcpu.arch.sve_state;
- for (i = 0; i < nr_vcpus; i++) {
- struct kvm_vcpu *host_vcpu = shadow_vcpus[i].vcpu.arch.pkvm.host_vcpu;
- struct kvm_vcpu *shadow_vcpu = &shadow_vcpus[i].vcpu;
- size_t sve_state_size;
- void *sve_state;
+ hyp_unpin_shared_mem(host_vcpu, host_vcpu + 1);
- hyp_unpin_shared_mem(host_vcpu, host_vcpu + 1);
+ if (!sve_state)
+ return;
- if (!test_bit(KVM_ARM_VCPU_SVE, shadow_vcpu->arch.features))
- continue;
-
- sve_state = shadow_vcpu->arch.sve_state;
- sve_state = kern_hyp_va(sve_state);
- sve_state_size = vcpu_sve_state_size(shadow_vcpu);
- hyp_unpin_shared_mem(sve_state, sve_state + sve_state_size);
- }
+ sve_state = kern_hyp_va(sve_state);
+ sve_state_size = vcpu_sve_state_size(&shadow_vcpu->vcpu);
+ hyp_unpin_shared_mem(sve_state, sve_state + sve_state_size);
}
-static int set_host_vcpus(struct shadow_vcpu_state *shadow_vcpus, int nr_vcpus,
- struct kvm_vcpu **vcpu_array, size_t vcpu_array_size)
+static void unpin_host_vcpus(struct shadow_vcpu_state *shadow_vcpus[], int nr_vcpus)
{
int i;
- if (vcpu_array_size < sizeof(*vcpu_array) * nr_vcpus)
- return -EINVAL;
-
- for (i = 0; i < nr_vcpus; i++) {
- struct kvm_vcpu *host_vcpu = kern_hyp_va(vcpu_array[i]);
-
- if (hyp_pin_shared_mem(host_vcpu, host_vcpu + 1)) {
- unpin_host_vcpus(shadow_vcpus, i);
- return -EBUSY;
- }
-
- shadow_vcpus[i].vcpu.arch.pkvm.host_vcpu = host_vcpu;
- }
-
- return 0;
+ for (i = 0; i < nr_vcpus; i++)
+ unpin_host_vcpu(shadow_vcpus[i]);
}
static int init_ptrauth(struct kvm_vcpu *shadow_vcpu)
@@ -423,86 +403,106 @@ static int init_ptrauth(struct kvm_vcpu *shadow_vcpu)
return ret;
}
-static int init_shadow_structs(struct kvm *kvm, struct kvm_shadow_vm *vm,
- struct kvm_vcpu **vcpu_array, int nr_vcpus)
+static void init_shadow_vm(struct kvm *kvm, struct kvm_shadow_vm *vm,
+ int nr_vcpus)
{
- int i;
- int ret;
-
vm->host_kvm = kvm;
vm->created_vcpus = nr_vcpus;
vm->arch.pkvm.pvmfw_load_addr = kvm->arch.pkvm.pvmfw_load_addr;
vm->arch.pkvm.enabled = READ_ONCE(kvm->arch.pkvm.enabled);
+}
- for (i = 0; i < nr_vcpus; i++) {
- struct shadow_vcpu_state *shadow_state = &vm->shadow_vcpus[i];
- struct kvm_vcpu *shadow_vcpu = &shadow_state->vcpu;
- struct kvm_vcpu *host_vcpu = shadow_vcpu->arch.pkvm.host_vcpu;
-
- shadow_vcpu->kvm = kvm;
- shadow_vcpu->vcpu_id = host_vcpu->vcpu_id;
- shadow_vcpu->vcpu_idx = i;
-
- ret = copy_features(shadow_vcpu, host_vcpu);
- if (ret)
- return ret;
-
- ret = init_ptrauth(shadow_vcpu);
- if (ret)
- return ret;
-
- if (test_bit(KVM_ARM_VCPU_SVE, shadow_vcpu->arch.features)) {
- size_t sve_state_size;
- void *sve_state;
-
- shadow_vcpu->arch.sve_state = READ_ONCE(host_vcpu->arch.sve_state);
- shadow_vcpu->arch.sve_max_vl = READ_ONCE(host_vcpu->arch.sve_max_vl);
-
- sve_state = kern_hyp_va(shadow_vcpu->arch.sve_state);
- sve_state_size = vcpu_sve_state_size(shadow_vcpu);
-
- if (!shadow_vcpu->arch.sve_state || !sve_state_size ||
- hyp_pin_shared_mem(sve_state,
- sve_state + sve_state_size)) {
- clear_bit(KVM_ARM_VCPU_SVE,
- shadow_vcpu->arch.features);
- shadow_vcpu->arch.sve_state = NULL;
- shadow_vcpu->arch.sve_max_vl = 0;
- return -EINVAL;
- }
- }
+static int init_shadow_vcpu(struct shadow_vcpu_state *shadow_state,
+ struct kvm_vcpu *host_vcpu,
+ struct kvm_shadow_vm *vm, int vcpu_idx)
+{
+ struct kvm_vcpu *shadow_vcpu = &shadow_state->vcpu;
+ int ret;
- if (vm->arch.pkvm.enabled)
- pkvm_vcpu_init_traps(shadow_vcpu);
- kvm_reset_pvm_sys_regs(shadow_vcpu);
+ host_vcpu = kern_hyp_va(host_vcpu);
+ if (hyp_pin_shared_mem(host_vcpu, host_vcpu + 1))
+ return -EBUSY;
- vm->vcpus[i] = shadow_vcpu;
- shadow_state->vm = vm;
+ if (host_vcpu->vcpu_idx != vcpu_idx) {
+ ret = -EINVAL;
+ goto done;
+ }
- shadow_vcpu->arch.hw_mmu = &vm->arch.mmu;
- shadow_vcpu->arch.pkvm.shadow_vm = vm;
- shadow_vcpu->arch.power_off = true;
+ shadow_vcpu->arch.pkvm.host_vcpu = host_vcpu;
+ shadow_vcpu->kvm = vm->host_kvm;
+ shadow_vcpu->vcpu_id = host_vcpu->vcpu_id;
+ shadow_vcpu->vcpu_idx = vcpu_idx;
- if (test_bit(KVM_ARM_VCPU_POWER_OFF, shadow_vcpu->arch.features)) {
- shadow_vcpu->arch.pkvm.power_state = PSCI_0_2_AFFINITY_LEVEL_OFF;
- } else if (pvm_has_pvmfw(vm)) {
- if (vm->pvmfw_entry_vcpu)
- return -EINVAL;
+ ret = copy_features(shadow_vcpu, host_vcpu);
+ if (ret)
+ goto done;
- vm->pvmfw_entry_vcpu = shadow_vcpu;
- shadow_vcpu->arch.reset_state.reset = true;
- shadow_vcpu->arch.pkvm.power_state = PSCI_0_2_AFFINITY_LEVEL_ON_PENDING;
- } else {
- struct vcpu_reset_state *reset_state = &shadow_vcpu->arch.reset_state;
+ ret = init_ptrauth(shadow_vcpu);
+ if (ret)
+ goto done;
+
+ if (test_bit(KVM_ARM_VCPU_SVE, shadow_vcpu->arch.features)) {
+ size_t sve_state_size;
+ void *sve_state;
+
+ shadow_vcpu->arch.sve_state =
+ READ_ONCE(host_vcpu->arch.sve_state);
+ shadow_vcpu->arch.sve_max_vl =
+ READ_ONCE(host_vcpu->arch.sve_max_vl);
+
+ sve_state = kern_hyp_va(shadow_vcpu->arch.sve_state);
+ sve_state_size = vcpu_sve_state_size(shadow_vcpu);
- reset_state->pc = *vcpu_pc(host_vcpu);
- reset_state->r0 = vcpu_get_reg(host_vcpu, 0);
- reset_state->reset = true;
- shadow_vcpu->arch.pkvm.power_state = PSCI_0_2_AFFINITY_LEVEL_ON_PENDING;
+ if (!shadow_vcpu->arch.sve_state || !sve_state_size ||
+ hyp_pin_shared_mem(sve_state, sve_state + sve_state_size)) {
+ clear_bit(KVM_ARM_VCPU_SVE, shadow_vcpu->arch.features);
+ shadow_vcpu->arch.sve_state = NULL;
+ shadow_vcpu->arch.sve_max_vl = 0;
+ ret = -EINVAL;
+ goto done;
}
}
- return 0;
+ if (vm->arch.pkvm.enabled)
+ pkvm_vcpu_init_traps(shadow_vcpu);
+ kvm_reset_pvm_sys_regs(shadow_vcpu);
+
+ vm->vcpus[vcpu_idx] = shadow_vcpu;
+ shadow_state->vm = vm;
+
+ shadow_vcpu->arch.hw_mmu = &vm->arch.mmu;
+ shadow_vcpu->arch.pkvm.shadow_vm = vm;
+ shadow_vcpu->arch.power_off = true;
+
+ if (test_bit(KVM_ARM_VCPU_POWER_OFF, shadow_vcpu->arch.features)) {
+ shadow_vcpu->arch.pkvm.power_state =
+ PSCI_0_2_AFFINITY_LEVEL_OFF;
+ } else if (pvm_has_pvmfw(vm)) {
+ if (vm->pvmfw_entry_vcpu) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ vm->pvmfw_entry_vcpu = shadow_vcpu;
+ shadow_vcpu->arch.reset_state.reset = true;
+ shadow_vcpu->arch.pkvm.power_state =
+ PSCI_0_2_AFFINITY_LEVEL_ON_PENDING;
+ } else {
+ struct vcpu_reset_state *reset_state =
+ &shadow_vcpu->arch.reset_state;
+
+ reset_state->pc = *vcpu_pc(host_vcpu);
+ reset_state->r0 = vcpu_get_reg(host_vcpu, 0);
+ reset_state->reset = true;
+ shadow_vcpu->arch.pkvm.power_state =
+ PSCI_0_2_AFFINITY_LEVEL_ON_PENDING;
+ }
+
+done:
+ if (ret)
+ unpin_host_vcpu(shadow_state);
+
+ return ret;
}
static bool __exists_shadow(struct kvm *host_kvm)
@@ -591,7 +591,7 @@ static size_t pkvm_get_shadow_size(int num_vcpus)
{
/* Shadow space for the vm struct and all of its vcpu states. */
return sizeof(struct kvm_shadow_vm) +
- sizeof(struct shadow_vcpu_state) * num_vcpus;
+ sizeof(struct shadow_vcpu_state *) * num_vcpus;
}
/*
@@ -613,14 +613,14 @@ static int check_shadow_size(int nr_vcpus, size_t shadow_size)
return 0;
}
-static void drain_shadow_vcpus(struct shadow_vcpu_state *shadow_vcpus,
+static void drain_shadow_vcpus(struct shadow_vcpu_state *shadow_vcpus[],
unsigned int nr_vcpus,
struct kvm_hyp_memcache *mc)
{
int i;
for (i = 0; i < nr_vcpus; i++) {
- struct kvm_vcpu *shadow_vcpu = &shadow_vcpus[i].vcpu;
+ struct kvm_vcpu *shadow_vcpu = &shadow_vcpus[i]->vcpu;
struct kvm_hyp_memcache *vcpu_mc = &shadow_vcpu->arch.pkvm_memcache;
void *addr;
@@ -645,8 +645,6 @@ static void drain_shadow_vcpus(struct shadow_vcpu_state *shadow_vcpus,
* Must be a multiple of the page size.
* pgd: The host va of the area being donated for the stage-2 PGD for the VM.
* Must be page aligned. Its size is implied by the VM's VTCR.
- * Note: An array to the host KVM VCPUs (host VA) is passed via the pgd, as to
- * not to be dependent on how the VCPU's are layed out in struct kvm.
*
* Return a unique handle to the protected VM on success,
* negative error code on failure.
@@ -698,19 +696,13 @@ int __pkvm_init_shadow(struct kvm *kvm,
if (ret)
goto err_remove_mappings;
- ret = set_host_vcpus(vm->shadow_vcpus, nr_vcpus, pgd, pgd_size);
- if (ret)
- goto err_remove_pgd;
-
- ret = init_shadow_structs(kvm, vm, pgd, nr_vcpus);
- if (ret < 0)
- goto err_unpin_host_vcpus;
+ init_shadow_vm(kvm, vm, nr_vcpus);
/* Add the entry to the shadow table. */
hyp_spin_lock(&shadow_lock);
ret = insert_shadow_table(kvm, vm, shadow_size);
if (ret < 0)
- goto err_unlock_unpin_host_vcpus;
+ goto err_unlock;
ret = kvm_guest_prepare_stage2(vm, pgd);
if (ret)
@@ -721,34 +713,105 @@ int __pkvm_init_shadow(struct kvm *kvm,
err_remove_shadow_table:
remove_shadow_table(vm->shadow_handle);
-err_unlock_unpin_host_vcpus:
+err_unlock:
hyp_spin_unlock(&shadow_lock);
-err_unpin_host_vcpus:
- unpin_host_vcpus(vm->shadow_vcpus, nr_vcpus);
-err_remove_pgd:
WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(pgd), nr_pgd_pages));
-
err_remove_mappings:
/* Clear the donated shadow memory on failure to avoid data leaks. */
memset(vm, 0, shadow_size);
WARN_ON(__pkvm_hyp_donate_host(hyp_phys_to_pfn(shadow_pa),
shadow_size >> PAGE_SHIFT));
-
err:
hyp_unpin_shared_mem(kvm, kvm + 1);
return ret;
}
+/*
+ * Initialize the protected vcpu state shadow copy in host-donated memory.
+ *
+ * shadow_handle: The handle for the protected vm.
+ * host_vcpu: A pointer to the corresponding host vcpu (host va).
+ * shadow_vcpu_hva: The host va of the area being donated for the vcpu state.
+ * Must be page aligned. The size of the area must be equal to
+ * the page-aligned size of kvm_shadow_vcpu_state.
+ *
+ * Return 0 on success, negative error code on failure.
+ */
+int __pkvm_init_shadow_vcpu(unsigned int shadow_handle,
+ struct kvm_vcpu *host_vcpu,
+ void *shadow_vcpu_hva)
+{
+ struct kvm_shadow_vm *vm;
+ struct shadow_vcpu_state *shadow_state = kern_hyp_va(shadow_vcpu_hva);
+ size_t vcpu_state_sz = sizeof(*shadow_state);
+ u64 nr_pages = PAGE_ALIGN(vcpu_state_sz) >> PAGE_SHIFT;
+ unsigned int idx;
+ int ret;
+
+ if (!PAGE_ALIGNED(shadow_vcpu_hva))
+ return -EINVAL;
+
+ ret = __pkvm_host_donate_hyp(hyp_virt_to_pfn(shadow_state),
+ nr_pages);
+ if (ret)
+ return ret;
+
+ memset(shadow_state, 0, vcpu_state_sz);
+
+ hyp_spin_lock(&shadow_lock);
+
+ vm = find_shadow_by_handle(shadow_handle);
+ if (!vm) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+
+ idx = vm->nr_vcpus;
+ if (idx >= vm->created_vcpus) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ ret = init_shadow_vcpu(shadow_state, host_vcpu, vm, idx);
+ if (ret)
+ goto unlock;
+
+ vm->shadow_vcpus[idx] = shadow_state;
+ vm->nr_vcpus++;
+unlock:
+ hyp_spin_unlock(&shadow_lock);
+
+ if (ret) {
+ memset(shadow_state, 0, vcpu_state_sz);
+ WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(shadow_state),
+ nr_pages));
+ }
+
+ return ret;
+}
+
+static void teardown_donated_memory(struct kvm_hyp_memcache *mc, void *addr,
+ size_t size)
+{
+ u64 pfn = hyp_phys_to_pfn(__hyp_pa(addr));
+ u64 nr_pages = size >> PAGE_SHIFT;
+ void *start;
+
+ memset(addr, 0, size);
+
+ for (start = addr; start < addr + size; start += PAGE_SIZE)
+ push_hyp_memcache(mc, start, hyp_virt_to_phys);
+
+ WARN_ON(__pkvm_hyp_donate_host(pfn, nr_pages));
+}
+
int __pkvm_teardown_shadow(int shadow_handle)
{
struct kvm_hyp_memcache *mc;
struct kvm_shadow_vm *vm;
struct kvm *host_kvm;
- size_t shadow_size;
+ unsigned int nr_vcpus;
int err;
- u64 pfn;
- u64 nr_pages;
- void *addr;
int i;
/* Lookup then remove entry from the shadow table. */
@@ -764,6 +827,9 @@ int __pkvm_teardown_shadow(int shadow_handle)
goto err_unlock;
}
+ host_kvm = vm->host_kvm;
+ nr_vcpus = vm->nr_vcpus;
+
/*
* Clear the tracking for last_loaded_vcpu for all cpus for this vm in
* case the same addresses for those vcpus are reused for future vms.
@@ -783,22 +849,17 @@ int __pkvm_teardown_shadow(int shadow_handle)
hyp_spin_unlock(&shadow_lock);
/* Reclaim guest pages, and page-table pages */
- mc = &vm->host_kvm->arch.pkvm.teardown_mc;
+ mc = &host_kvm->arch.pkvm.teardown_mc;
reclaim_guest_pages(vm, mc);
- drain_shadow_vcpus(vm->shadow_vcpus, vm->created_vcpus, mc);
- unpin_host_vcpus(vm->shadow_vcpus, vm->created_vcpus);
+ drain_shadow_vcpus(vm->shadow_vcpus, nr_vcpus, mc);
+ unpin_host_vcpus(vm->shadow_vcpus, nr_vcpus);
- /* Push the metadata pages to the teardown memcache */
- shadow_size = vm->shadow_area_size;
- host_kvm = vm->host_kvm;
- memset(vm, 0, shadow_size);
- for (addr = vm; addr < ((void *)vm + shadow_size); addr += PAGE_SIZE)
- push_hyp_memcache(mc, addr, hyp_virt_to_phys);
- hyp_unpin_shared_mem(host_kvm, host_kvm + 1);
+ for (i = 0; i < nr_vcpus; i++)
+ teardown_donated_memory(mc, vm->shadow_vcpus[i],
+ PAGE_ALIGN(sizeof(vm->shadow_vcpus[i])));
+ teardown_donated_memory(mc, vm, vm->shadow_area_size);
- pfn = hyp_phys_to_pfn(__hyp_pa(vm));
- nr_pages = shadow_size >> PAGE_SHIFT;
- WARN_ON(__pkvm_hyp_donate_host(pfn, nr_pages));
+ hyp_unpin_shared_mem(host_kvm, host_kvm + 1);
return 0;
err_unlock:
@@ -919,7 +980,7 @@ struct kvm_vcpu *pvm_mpidr_to_vcpu(struct kvm_shadow_vm *vm, unsigned long mpidr
mpidr &= MPIDR_HWID_BITMASK;
- for (i = 0; i < vm->created_vcpus; i++) {
+ for (i = 0; i < READ_ONCE(vm->nr_vcpus); i++) {
vcpu = vm->vcpus[i];
if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu))
@@ -1036,7 +1097,7 @@ static bool pvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
* then if at least one is PENDING_ON then return PENDING_ON.
* Otherwise, return OFF.
*/
- for (i = 0; i < vm->created_vcpus; i++) {
+ for (i = 0; i < READ_ONCE(vm->nr_vcpus); i++) {
tmp = vm->vcpus[i];
mpidr = kvm_vcpu_get_mpidr_aff(tmp);
diff --git a/arch/arm64/kvm/pkvm.c b/arch/arm64/kvm/pkvm.c
index f7f6b08dd371..e335c9b2c00a 100644
--- a/arch/arm64/kvm/pkvm.c
+++ b/arch/arm64/kvm/pkvm.c
@@ -120,8 +120,8 @@ void __init kvm_hyp_reserve(void)
*/
static int __create_el2_shadow(struct kvm *kvm)
{
- struct kvm_vcpu *vcpu, **vcpu_array;
- size_t pgd_sz, shadow_sz;
+ struct kvm_vcpu *vcpu;
+ size_t pgd_sz, shadow_sz, vcpu_state_sz;
void *pgd, *shadow_addr;
unsigned long idx;
int shadow_handle;
@@ -140,21 +140,15 @@ static int __create_el2_shadow(struct kvm *kvm)
if (!pgd)
return -ENOMEM;
- /* Allocate memory to donate to hyp for the kvm and vcpu state. */
+ /* Allocate memory to donate to hyp for the kvm and vcpu state pointers. */
shadow_sz = PAGE_ALIGN(KVM_SHADOW_VM_SIZE +
- SHADOW_VCPU_STATE_SIZE * kvm->created_vcpus);
+ sizeof(void *) * kvm->created_vcpus);
shadow_addr = alloc_pages_exact(shadow_sz, GFP_KERNEL_ACCOUNT);
if (!shadow_addr) {
ret = -ENOMEM;
goto free_pgd;
}
- /* Stash the vcpu pointers into the PGD */
- BUILD_BUG_ON(KVM_MAX_VCPUS > (PAGE_SIZE / sizeof(u64)));
- vcpu_array = pgd;
- kvm_for_each_vcpu(idx, vcpu, kvm)
- vcpu_array[idx] = vcpu;
-
/* Donate the shadow memory to hyp and let hyp initialize it. */
ret = kvm_call_hyp_nvhe(__pkvm_init_shadow, kvm, shadow_addr, shadow_sz,
pgd);
@@ -166,8 +160,35 @@ static int __create_el2_shadow(struct kvm *kvm)
/* Store the shadow handle given by hyp for future call reference. */
kvm->arch.pkvm.shadow_handle = shadow_handle;
+ /* Donate memory for the vcpu state at hyp and initialize it. */
+ vcpu_state_sz = PAGE_ALIGN(SHADOW_VCPU_STATE_SIZE);
+ kvm_for_each_vcpu (idx, vcpu, kvm) {
+ void *vcpu_state;
+
+ /* Indexing of the vcpus to be sequential starting at 0. */
+ if (WARN_ON(vcpu->vcpu_idx != idx)) {
+ ret = -EINVAL;
+ goto destroy_vm;
+ }
+ vcpu_state = alloc_pages_exact(vcpu_state_sz, GFP_KERNEL_ACCOUNT);
+ if (!vcpu_state) {
+ ret = -ENOMEM;
+ goto destroy_vm;
+ }
+
+ ret = kvm_call_hyp_nvhe(__pkvm_init_shadow_vcpu, shadow_handle,
+ vcpu, vcpu_state);
+ if (ret) {
+ free_pages_exact(vcpu_state, vcpu_state_sz);
+ goto destroy_vm;
+ }
+ }
+
return 0;
+destroy_vm:
+ kvm_shadow_destroy(kvm);
+ return ret;
free_shadow:
free_pages_exact(shadow_addr, shadow_sz);
free_pgd:
diff --git a/build.config.gs101 b/build.config.gs101
index fed552e45171..c2f079abd5a9 100644
--- a/build.config.gs101
+++ b/build.config.gs101
@@ -43,6 +43,7 @@ android/abi_gki_aarch64_fips140
android/abi_gki_aarch64_virtual_device
android/abi_gki_aarch64_db845c
android/abi_gki_aarch64_hikey960
+android/abi_gki_aarch64_rockchip
"
TRIM_NONLISTED_KMI=${TRIM_NONLISTED_KMI:-1}
KMI_SYMBOL_LIST_ADD_ONLY=1
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 0de98abd7282..b93695d28e43 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -623,6 +623,8 @@ static int f2fs_unlink(struct inode *dir, struct dentry *dentry)
goto fail;
}
f2fs_delete_entry(de, page, dir, inode);
+ f2fs_unlock_op(sbi);
+
#ifdef CONFIG_UNICODE
/* VFS negative dentries are incompatible with Encoding and
* Case-insensitiveness. Eventually we'll want avoid
@@ -633,8 +635,6 @@ static int f2fs_unlink(struct inode *dir, struct dentry *dentry)
if (IS_CASEFOLDED(dir))
d_invalidate(dentry);
#endif
- f2fs_unlock_op(sbi);
-
if (IS_DIRSYNC(dir))
f2fs_sync_fs(sbi->sb, 1);
fail:
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index f753c307b8b3..181aac4b5187 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -8,6 +8,9 @@
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/atomic.h>
+#if defined(CONFIG_ARM64) && !defined(__GENKSYMS__)
+#include <linux/mmu_context.h>
+#endif
#ifdef CONFIG_FREEZER
extern atomic_t system_freezing_cnt; /* nr of freezing conds in effect */
@@ -108,10 +111,15 @@ static inline bool cgroup_freezing(struct task_struct *task)
* The caller shouldn't do anything which isn't allowed for a frozen task
* until freezer_cont() is called. Usually, freezer[_do_not]_count() pair
* wrap a scheduling operation and nothing much else.
+ *
+ * The write to current->flags uses release semantics to prevent a concurrent
+ * freezer_should_skip() from observing this write before a write to on_rq
+ * during a prior call to activate_task(), which may cause it to return true
+ * before deactivate_task() is called.
*/
static inline void freezer_do_not_count(void)
{
- current->flags |= PF_FREEZER_SKIP;
- smp_store_release(&current->flags, current->flags | PF_FREEZER_SKIP);
}
/**
@@ -161,7 +169,19 @@ static inline bool freezer_should_skip(struct task_struct *p)
* clearing %PF_FREEZER_SKIP.
*/
smp_mb();
+#ifdef CONFIG_ARM64
+ return (p->flags & PF_FREEZER_SKIP) &&
+ (!p->on_rq || task_cpu_possible_mask(p) == cpu_possible_mask);
+#else
+ /*
+ * On non-aarch64, avoid depending on task_cpu_possible_mask(), which is
+ * defined in <linux/mmu_context.h>, because including that header from
+ * here exposes a tricky bug in the tracepoint headers on x86, and that
+ * macro would end up being defined equal to cpu_possible_mask on other
+ * architectures anyway.
+ */
return p->flags & PF_FREEZER_SKIP;
+#endif
}
/*
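A rough C11 analogue of the ordering the new comment describes, with atomics standing in for smp_store_release()/smp_mb() and plain fields standing in for ->flags and ->on_rq. This is not kernel code, and the real arm64 check additionally lets tasks with an unrestricted task_cpu_possible_mask() skip regardless of on_rq.

#include <stdatomic.h>
#include <stdbool.h>

#define PF_FREEZER_SKIP 0x40000000u

static _Atomic unsigned int flags; /* stand-in for current->flags */
static _Atomic int on_rq;          /* stand-in for p->on_rq */

/* Task side: runs after a prior activate_task() stored on_rq = 1. */
static void freezer_do_not_count_sketch(void)
{
        unsigned int f = atomic_load_explicit(&flags, memory_order_relaxed);

        /*
         * Release store: anyone who observes PF_FREEZER_SKIP set is also
         * guaranteed to observe the earlier on_rq = 1 from activate_task(),
         * so the freezer cannot conclude the task is already off the
         * runqueue before deactivate_task() has actually run.
         */
        atomic_store_explicit(&flags, f | PF_FREEZER_SKIP,
                              memory_order_release);
}

/* Freezer side: mirrors the smp_mb() plus flag/on_rq reads in the patch. */
static bool freezer_should_skip_sketch(void)
{
        atomic_thread_fence(memory_order_seq_cst); /* smp_mb() */

        if (!(atomic_load_explicit(&flags, memory_order_acquire) &
              PF_FREEZER_SKIP))
                return false;

        return atomic_load_explicit(&on_rq, memory_order_relaxed) == 0;
}

int main(void)
{
        atomic_store_explicit(&on_rq, 1, memory_order_relaxed); /* activate_task() */
        freezer_do_not_count_sketch();
        /* The freezer must not skip here: the task is still on the runqueue. */
        return freezer_should_skip_sketch() ? 1 : 0;
}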
diff --git a/include/trace/hooks/mm.h b/include/trace/hooks/mm.h
index 236aaeeff274..785ec3da0aa4 100644
--- a/include/trace/hooks/mm.h
+++ b/include/trace/hooks/mm.h
@@ -110,14 +110,14 @@ DECLARE_HOOK(android_vh_reclaim_pages_plug,
TP_PROTO(bool *do_plug),
TP_ARGS(do_plug));
DECLARE_HOOK(android_vh_zap_pte_range_tlb_start,
- TP_PROTO(void *unused),
- TP_ARGS(unused));
+ TP_PROTO(void *ret),
+ TP_ARGS(ret));
DECLARE_HOOK(android_vh_zap_pte_range_tlb_force_flush,
TP_PROTO(struct page *page, bool *flush),
TP_ARGS(page, flush));
DECLARE_HOOK(android_vh_zap_pte_range_tlb_end,
- TP_PROTO(void *unused),
- TP_ARGS(unused));
+ TP_PROTO(void *ret),
+ TP_ARGS(ret));
DECLARE_HOOK(android_vh_skip_lru_disable,
TP_PROTO(bool *skip),
TP_ARGS(skip));
diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
index 533d81ed74d4..cd4a81114f75 100644
--- a/init/do_mounts_initrd.c
+++ b/init/do_mounts_initrd.c
@@ -83,7 +83,7 @@ static void __init handle_initrd(void)
* In case that a resume from disk is carried out by linuxrc or one of
* its children, we need to tell the freezer not to wait for us.
*/
- current->flags |= PF_FREEZER_SKIP;
+ freezer_do_not_count();
info = call_usermodehelper_setup("/linuxrc", argv, envp_init,
GFP_KERNEL, init_linuxrc, NULL, NULL);
diff --git a/kernel/power/main.c b/kernel/power/main.c
index d6140ed15d0b..f985c3ccbf2f 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -23,7 +23,7 @@
void lock_system_sleep(void)
{
- current->flags |= PF_FREEZER_SKIP;
+ freezer_do_not_count();
mutex_lock(&system_transition_mutex);
}
EXPORT_SYMBOL_GPL(lock_system_sleep);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f805a67f877e..5000f595fac0 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4617,23 +4617,6 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
BUG();
}
-static bool __task_can_run(struct task_struct *prev)
-{
- if (__fatal_signal_pending(prev))
- return true;
-
- if (!frozen_or_skipped(prev))
- return true;
-
- /*
- * We can't safely go back on the runqueue if we're an asymmetric
- * task skipping the freezer. Doing so can lead to migration failures
- * later on if there aren't any suitable CPUs left around for us to
- * move to.
- */
- return task_cpu_possible_mask(prev) == cpu_possible_mask;
-}
-
/*
* __schedule() is the main scheduler function.
*
@@ -4727,7 +4710,7 @@ static void __sched notrace __schedule(bool preempt)
*/
prev_state = prev->state;
if (!preempt && prev_state) {
- if (signal_pending_state(prev_state, prev) && __task_can_run(prev)) {
+ if (signal_pending_state(prev_state, prev)) {
prev->state = TASK_RUNNING;
} else {
prev->sched_contributes_to_load =
diff --git a/mm/memory.c b/mm/memory.c
index f0fa06835537..83b715ed6577 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1230,10 +1230,11 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
pte_t *start_pte;
pte_t *pte;
swp_entry_t entry;
+ int v_ret = 0;
tlb_change_page_size(tlb, PAGE_SIZE);
again:
- trace_android_vh_zap_pte_range_tlb_start(NULL);
+ trace_android_vh_zap_pte_range_tlb_start(&v_ret);
init_rss_vec(rss);
start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
pte = start_pte;
@@ -1349,7 +1350,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
tlb_flush_mmu(tlb);
}
- trace_android_vh_zap_pte_range_tlb_end(NULL);
+ trace_android_vh_zap_pte_range_tlb_end(&v_ret);
if (addr != end) {
cond_resched();
goto again;
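Changing the hook prototype from an unused pointer to &v_ret lets a vendor module carry per-invocation state from the _tlb_start probe to the matching _tlb_end probe within one zap_pte_range() pass. A minimal sketch of such a probe pair follows, assuming the register_trace_android_vh_* helpers that DECLARE_HOOK generates; the module name and the logic are illustrative only.

// SPDX-License-Identifier: GPL-2.0
/* Illustrative vendor module hooking the zap_pte_range() window. */
#include <linux/module.h>
#include <trace/hooks/mm.h>

static void vendor_zap_tlb_start(void *data, void *ret)
{
        int *v_ret = ret;

        /* Points at zap_pte_range()'s v_ret; stash a marker for _tlb_end. */
        *v_ret = 1;
}

static void vendor_zap_tlb_end(void *data, void *ret)
{
        int *v_ret = ret;

        if (*v_ret)
                pr_debug("zap_pte_range window observed by vendor hook\n");
}

static int __init vendor_zap_hooks_init(void)
{
        int err;

        err = register_trace_android_vh_zap_pte_range_tlb_start(
                        vendor_zap_tlb_start, NULL);
        if (err)
                return err;

        err = register_trace_android_vh_zap_pte_range_tlb_end(
                        vendor_zap_tlb_end, NULL);
        if (err)
                unregister_trace_android_vh_zap_pte_range_tlb_start(
                                vendor_zap_tlb_start, NULL);
        return err;
}
module_init(vendor_zap_hooks_init);
MODULE_LICENSE("GPL");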