mirror of https://github.com/LuckfoxTECH/luckfox-pico.git
synced 2026-01-19 09:52:31 +01:00
project:build.sh: Added fastboot support; custom modifications to U-Boot and kernel implemented using patches.
project:cfg:BoardConfig_IPC: Added fastboot BoardConfig file and firmware post-scripts, distinguishing between the BoardConfigs for Luckfox Pico Pro and Luckfox Pico Max.
project:app: Added fastboot_client and rk_smart_door for quick-boot applications; updated the rkipc app to adapt to the latest media library.
media:samples: Added more usage examples.
media:rockit: Fixed bugs; removed support for retrieving data frames from VPSS.
media:isp: Updated the rkaiq library and related tools to support connection to RKISP_Tuner.
sysdrv:Makefile: Added support for compiling drv_ko on Luckfox Pico Ultra W using Ubuntu; added support for a custom root filesystem.
sysdrv:tools:board: Updated Buildroot optional mirror sources, updated some software versions, and stored the device tree and configuration files that undergo repeated modification for U-Boot and the kernel separately.
sysdrv:source:mcu: Used the RISC-V MCU SDK with the RT-Thread system, mainly for initializing the camera AE during quick boot.
sysdrv:source:uboot: Added support for fastboot; added a high-baud-rate DDR bin for serial firmware upgrades.
sysdrv:source:kernel: Upgraded to version 5.10.160; increased the NPU frequency for RV1106G3; added support for fastboot.

Signed-off-by: luckfox-eng29 <eng29@luckfox.com>
@@ -507,7 +507,6 @@ config KEXEC
 config KEXEC_FILE
 	bool "kexec file based system call"
 	select KEXEC_CORE
-	select BUILD_BIN2C
 	depends on CRYPTO
 	depends on CRYPTO_SHA256
 	depends on CRYPTO_SHA256_S390
@@ -32,6 +32,16 @@ KBUILD_CFLAGS_DECOMPRESSOR += -fno-stack-protector
 KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-disable-warning, address-of-packed-member)
 KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),-g)
 KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO_DWARF4), $(call cc-option, -gdwarf-4,))
+
+ifdef CONFIG_CC_IS_GCC
+ifeq ($(call cc-ifversion, -ge, 1200, y), y)
+ifeq ($(call cc-ifversion, -lt, 1300, y), y)
+KBUILD_CFLAGS += $(call cc-disable-warning, array-bounds)
+KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-disable-warning, array-bounds)
+endif
+endif
+endif
+
 UTS_MACHINE	:= s390x
 STACK_SIZE	:= $(if $(CONFIG_KASAN),65536,16384)
 CHECKFLAGS	+= -D__s390__ -D__s390x__
@@ -91,8 +91,17 @@ SECTIONS
 		_compressed_start = .;
 		*(.vmlinux.bin.compressed)
 		_compressed_end = .;
-		FILL(0xff);
-		. = ALIGN(4096);
 	}
+
+#define SB_TRAILER_SIZE 32
+	/* Trailer needed for Secure Boot */
+	. += SB_TRAILER_SIZE; /* make sure .sb.trailer does not overwrite the previous section */
+	. = ALIGN(4096) - SB_TRAILER_SIZE;
+	.sb.trailer : {
+		QUAD(0)
+		QUAD(0)
+		QUAD(0)
+		QUAD(0x000000207a49504c)
+	}
 	_end = .;
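An aside on the magic value in the .sb.trailer hunk above: the low four bytes 0x7a49504c are the ASCII characters "zIPL", and the 0x20 above them is an ASCII space, so the trailer ends in a readable tag. A stand-alone C check of that decoding (illustrative, not kernel code):

/* Decode the secure-boot trailer magic into its ASCII bytes. */
#include <stdio.h>

int main(void)
{
	unsigned long long magic = 0x000000207a49504cULL;

	/* print the four low bytes most-significant first: "zIPL" */
	for (int shift = 24; shift >= 0; shift -= 8)
		putchar((int)((magic >> shift) & 0xff));
	putchar('\n');
	return 0;
}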
@@ -701,7 +701,7 @@ static inline void _gcm_sg_unmap_and_advance(struct gcm_sg_walk *gw,
 					     unsigned int nbytes)
 {
 	gw->walk_bytes_remain -= nbytes;
-	scatterwalk_unmap(&gw->walk);
+	scatterwalk_unmap(gw->walk_ptr);
 	scatterwalk_advance(&gw->walk, nbytes);
 	scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
 	gw->walk_ptr = NULL;
@@ -776,7 +776,7 @@ static int gcm_out_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
 		goto out;
 	}
 
-	scatterwalk_unmap(&gw->walk);
+	scatterwalk_unmap(gw->walk_ptr);
 	gw->walk_ptr = NULL;
 
 	gw->ptr = gw->buf;
@@ -2,126 +2,17 @@
 /*
  * s390 arch random implementation.
  *
- * Copyright IBM Corp. 2017, 2018
+ * Copyright IBM Corp. 2017, 2020
  * Author(s): Harald Freudenberger
  *
- * The s390_arch_random_generate() function may be called from random.c
- * in interrupt context. So this implementation does the best to be very
- * fast. There is a buffer of random data which is asynchronously checked
- * and filled by a workqueue thread.
- * If there are enough bytes in the buffer the s390_arch_random_generate()
- * just delivers these bytes. Otherwise false is returned until the
- * worker thread refills the buffer.
- * The worker fills the rng buffer by pulling fresh entropy from the
- * high quality (but slow) true hardware random generator. This entropy
- * is then spread over the buffer with an pseudo random generator PRNG.
- * As the arch_get_random_seed_long() fetches 8 bytes and the calling
- * function add_interrupt_randomness() counts this as 1 bit entropy the
- * distribution needs to make sure there is in fact 1 bit entropy contained
- * in 8 bytes of the buffer. The current values pull 32 byte entropy
- * and scatter this into a 2048 byte buffer. So 8 byte in the buffer
- * will contain 1 bit of entropy.
- * The worker thread is rescheduled based on the charge level of the
- * buffer but at least with 500 ms delay to avoid too much CPU consumption.
- * So the max. amount of rng data delivered via arch_get_random_seed is
- * limited to 4k bytes per second.
  */
 
 #include <linux/kernel.h>
 #include <linux/atomic.h>
 #include <linux/random.h>
-#include <linux/slab.h>
 #include <linux/static_key.h>
-#include <linux/workqueue.h>
 #include <asm/cpacf.h>
 
 DEFINE_STATIC_KEY_FALSE(s390_arch_random_available);
 
 atomic64_t s390_arch_random_counter = ATOMIC64_INIT(0);
 EXPORT_SYMBOL(s390_arch_random_counter);
-
-#define ARCH_REFILL_TICKS		(HZ/2)
-#define ARCH_PRNG_SEED_SIZE		32
-#define ARCH_RNG_BUF_SIZE		2048
-
-static DEFINE_SPINLOCK(arch_rng_lock);
-static u8 *arch_rng_buf;
-static unsigned int arch_rng_buf_idx;
-
-static void arch_rng_refill_buffer(struct work_struct *);
-static DECLARE_DELAYED_WORK(arch_rng_work, arch_rng_refill_buffer);
-
-bool s390_arch_random_generate(u8 *buf, unsigned int nbytes)
-{
-	/* max hunk is ARCH_RNG_BUF_SIZE */
-	if (nbytes > ARCH_RNG_BUF_SIZE)
-		return false;
-
-	/* lock rng buffer */
-	if (!spin_trylock(&arch_rng_lock))
-		return false;
-
-	/* try to resolve the requested amount of bytes from the buffer */
-	arch_rng_buf_idx -= nbytes;
-	if (arch_rng_buf_idx < ARCH_RNG_BUF_SIZE) {
-		memcpy(buf, arch_rng_buf + arch_rng_buf_idx, nbytes);
-		atomic64_add(nbytes, &s390_arch_random_counter);
-		spin_unlock(&arch_rng_lock);
-		return true;
-	}
-
-	/* not enough bytes in rng buffer, refill is done asynchronously */
-	spin_unlock(&arch_rng_lock);
-
-	return false;
-}
-EXPORT_SYMBOL(s390_arch_random_generate);
-
-static void arch_rng_refill_buffer(struct work_struct *unused)
-{
-	unsigned int delay = ARCH_REFILL_TICKS;
-
-	spin_lock(&arch_rng_lock);
-	if (arch_rng_buf_idx > ARCH_RNG_BUF_SIZE) {
-		/* buffer is exhausted and needs refill */
-		u8 seed[ARCH_PRNG_SEED_SIZE];
-		u8 prng_wa[240];
-		/* fetch ARCH_PRNG_SEED_SIZE bytes of entropy */
-		cpacf_trng(NULL, 0, seed, sizeof(seed));
-		/* blow this entropy up to ARCH_RNG_BUF_SIZE with PRNG */
-		memset(prng_wa, 0, sizeof(prng_wa));
-		cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED,
-			   &prng_wa, NULL, 0, seed, sizeof(seed));
-		cpacf_prno(CPACF_PRNO_SHA512_DRNG_GEN,
-			   &prng_wa, arch_rng_buf, ARCH_RNG_BUF_SIZE, NULL, 0);
-		arch_rng_buf_idx = ARCH_RNG_BUF_SIZE;
-	}
-	delay += (ARCH_REFILL_TICKS * arch_rng_buf_idx) / ARCH_RNG_BUF_SIZE;
-	spin_unlock(&arch_rng_lock);
-
-	/* kick next check */
-	queue_delayed_work(system_long_wq, &arch_rng_work, delay);
-}
-
-static int __init s390_arch_random_init(void)
-{
-	/* all the needed PRNO subfunctions available ? */
-	if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG) &&
-	    cpacf_query_func(CPACF_PRNO, CPACF_PRNO_SHA512_DRNG_GEN)) {
-
-		/* alloc arch random working buffer */
-		arch_rng_buf = kmalloc(ARCH_RNG_BUF_SIZE, GFP_KERNEL);
-		if (!arch_rng_buf)
-			return -ENOMEM;
-
-		/* kick worker queue job to fill the random buffer */
-		queue_delayed_work(system_long_wq,
-				   &arch_rng_work, ARCH_REFILL_TICKS);
-
-		/* enable arch random to the outside world */
-		static_branch_enable(&s390_arch_random_available);
-	}
-
-	return 0;
-}
-arch_initcall(s390_arch_random_init);
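The buffered generator deleted above worked the way its old header comment describes: a trylock-guarded buffer is drained on the fast path, and a delayed worker refills it from the slow hardware TRNG. A minimal user-space sketch of that shape, with a POSIX thread standing in for the kernel workqueue and getrandom(2) standing in for cpacf_trng (all names here are illustrative, not kernel API):

/* User-space analogue of the removed buffered design. */
#include <pthread.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <sys/random.h>

#define RNG_BUF_SIZE 2048

static pthread_mutex_t rng_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned char rng_buf[RNG_BUF_SIZE];
static unsigned int rng_buf_idx;	/* bytes still available */

/* Fast path: never blocks, returns false when the buffer is empty. */
static bool rng_generate(unsigned char *out, unsigned int nbytes)
{
	bool ok = false;

	if (nbytes > RNG_BUF_SIZE)
		return false;
	if (pthread_mutex_trylock(&rng_lock) != 0)
		return false;
	if (rng_buf_idx >= nbytes) {
		rng_buf_idx -= nbytes;
		memcpy(out, rng_buf + rng_buf_idx, nbytes);
		ok = true;
	}
	pthread_mutex_unlock(&rng_lock);
	return ok;
}

/* Slow path: a background thread refills the exhausted buffer. */
static void *rng_refill(void *arg)
{
	(void)arg;
	for (;;) {
		pthread_mutex_lock(&rng_lock);
		if (rng_buf_idx == 0 &&
		    getrandom(rng_buf, RNG_BUF_SIZE, 0) == RNG_BUF_SIZE)
			rng_buf_idx = RNG_BUF_SIZE;
		pthread_mutex_unlock(&rng_lock);
		usleep(500 * 1000);	/* ~500 ms pacing, as in the old code */
	}
	return NULL;
}

The replacement code above drops this machinery entirely and calls the TRNG directly from task context, which is why the buffer, worker, and exported generate function all disappear.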
@@ -437,7 +437,7 @@ __init int hypfs_diag_init(void)
 	int rc;
 
 	if (diag204_probe()) {
-		pr_err("The hardware system does not support hypfs\n");
+		pr_info("The hardware system does not support hypfs\n");
 		return -ENODATA;
 	}
 
@@ -496,9 +496,9 @@ fail_hypfs_sprp_exit:
 	hypfs_vm_exit();
 fail_hypfs_diag_exit:
 	hypfs_diag_exit();
+	pr_err("Initialization of hypfs failed with rc=%i\n", rc);
 fail_dbfs_exit:
 	hypfs_dbfs_exit();
-	pr_err("Initialization of hypfs failed with rc=%i\n", rc);
 	return rc;
 }
 device_initcall(hypfs_init)
@@ -2,7 +2,7 @@
 /*
  * Kernel interface for the s390 arch_random_* functions
  *
- * Copyright IBM Corp. 2017
+ * Copyright IBM Corp. 2017, 2022
  *
  * Author: Harald Freudenberger <freude@de.ibm.com>
  *
@@ -14,13 +14,13 @@
 #ifdef CONFIG_ARCH_RANDOM
 
 #include <linux/static_key.h>
+#include <linux/preempt.h>
 #include <linux/atomic.h>
+#include <asm/cpacf.h>
 
 DECLARE_STATIC_KEY_FALSE(s390_arch_random_available);
 extern atomic64_t s390_arch_random_counter;
 
-bool s390_arch_random_generate(u8 *buf, unsigned int nbytes);
-
 static inline bool __must_check arch_get_random_long(unsigned long *v)
 {
 	return false;
@@ -33,16 +33,22 @@ static inline bool __must_check arch_get_random_int(unsigned int *v)
 
 static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
 {
-	if (static_branch_likely(&s390_arch_random_available)) {
-		return s390_arch_random_generate((u8 *)v, sizeof(*v));
+	if (static_branch_likely(&s390_arch_random_available) &&
+	    in_task()) {
+		cpacf_trng(NULL, 0, (u8 *)v, sizeof(*v));
+		atomic64_add(sizeof(*v), &s390_arch_random_counter);
+		return true;
 	}
 	return false;
 }
 
 static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
 {
-	if (static_branch_likely(&s390_arch_random_available)) {
-		return s390_arch_random_generate((u8 *)v, sizeof(*v));
+	if (static_branch_likely(&s390_arch_random_available) &&
+	    in_task()) {
+		cpacf_trng(NULL, 0, (u8 *)v, sizeof(*v));
+		atomic64_add(sizeof(*v), &s390_arch_random_counter);
+		return true;
 	}
 	return false;
 }
@@ -16,7 +16,8 @@
 		"3: jl    1b\n"						\
 		"   lhi   %0,0\n"					\
 		"4: sacf  768\n"					\
-		EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b)		\
+		EX_TABLE(0b,4b) EX_TABLE(1b,4b)				\
+		EX_TABLE(2b,4b) EX_TABLE(3b,4b)				\
 		: "=d" (ret), "=&d" (oldval), "=&d" (newval),		\
 		  "=m" (*uaddr)						\
 		: "0" (-EFAULT), "d" (oparg), "a" (uaddr),		\
@@ -148,4 +148,6 @@ void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long dirty_bitmap[4],
 			     unsigned long gaddr, unsigned long vmaddr);
 int gmap_mark_unmergeable(void);
 void s390_reset_acc(struct mm_struct *mm);
+void s390_unlist_old_asce(struct gmap *gmap);
+int s390_replace_asce(struct gmap *gmap);
 #endif /* _ASM_S390_GMAP_H */
@@ -28,9 +28,11 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
 static inline int prepare_hugepage_range(struct file *file,
 			unsigned long addr, unsigned long len)
 {
-	if (len & ~HPAGE_MASK)
+	struct hstate *h = hstate_file(file);
+
+	if (len & ~huge_page_mask(h))
 		return -EINVAL;
-	if (addr & ~HPAGE_MASK)
+	if (addr & ~huge_page_mask(h))
 		return -EINVAL;
 	return 0;
 }
@@ -9,6 +9,8 @@
 #ifndef _S390_KEXEC_H
 #define _S390_KEXEC_H
 
+#include <linux/module.h>
+
 #include <asm/processor.h>
 #include <asm/page.h>
 #include <asm/setup.h>
@@ -83,4 +85,12 @@ struct kimage_arch {
 extern const struct kexec_file_ops s390_kexec_image_ops;
 extern const struct kexec_file_ops s390_kexec_elf_ops;
 
+#ifdef CONFIG_KEXEC_FILE
+struct purgatory_info;
+int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
+				     Elf_Shdr *section,
+				     const Elf_Shdr *relsec,
+				     const Elf_Shdr *symtab);
+#define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add
+#endif
 #endif /*_S390_KEXEC_H */
@@ -46,10 +46,17 @@ static inline bool test_preempt_need_resched(void)
 
 static inline void __preempt_count_add(int val)
 {
-	if (__builtin_constant_p(val) && (val >= -128) && (val <= 127))
-		__atomic_add_const(val, &S390_lowcore.preempt_count);
-	else
-		__atomic_add(val, &S390_lowcore.preempt_count);
+	/*
+	 * With some obscure config options and CONFIG_PROFILE_ALL_BRANCHES
+	 * enabled, gcc 12 fails to handle __builtin_constant_p().
+	 */
+	if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES)) {
+		if (__builtin_constant_p(val) && (val >= -128) && (val <= 127)) {
+			__atomic_add_const(val, &S390_lowcore.preempt_count);
+			return;
+		}
+	}
+	__atomic_add(val, &S390_lowcore.preempt_count);
 }
 
 static inline void __preempt_count_sub(int val)
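For reference, the fast path the guard above protects relies on __builtin_constant_p() being resolved at compile time: literal arguments fold to the immediate-operand helper with no runtime branch. A stand-alone C sketch of that pattern (add_const/add_any are hypothetical stand-ins for the atomic helpers, not kernel functions):

/* Compile-time selection of a fast path via __builtin_constant_p (GCC/Clang). */
#include <stdio.h>

static int counter;

static void add_any(int val)   { counter += val; /* generic path */ }
static void add_const(int val) { counter += val; /* imagine an immediate-form insn */ }

static inline void preempt_count_add(int val)
{
	if (__builtin_constant_p(val) && val >= -128 && val <= 127)
		add_const(val);	/* chosen at compile time for literals */
	else
		add_any(val);	/* runtime-variable values */
}

int main(void)
{
	int n = 42;

	preempt_count_add(1);	/* constant: folds to the fast path */
	preempt_count_add(n);	/* variable: generic path */
	printf("%d\n", counter);
	return 0;
}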
@@ -65,6 +65,7 @@ void arch_setup_new_exec(void);
 #define TIF_GUARDED_STORAGE	4	/* load guarded storage control block */
 #define TIF_PATCH_PENDING	5	/* pending live patching update */
 #define TIF_PGSTE		6	/* New mm's will use 4K page tables */
+#define TIF_NOTIFY_SIGNAL	7	/* signal notifications exist */
 #define TIF_ISOLATE_BP		8	/* Run process with isolated BP */
 #define TIF_ISOLATE_BP_GUEST	9	/* Run KVM guests with isolated BP */
@@ -82,6 +83,7 @@ void arch_setup_new_exec(void);
 #define TIF_SYSCALL_TRACEPOINT	27	/* syscall tracepoint instrumentation */
 
 #define _TIF_NOTIFY_RESUME	BIT(TIF_NOTIFY_RESUME)
+#define _TIF_NOTIFY_SIGNAL	BIT(TIF_NOTIFY_SIGNAL)
 #define _TIF_SIGPENDING		BIT(TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	BIT(TIF_NEED_RESCHED)
 #define _TIF_UPROBE		BIT(TIF_UPROBE)
@@ -172,6 +172,7 @@ static inline cycles_t get_cycles(void)
 {
 	return (cycles_t) get_tod_clock() >> 2;
 }
+#define get_cycles get_cycles
 
 int get_phys_clock(unsigned long *clock);
 void init_cpu_timer(void);
@@ -150,6 +150,8 @@ int main(void)
 	OFFSET(__LC_BR_R1, lowcore, br_r1_trampoline);
 	/* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */
 	OFFSET(__LC_DUMP_REIPL, lowcore, ipib);
+	OFFSET(__LC_VMCORE_INFO, lowcore, vmcore_info);
+	OFFSET(__LC_OS_INFO, lowcore, os_info);
 	/* hardware defined lowcore locations 0x1000 - 0x18ff */
 	OFFSET(__LC_MCESAD, lowcore, mcesad);
 	OFFSET(__LC_EXT_PARAMS2, lowcore, ext_params2);
@@ -44,7 +44,7 @@ struct save_area {
 	u64 fprs[16];
 	u32 fpc;
 	u32 prefix;
-	u64 todpreg;
+	u32 todpreg;
 	u64 timer;
 	u64 todcmp;
 	u64 vxrs_low[16];
@@ -432,7 +432,7 @@ static void *get_vmcoreinfo_old(unsigned long *size)
 	Elf64_Nhdr note;
 	void *addr;
 
-	if (copy_oldmem_kernel(&addr, &S390_lowcore.vmcore_info, sizeof(addr)))
+	if (copy_oldmem_kernel(&addr, (void *)__LC_VMCORE_INFO, sizeof(addr)))
 		return NULL;
 	memset(nt_name, 0, sizeof(nt_name));
 	if (copy_oldmem_kernel(&note, addr, sizeof(note)))
@@ -52,7 +52,8 @@ STACK_SIZE  = 1 << STACK_SHIFT
 STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
 
 _TIF_WORK	= (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
-		   _TIF_UPROBE | _TIF_GUARDED_STORAGE | _TIF_PATCH_PENDING)
+		   _TIF_UPROBE | _TIF_GUARDED_STORAGE | _TIF_PATCH_PENDING | \
+		   _TIF_NOTIFY_SIGNAL)
 _TIF_TRACE	= (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
 		   _TIF_SYSCALL_TRACEPOINT)
 _CIF_WORK	= (_CIF_ASCE_PRIMARY | _CIF_ASCE_SECONDARY | _CIF_FPU)
@@ -481,8 +482,8 @@ ENTRY(system_call)
 #endif
 	TSTMSK	__PT_FLAGS(%r11),_PIF_SYSCALL_RESTART
 	jo	.Lsysc_syscall_restart
-	TSTMSK	__TI_flags(%r12),_TIF_SIGPENDING
-	jo	.Lsysc_sigpending
+	TSTMSK	__TI_flags(%r12),(_TIF_SIGPENDING|_TIF_NOTIFY_SIGNAL)
+	jnz	.Lsysc_sigpending
 	TSTMSK	__TI_flags(%r12),_TIF_NOTIFY_RESUME
 	jo	.Lsysc_notify_resume
 	TSTMSK	__LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY)
@@ -863,8 +864,8 @@ ENTRY(io_int_handler)
 	TSTMSK	__TI_flags(%r12),_TIF_PATCH_PENDING
 	jo	.Lio_patch_pending
 #endif
-	TSTMSK	__TI_flags(%r12),_TIF_SIGPENDING
-	jo	.Lio_sigpending
+	TSTMSK	__TI_flags(%r12),(_TIF_SIGPENDING|_TIF_NOTIFY_SIGNAL)
+	jnz	.Lio_sigpending
 	TSTMSK	__TI_flags(%r12),_TIF_NOTIFY_RESUME
 	jo	.Lio_notify_resume
 	TSTMSK	__TI_flags(%r12),_TIF_GUARDED_STORAGE
@@ -29,6 +29,7 @@ int s390_verify_sig(const char *kernel, unsigned long kernel_len)
 	const unsigned long marker_len = sizeof(MODULE_SIG_STRING) - 1;
 	struct module_signature *ms;
 	unsigned long sig_len;
+	int ret;
 
 	/* Skip signature verification when not secure IPLed. */
 	if (!ipl_secure_flag)
@@ -63,11 +64,18 @@ int s390_verify_sig(const char *kernel, unsigned long kernel_len)
 		return -EBADMSG;
 	}
 
-	return verify_pkcs7_signature(kernel, kernel_len,
-				      kernel + kernel_len, sig_len,
-				      VERIFY_USE_PLATFORM_KEYRING,
-				      VERIFYING_MODULE_SIGNATURE,
-				      NULL, NULL);
+	ret = verify_pkcs7_signature(kernel, kernel_len,
+				     kernel + kernel_len, sig_len,
+				     VERIFY_USE_SECONDARY_KEYRING,
+				     VERIFYING_MODULE_SIGNATURE,
+				     NULL, NULL);
+	if (ret == -ENOKEY && IS_ENABLED(CONFIG_INTEGRITY_PLATFORM_KEYRING))
+		ret = verify_pkcs7_signature(kernel, kernel_len,
+					     kernel + kernel_len, sig_len,
+					     VERIFY_USE_PLATFORM_KEYRING,
+					     VERIFYING_MODULE_SIGNATURE,
+					     NULL, NULL);
+	return ret;
 }
 #endif /* CONFIG_KEXEC_SIG */
@@ -15,6 +15,7 @@
 #include <asm/checksum.h>
 #include <asm/lowcore.h>
 #include <asm/os_info.h>
+#include <asm/asm-offsets.h>
 
 /*
  * OS info structure has to be page aligned
@@ -123,7 +124,7 @@ static void os_info_old_init(void)
 		return;
 	if (!OLDMEM_BASE)
 		goto fail;
-	if (copy_oldmem_kernel(&addr, &S390_lowcore.os_info, sizeof(addr)))
+	if (copy_oldmem_kernel(&addr, (void *)__LC_OS_INFO, sizeof(addr)))
 		goto fail;
 	if (addr == 0 || addr % PAGE_SIZE)
 		goto fail;
@@ -292,6 +292,26 @@ static int __hw_perf_event_init(struct perf_event *event, unsigned int type)
 	return err;
 }
 
+/* Events CPU_CYLCES and INSTRUCTIONS can be submitted with two different
+ * attribute::type values:
+ * - PERF_TYPE_HARDWARE:
+ * - pmu->type:
+ * Handle both type of invocations identical. They address the same hardware.
+ * The result is different when event modifiers exclude_kernel and/or
+ * exclude_user are also set.
+ */
+static int cpumf_pmu_event_type(struct perf_event *event)
+{
+	u64 ev = event->attr.config;
+
+	if (cpumf_generic_events_basic[PERF_COUNT_HW_CPU_CYCLES] == ev ||
+	    cpumf_generic_events_basic[PERF_COUNT_HW_INSTRUCTIONS] == ev ||
+	    cpumf_generic_events_user[PERF_COUNT_HW_CPU_CYCLES] == ev ||
+	    cpumf_generic_events_user[PERF_COUNT_HW_INSTRUCTIONS] == ev)
+		return PERF_TYPE_HARDWARE;
+	return PERF_TYPE_RAW;
+}
+
 static int cpumf_pmu_event_init(struct perf_event *event)
 {
 	unsigned int type = event->attr.type;
@@ -301,7 +321,7 @@ static int cpumf_pmu_event_init(struct perf_event *event)
 		err = __hw_perf_event_init(event, type);
 	else if (event->pmu->type == type)
 		/* Registered as unknown PMU */
-		err = __hw_perf_event_init(event, PERF_TYPE_RAW);
+		err = __hw_perf_event_init(event, cpumf_pmu_event_type(event));
 	else
 		return -ENOENT;
@@ -51,7 +51,7 @@ static struct kvm_s390_sie_block *sie_block(struct pt_regs *regs)
 	if (!stack)
 		return NULL;
 
-	return (struct kvm_s390_sie_block *) stack->empty1[0];
+	return (struct kvm_s390_sie_block *)stack->empty1[1];
 }
 
 static bool is_in_guest(struct pt_regs *regs)
@@ -77,6 +77,18 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 
 	memcpy(dst, src, arch_task_struct_size);
 	dst->thread.fpu.regs = dst->thread.fpu.fprs;
+
+	/*
+	 * Don't transfer over the runtime instrumentation or the guarded
+	 * storage control block pointers. These fields are cleared here instead
+	 * of in copy_thread() to avoid premature freeing of associated memory
+	 * on fork() failure. Wait to clear the RI flag because ->stack still
+	 * refers to the source thread.
+	 */
+	dst->thread.ri_cb = NULL;
+	dst->thread.gs_cb = NULL;
+	dst->thread.gs_bc_cb = NULL;
+
 	return 0;
 }
@@ -115,7 +127,7 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
 	frame->sf.gprs[9] = (unsigned long) frame;
 
 	/* Store access registers to kernel stack of new process. */
-	if (unlikely(p->flags & PF_KTHREAD)) {
+	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
 		/* kernel thread */
 		memset(&frame->childregs, 0, sizeof(struct pt_regs));
 		frame->childregs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT |
@@ -134,13 +146,11 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
 	frame->childregs.flags = 0;
 	if (new_stackp)
 		frame->childregs.gprs[15] = new_stackp;
-
-	/* Don't copy runtime instrumentation info */
-	p->thread.ri_cb = NULL;
+	/*
+	 * Clear the runtime instrumentation flag after the above childregs
+	 * copy. The CB pointer was already cleared in arch_dup_task_struct().
+	 */
 	frame->childregs.psw.mask &= ~PSW_MASK_RI;
-	/* Don't copy guarded storage control block */
-	p->thread.gs_cb = NULL;
-	p->thread.gs_bc_cb = NULL;
 
 	/* Set a new TLS ? */
 	if (clone_flags & CLONE_SETTLS) {
@@ -1009,6 +1009,11 @@ static void __init setup_randomness(void)
 	if (stsi(vmms, 3, 2, 2) == 0 && vmms->count)
 		add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count);
 	memblock_free((unsigned long) vmms, PAGE_SIZE);
+
+#ifdef CONFIG_ARCH_RANDOM
+	if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG))
+		static_branch_enable(&s390_arch_random_available);
+#endif
 }
 
 /*
@@ -472,7 +472,7 @@ void do_signal(struct pt_regs *regs)
 	current->thread.system_call =
 		test_pt_regs_flag(regs, PIF_SYSCALL) ? regs->int_code : 0;
 
-	if (get_signal(&ksig)) {
+	if (test_thread_flag(TIF_SIGPENDING) && get_signal(&ksig)) {
 		/* Whee!  Actually deliver the signal.  */
 		if (current->thread.system_call) {
 			regs->int_code = current->thread.system_call;
@@ -122,6 +122,7 @@ SECTIONS
 	/*
 	 * Table with the patch locations to undo expolines
 	*/
+	. = ALIGN(4);
 	.nospec_call_table : {
 		__nospec_call_start = . ;
 		*(.s390_indirect*)
@@ -521,12 +521,27 @@ static int handle_pv_uvc(struct kvm_vcpu *vcpu)
 
 static int handle_pv_notification(struct kvm_vcpu *vcpu)
 {
+	int ret;
+
 	if (vcpu->arch.sie_block->ipa == 0xb210)
 		return handle_pv_spx(vcpu);
 	if (vcpu->arch.sie_block->ipa == 0xb220)
 		return handle_pv_sclp(vcpu);
 	if (vcpu->arch.sie_block->ipa == 0xb9a4)
 		return handle_pv_uvc(vcpu);
+	if (vcpu->arch.sie_block->ipa >> 8 == 0xae) {
+		/*
+		 * Besides external call, other SIGP orders also cause a
+		 * 108 (pv notify) intercept. In contrast to external call,
+		 * these orders need to be emulated and hence the appropriate
+		 * place to handle them is in handle_instruction().
+		 * So first try kvm_s390_handle_sigp_pei() and if that isn't
+		 * successful, go on with handle_instruction().
+		 */
+		ret = kvm_s390_handle_sigp_pei(vcpu);
+		if (!ret)
+			return ret;
+	}
 
 	return handle_instruction(vcpu);
 }
@@ -1092,6 +1092,8 @@ static int kvm_s390_vm_get_migration(struct kvm *kvm,
 	return 0;
 }
 
+static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod);
+
 static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
 {
 	struct kvm_s390_vm_tod_clock gtod;
@@ -1101,7 +1103,7 @@ static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
 
 	if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
 		return -EINVAL;
-	kvm_s390_set_tod_clock(kvm, &gtod);
+	__kvm_s390_set_tod_clock(kvm, &gtod);
 
 	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
 		gtod.epoch_idx, gtod.tod);
@@ -1132,7 +1134,7 @@ static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
 			   sizeof(gtod.tod)))
 		return -EFAULT;
 
-	kvm_s390_set_tod_clock(kvm, &gtod);
+	__kvm_s390_set_tod_clock(kvm, &gtod);
 	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
 	return 0;
 }
@@ -1144,6 +1146,16 @@ static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
 	if (attr->flags)
 		return -EINVAL;
 
+	mutex_lock(&kvm->lock);
+	/*
+	 * For protected guests, the TOD is managed by the ultravisor, so trying
+	 * to change it will never bring the expected results.
+	 */
+	if (kvm_s390_pv_is_protected(kvm)) {
+		ret = -EOPNOTSUPP;
+		goto out_unlock;
+	}
+
 	switch (attr->attr) {
 	case KVM_S390_VM_TOD_EXT:
 		ret = kvm_s390_set_tod_ext(kvm, attr);
@@ -1158,6 +1170,9 @@ static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
 		ret = -ENXIO;
 		break;
 	}
+
+out_unlock:
+	mutex_unlock(&kvm->lock);
 	return ret;
 }
@@ -3862,14 +3877,12 @@ retry:
 	return 0;
 }
 
-void kvm_s390_set_tod_clock(struct kvm *kvm,
-			    const struct kvm_s390_vm_tod_clock *gtod)
+static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod)
 {
 	struct kvm_vcpu *vcpu;
 	struct kvm_s390_tod_clock_ext htod;
 	int i;
 
-	mutex_lock(&kvm->lock);
 	preempt_disable();
 
 	get_tod_clock_ext((char *)&htod);
@@ -3890,7 +3903,15 @@ void kvm_s390_set_tod_clock(struct kvm *kvm,
 
 	kvm_s390_vcpu_unblock_all(kvm);
 	preempt_enable();
+}
+
+int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod)
+{
+	if (!mutex_trylock(&kvm->lock))
+		return 0;
+	__kvm_s390_set_tod_clock(kvm, gtod);
 	mutex_unlock(&kvm->lock);
+	return 1;
 }
 
 /**
@@ -326,8 +326,7 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
 int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu);
 
 /* implemented in kvm-s390.c */
-void kvm_s390_set_tod_clock(struct kvm *kvm,
-			    const struct kvm_s390_vm_tod_clock *gtod);
+int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod);
 long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
 int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
@@ -102,7 +102,20 @@ static int handle_set_clock(struct kvm_vcpu *vcpu)
 		return kvm_s390_inject_prog_cond(vcpu, rc);
 
 	VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", gtod.tod);
-	kvm_s390_set_tod_clock(vcpu->kvm, &gtod);
+	/*
+	 * To set the TOD clock the kvm lock must be taken, but the vcpu lock
+	 * is already held in handle_set_clock. The usual lock order is the
+	 * opposite. As SCK is deprecated and should not be used in several
+	 * cases, for example when the multiple epoch facility or TOD clock
+	 * steering facility is installed (see Principles of Operation), a
+	 * slow path can be used. If the lock can not be taken via try_lock,
+	 * the instruction will be retried via -EAGAIN at a later point in
+	 * time.
+	 */
+	if (!kvm_s390_try_set_tod_clock(vcpu->kvm, &gtod)) {
+		kvm_s390_retry_instr(vcpu);
+		return -EAGAIN;
+	}
 
 	kvm_s390_set_psw_cc(vcpu, 0);
 	return 0;
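The comment in this hunk describes the classic trylock answer to a lock-order inversion: the lock that would have to be taken in the "wrong" order is only try-locked, and on failure the whole operation is retried later. A minimal pthread sketch of the same pattern (names illustrative, not KVM API):

/* Trylock-and-retry instead of risking a lock-order deadlock. */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t vm_lock = PTHREAD_MUTEX_INITIALIZER;
static long long guest_tod;

/* Returns false if the lock could not be taken; the caller retries. */
static bool try_set_clock(long long tod)
{
	if (pthread_mutex_trylock(&vm_lock) != 0)
		return false;	/* would invert the lock order; back off */
	guest_tod = tod;
	pthread_mutex_unlock(&vm_lock);
	return true;
}

int main(void)
{
	while (!try_set_clock(42))
		;	/* retry later; KVM does this via -EAGAIN */
	return 0;
}

A caller that gets false simply backs out and retries, which is exactly how the rejected SCK instruction is re-executed here.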
@@ -163,10 +163,13 @@ int kvm_s390_pv_deinit_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
 	atomic_set(&kvm->mm->context.is_protected, 0);
 	KVM_UV_EVENT(kvm, 3, "PROTVIRT DESTROY VM: rc %x rrc %x", *rc, *rrc);
 	WARN_ONCE(cc, "protvirt destroy vm failed rc %x rrc %x", *rc, *rrc);
-	/* Inteded memory leak on "impossible" error */
-	if (!cc)
+	/* Intended memory leak on "impossible" error */
+	if (!cc) {
 		kvm_s390_pv_dealloc_vm(kvm);
-	return cc ? -EIO : 0;
+		return 0;
+	}
+	s390_replace_asce(kvm->arch.gmap);
+	return -EIO;
 }
 
 int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
@@ -492,9 +492,9 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu)
 	struct kvm_vcpu *dest_vcpu;
 	u8 order_code = kvm_s390_get_base_disp_rs(vcpu, NULL);
 
-	trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr);
-
 	if (order_code == SIGP_EXTERNAL_CALL) {
+		trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr);
+
 		dest_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr);
 		BUG_ON(dest_vcpu == NULL);
 
@@ -535,8 +535,10 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_CEI))
 		scb_s->eca |= scb_o->eca & ECA_CEI;
 	/* Epoch Extension */
-	if (test_kvm_facility(vcpu->kvm, 139))
+	if (test_kvm_facility(vcpu->kvm, 139)) {
 		scb_s->ecd |= scb_o->ecd & ECD_MEF;
+		scb_s->epdx = scb_o->epdx;
+	}
 
 	/* etoken */
 	if (test_kvm_facility(vcpu->kvm, 156))
@@ -429,7 +429,9 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
 	flags = FAULT_FLAG_DEFAULT;
 	if (user_mode(regs))
 		flags |= FAULT_FLAG_USER;
-	if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
+	if ((trans_exc_code & store_indication) == 0x400)
+		access = VM_WRITE;
+	if (access == VM_WRITE)
 		flags |= FAULT_FLAG_WRITE;
 	mmap_read_lock(mm);
@@ -2596,6 +2596,18 @@ static int __s390_enable_skey_pte(pte_t *pte, unsigned long addr,
 	return 0;
 }
 
+/*
+ * Give a chance to schedule after setting a key to 256 pages.
+ * We only hold the mm lock, which is a rwsem and the kvm srcu.
+ * Both can sleep.
+ */
+static int __s390_enable_skey_pmd(pmd_t *pmd, unsigned long addr,
+				  unsigned long next, struct mm_walk *walk)
+{
+	cond_resched();
+	return 0;
+}
+
 static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
 				      unsigned long hmask, unsigned long next,
 				      struct mm_walk *walk)
@@ -2618,12 +2630,14 @@ static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
 	end = start + HPAGE_SIZE - 1;
 	__storage_key_init_range(start, end);
 	set_bit(PG_arch_1, &page->flags);
+	cond_resched();
 	return 0;
 }
 
 static const struct mm_walk_ops enable_skey_walk_ops = {
 	.hugetlb_entry		= __s390_enable_skey_hugetlb,
 	.pte_entry		= __s390_enable_skey_pte,
+	.pmd_entry		= __s390_enable_skey_pmd,
 };
 
 int s390_enable_skey(void)
@@ -2707,3 +2721,89 @@ void s390_reset_acc(struct mm_struct *mm)
 	mmput(mm);
 }
 EXPORT_SYMBOL_GPL(s390_reset_acc);
+
+/**
+ * s390_unlist_old_asce - Remove the topmost level of page tables from the
+ * list of page tables of the gmap.
+ * @gmap: the gmap whose table is to be removed
+ *
+ * On s390x, KVM keeps a list of all pages containing the page tables of the
+ * gmap (the CRST list). This list is used at tear down time to free all
+ * pages that are now not needed anymore.
+ *
+ * This function removes the topmost page of the tree (the one pointed to by
+ * the ASCE) from the CRST list.
+ *
+ * This means that it will not be freed when the VM is torn down, and needs
+ * to be handled separately by the caller, unless a leak is actually
+ * intended. Notice that this function will only remove the page from the
+ * list, the page will still be used as a top level page table (and ASCE).
+ */
+void s390_unlist_old_asce(struct gmap *gmap)
+{
+	struct page *old;
+
+	old = virt_to_page(gmap->table);
+	spin_lock(&gmap->guest_table_lock);
+	list_del(&old->lru);
+	/*
+	 * Sometimes the topmost page might need to be "removed" multiple
+	 * times, for example if the VM is rebooted into secure mode several
+	 * times concurrently, or if s390_replace_asce fails after calling
+	 * s390_remove_old_asce and is attempted again later. In that case
+	 * the old asce has been removed from the list, and therefore it
+	 * will not be freed when the VM terminates, but the ASCE is still
+	 * in use and still pointed to.
+	 * A subsequent call to replace_asce will follow the pointer and try
+	 * to remove the same page from the list again.
+	 * Therefore it's necessary that the page of the ASCE has valid
+	 * pointers, so list_del can work (and do nothing) without
+	 * dereferencing stale or invalid pointers.
+	 */
+	INIT_LIST_HEAD(&old->lru);
+	spin_unlock(&gmap->guest_table_lock);
+}
+EXPORT_SYMBOL_GPL(s390_unlist_old_asce);
+
+/**
+ * s390_replace_asce - Try to replace the current ASCE of a gmap with a copy
+ * @gmap: the gmap whose ASCE needs to be replaced
+ *
+ * If the allocation of the new top level page table fails, the ASCE is not
+ * replaced.
+ * In any case, the old ASCE is always removed from the gmap CRST list.
+ * Therefore the caller has to make sure to save a pointer to it
+ * beforehand, unless a leak is actually intended.
+ */
+int s390_replace_asce(struct gmap *gmap)
+{
+	unsigned long asce;
+	struct page *page;
+	void *table;
+
+	s390_unlist_old_asce(gmap);
+
+	page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
+	if (!page)
+		return -ENOMEM;
+	table = page_to_virt(page);
+	memcpy(table, gmap->table, 1UL << (CRST_ALLOC_ORDER + PAGE_SHIFT));
+
+	/*
+	 * The caller has to deal with the old ASCE, but here we make sure
+	 * the new one is properly added to the CRST list, so that
+	 * it will be freed when the VM is torn down.
+	 */
+	spin_lock(&gmap->guest_table_lock);
+	list_add(&page->lru, &gmap->crst_list);
+	spin_unlock(&gmap->guest_table_lock);
+
+	/* Set new table origin while preserving existing ASCE control bits */
+	asce = (gmap->asce & ~_ASCE_ORIGIN) | __pa(table);
+	WRITE_ONCE(gmap->asce, asce);
+	WRITE_ONCE(gmap->mm->context.gmap_asce, asce);
+	WRITE_ONCE(gmap->table, table);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(s390_replace_asce);
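The long comment inside s390_unlist_old_asce() above leans on one property of the kernel's circular lists: after list_del() plus re-initialization, deleting the same node again is a harmless no-op, whereas deleting through stale pointers would corrupt memory. A self-contained demonstration of that idiom (a re-implementation for illustration, not the kernel headers):

/* Why INIT_LIST_HEAD after list_del makes repeated deletion safe. */
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void init_list_head(struct list_head *n) { n->next = n->prev = n; }

static void list_add(struct list_head *n, struct list_head *head)
{
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}

static void list_del(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

int main(void)
{
	struct list_head head, node;

	init_list_head(&head);
	list_add(&node, &head);

	list_del(&node);
	init_list_head(&node);	/* make repeated deletion safe */
	list_del(&node);	/* no-op: node only points at itself */

	printf("list empty: %d\n", head.next == &head);
	return 0;
}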
@@ -734,7 +734,7 @@ void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 	pgste_val(pgste) |= PGSTE_GR_BIT | PGSTE_GC_BIT;
 	ptev = pte_val(*ptep);
 	if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
-		page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
+		page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 0);
 	pgste_set_unlock(ptep, pgste);
 	preempt_enable();
 }
@@ -69,6 +69,7 @@ struct zpci_dev *get_zdev_by_fid(u32 fid)
 	list_for_each_entry(tmp, &zpci_list, entry) {
 		if (tmp->fid == fid) {
 			zdev = tmp;
+			zpci_zdev_get(zdev);
 			break;
 		}
 	}
@@ -13,7 +13,8 @@ void zpci_bus_device_unregister(struct zpci_dev *zdev);
 void zpci_release_device(struct kref *kref);
 static inline void zpci_zdev_put(struct zpci_dev *zdev)
 {
-	kref_put(&zdev->kref, zpci_release_device);
+	if (zdev)
+		kref_put(&zdev->kref, zpci_release_device);
 }
 
 static inline void zpci_zdev_get(struct zpci_dev *zdev)
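This NULL-tolerant put, together with the reference now taken inside get_zdev_by_fid() in the previous hunk, establishes the usual lookup-gets, caller-puts discipline, with put() accepting NULL the way kfree(NULL) does so error paths need no extra checks. A compact sketch of that contract (illustrative, not the kernel kref API):

/* Reference-counted lookup contract: lookup gets, every caller puts. */
#include <stdatomic.h>
#include <stdlib.h>

struct dev {
	atomic_int refcount;
	/* ... payload ... */
};

static void dev_get(struct dev *d)
{
	atomic_fetch_add(&d->refcount, 1);
}

static void dev_put(struct dev *d)
{
	if (!d)
		return;		/* NULL-safe, as in zpci_zdev_put() */
	if (atomic_fetch_sub(&d->refcount, 1) == 1)
		free(d);	/* last reference dropped */
}

int main(void)
{
	struct dev *d = calloc(1, sizeof(*d));

	atomic_init(&d->refcount, 1);	/* creator holds the first reference */
	dev_get(d);			/* lookup path takes its own reference */
	dev_put(d);			/* lookup caller drops it */
	dev_put(d);			/* creator drops the last one: freed */
	dev_put(NULL);			/* harmless, like putting a failed lookup */
	return 0;
}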
@@ -22,6 +22,8 @@
 #include <asm/clp.h>
 #include <uapi/asm/clp.h>
 
+#include "pci_bus.h"
+
 bool zpci_unique_uid;
 
 void update_uid_checking(bool new)
@@ -372,8 +374,11 @@ static void __clp_add(struct clp_fh_list_entry *entry, void *data)
 		return;
 
 	zdev = get_zdev_by_fid(entry->fid);
-	if (!zdev)
-		zpci_create_device(entry->fid, entry->fh, entry->config_state);
+	if (zdev) {
+		zpci_zdev_put(zdev);
+		return;
+	}
+	zpci_create_device(entry->fid, entry->fh, entry->config_state);
 }
 
 int clp_scan_pci_devices(void)
@@ -61,10 +61,12 @@ static void __zpci_event_error(struct zpci_ccdf_err *ccdf)
 	       pdev ? pci_name(pdev) : "n/a", ccdf->pec, ccdf->fid);
 
 	if (!pdev)
-		return;
+		goto no_pdev;
 
 	pdev->error_state = pci_channel_io_perm_failure;
 	pci_dev_put(pdev);
+no_pdev:
+	zpci_zdev_put(zdev);
 }
 
 void zpci_event_error(void *data)
@@ -76,6 +78,7 @@ void zpci_event_error(void *data)
 static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
 {
 	struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
+	bool existing_zdev = !!zdev;
 	enum zpci_state state;
 	struct pci_dev *pdev;
 	int ret;
@@ -161,6 +164,8 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
 	default:
 		break;
 	}
+	if (existing_zdev)
+		zpci_zdev_put(zdev);
 }
 
 void zpci_event_availability(void *data)
@@ -64,7 +64,7 @@ static inline int __pcistg_mio_inuser(
 	asm volatile (
 		"       sacf 256\n"
 		"0:     llgc    %[tmp],0(%[src])\n"
-		"       sllg    %[val],%[val],8\n"
+		"4:     sllg    %[val],%[val],8\n"
 		"       aghi    %[src],1\n"
 		"       ogr     %[val],%[tmp]\n"
 		"       brctg   %[cnt],0b\n"
@@ -72,7 +72,7 @@ static inline int __pcistg_mio_inuser(
 		"2:     ipm     %[cc]\n"
 		"       srl     %[cc],28\n"
 		"3:     sacf    768\n"
-		EX_TABLE(0b, 3b) EX_TABLE(1b, 3b) EX_TABLE(2b, 3b)
+		EX_TABLE(0b, 3b) EX_TABLE(4b, 3b) EX_TABLE(1b, 3b) EX_TABLE(2b, 3b)
 		:
 		[src] "+a" (src), [cnt] "+d" (cnt),
 		[val] "+d" (val), [tmp] "=d" (tmp),
@@ -222,10 +222,10 @@ static inline int __pcilg_mio_inuser(
 		"2:     ahi     %[shift],-8\n"
 		"       srlg    %[tmp],%[val],0(%[shift])\n"
 		"3:     stc     %[tmp],0(%[dst])\n"
-		"       aghi    %[dst],1\n"
+		"5:     aghi    %[dst],1\n"
 		"       brctg   %[cnt],2b\n"
 		"4:     sacf    768\n"
-		EX_TABLE(0b, 4b) EX_TABLE(1b, 4b) EX_TABLE(3b, 4b)
+		EX_TABLE(0b, 4b) EX_TABLE(1b, 4b) EX_TABLE(3b, 4b) EX_TABLE(5b, 4b)
		:
 		[cc] "+d" (cc), [val] "=d" (val), [len] "+d" (len),
 		[dst] "+a" (dst), [cnt] "+d" (cnt), [tmp] "=d" (tmp),