mirror of
https://github.com/LuckfoxTECH/luckfox-pico.git
synced 2026-01-18 11:38:31 +01:00
project:build.sh: Added fastboot support; custom modifications to U-Boot and kernel implemented using patches.
project:cfg:BoardConfig_IPC: Added fastboot BoardConfig file and firmware post-scripts, distinguishing between the BoardConfigs for Luckfox Pico Pro and Luckfox Pico Max.
project:app: Added fastboot_client and rk_smart_door for quick-boot applications; updated the rkipc app to adapt to the latest media library.
media:samples: Added more usage examples.
media:rockit: Fixed bugs; removed support for retrieving data frames from VPSS.
media:isp: Updated the rkaiq library and related tools to support connecting to RKISP_Tuner.
sysdrv:Makefile: Added support for compiling drv_ko for Luckfox Pico Ultra W on Ubuntu; added support for a custom root filesystem.
sysdrv:tools:board: Updated Buildroot optional mirror sources, updated some software versions, and stored the device tree and configuration files that undergo repeated modification for U-Boot and the kernel separately.
sysdrv:source:mcu: Used the RISC-V MCU SDK with the RT-Thread system, mainly for initializing the camera AE during quick boot.
sysdrv:source:uboot: Added fastboot support; added a high-baud-rate DDR bin for serial firmware upgrades.
sysdrv:source:kernel: Upgraded to version 5.10.160; increased the NPU frequency for RV1106G3; added fastboot support.

Signed-off-by: luckfox-eng29 <eng29@luckfox.com>
@@ -203,8 +203,8 @@
#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
#define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */
#define X86_FEATURE_PTI ( 7*32+11) /* Kernel Page Table Isolation enabled */
#define X86_FEATURE_RETPOLINE ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
#define X86_FEATURE_RETPOLINE_LFENCE ( 7*32+13) /* "" Use LFENCEs for Spectre variant 2 */
#define X86_FEATURE_KERNEL_IBRS ( 7*32+12) /* "" Set/clear IBRS on kernel entry/exit */
#define X86_FEATURE_RSB_VMEXIT ( 7*32+13) /* "" Fill RSB on VM-Exit */
#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
#define X86_FEATURE_CDP_L2 ( 7*32+15) /* Code and Data Prioritization L2 */
#define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
@@ -290,6 +290,13 @@
#define X86_FEATURE_FENCE_SWAPGS_KERNEL (11*32+ 5) /* "" LFENCE in kernel entry SWAPGS path */
#define X86_FEATURE_SPLIT_LOCK_DETECT (11*32+ 6) /* #AC for split lock */
#define X86_FEATURE_PER_THREAD_MBA (11*32+ 7) /* "" Per-thread Memory Bandwidth Allocation */
#define X86_FEATURE_ENTRY_IBPB (11*32+10) /* "" Issue an IBPB on kernel entry */
#define X86_FEATURE_RRSBA_CTRL (11*32+11) /* "" RET prediction control */
#define X86_FEATURE_RETPOLINE (11*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
#define X86_FEATURE_RETPOLINE_LFENCE (11*32+13) /* "" Use LFENCE for Spectre variant 2 */
#define X86_FEATURE_RETHUNK (11*32+14) /* "" Use REturn THUNK */
#define X86_FEATURE_UNRET (11*32+15) /* "" AMD BTB untrain return */
#define X86_FEATURE_RSB_VMEXIT_LITE (11*32+17) /* "" Fill RSB on VM-Exit when EIBRS is enabled */

/* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
#define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */
@@ -308,6 +315,7 @@
#define X86_FEATURE_AMD_SSBD (13*32+24) /* "" Speculative Store Bypass Disable */
#define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */
#define X86_FEATURE_AMD_SSB_NO (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */
#define X86_FEATURE_BTC_NO (13*32+29) /* "" Not vulnerable to Branch Type Confusion */

/* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
@@ -417,5 +425,7 @@
#define X86_BUG_TAA X86_BUG(22) /* CPU is affected by TSX Async Abort(TAA) */
#define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */
#define X86_BUG_SRBDS X86_BUG(24) /* CPU may leak RNG bits if not mitigated */
#define X86_BUG_MMIO_STALE_DATA X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */
#define X86_BUG_RETBLEED X86_BUG(26) /* CPU is affected by RETBleed */

#endif /* _ASM_X86_CPUFEATURES_H */

@@ -56,6 +56,25 @@
# define DISABLE_PTI (1 << (X86_FEATURE_PTI & 31))
#endif

#ifdef CONFIG_RETPOLINE
# define DISABLE_RETPOLINE 0
#else
# define DISABLE_RETPOLINE ((1 << (X86_FEATURE_RETPOLINE & 31)) | \
 (1 << (X86_FEATURE_RETPOLINE_LFENCE & 31)))
#endif

#ifdef CONFIG_RETHUNK
# define DISABLE_RETHUNK 0
#else
# define DISABLE_RETHUNK (1 << (X86_FEATURE_RETHUNK & 31))
#endif

#ifdef CONFIG_CPU_UNRET_ENTRY
# define DISABLE_UNRET 0
#else
# define DISABLE_UNRET (1 << (X86_FEATURE_UNRET & 31))
#endif

#ifdef CONFIG_IOMMU_SUPPORT
# define DISABLE_ENQCMD 0
#else
@@ -76,7 +95,7 @@
#define DISABLED_MASK8 0
#define DISABLED_MASK9 (DISABLE_SMAP)
#define DISABLED_MASK10 0
#define DISABLED_MASK11 0
#define DISABLED_MASK11 (DISABLE_RETPOLINE|DISABLE_RETHUNK|DISABLE_UNRET)
#define DISABLED_MASK12 0
#define DISABLED_MASK13 0
#define DISABLED_MASK14 0

@@ -6,7 +6,7 @@
*
* Written by Masami Hiramatsu <mhiramat@redhat.com>
*/
#include "inat_types.h"
#include "inat_types.h" /* __ignore_sync_check__ */

/*
* Internal bits. Don't use bitmasks directly, because these bits are

@@ -8,7 +8,7 @@
*/

/* insn_attr_t is defined in inat.h */
#include "inat.h"
#include "inat.h" /* __ignore_sync_check__ */

struct insn_field {
union {
@@ -87,13 +87,25 @@ struct insn {
#define X86_VEX_M_MAX 0x1f /* VEX3.M Maximum value */

extern void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64);
extern void insn_get_prefixes(struct insn *insn);
extern void insn_get_opcode(struct insn *insn);
extern void insn_get_modrm(struct insn *insn);
extern void insn_get_sib(struct insn *insn);
extern void insn_get_displacement(struct insn *insn);
extern void insn_get_immediate(struct insn *insn);
extern void insn_get_length(struct insn *insn);
extern int insn_get_prefixes(struct insn *insn);
extern int insn_get_opcode(struct insn *insn);
extern int insn_get_modrm(struct insn *insn);
extern int insn_get_sib(struct insn *insn);
extern int insn_get_displacement(struct insn *insn);
extern int insn_get_immediate(struct insn *insn);
extern int insn_get_length(struct insn *insn);

enum insn_mode {
INSN_MODE_32,
INSN_MODE_64,
/* Mode is determined by the current kernel build. */
INSN_MODE_KERN,
INSN_NUM_MODES,
};

extern int insn_decode(struct insn *insn, const void *kaddr, int buf_len, enum insn_mode m);

#define insn_decode_kernel(_insn, _ptr) insn_decode((_insn), (_ptr), MAX_INSN_SIZE, INSN_MODE_KERN)

/* Attribute will be determined after getting ModRM (for opcode groups) */
static inline void insn_get_attribute(struct insn *insn)

@@ -51,6 +51,8 @@
|
||||
#define SPEC_CTRL_STIBP BIT(SPEC_CTRL_STIBP_SHIFT) /* STIBP mask */
|
||||
#define SPEC_CTRL_SSBD_SHIFT 2 /* Speculative Store Bypass Disable bit */
|
||||
#define SPEC_CTRL_SSBD BIT(SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */
|
||||
#define SPEC_CTRL_RRSBA_DIS_S_SHIFT 6 /* Disable RRSBA behavior */
|
||||
#define SPEC_CTRL_RRSBA_DIS_S BIT(SPEC_CTRL_RRSBA_DIS_S_SHIFT)
|
||||
|
||||
#define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
|
||||
#define PRED_CMD_IBPB BIT(0) /* Indirect Branch Prediction Barrier */
|
||||
@@ -91,6 +93,7 @@
|
||||
#define MSR_IA32_ARCH_CAPABILITIES 0x0000010a
|
||||
#define ARCH_CAP_RDCL_NO BIT(0) /* Not susceptible to Meltdown */
|
||||
#define ARCH_CAP_IBRS_ALL BIT(1) /* Enhanced IBRS support */
|
||||
#define ARCH_CAP_RSBA BIT(2) /* RET may use alternative branch predictors */
|
||||
#define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH BIT(3) /* Skip L1D flush on vmentry */
|
||||
#define ARCH_CAP_SSB_NO BIT(4) /*
|
||||
* Not susceptible to Speculative Store Bypass
|
||||
@@ -114,6 +117,41 @@
|
||||
* Not susceptible to
|
||||
* TSX Async Abort (TAA) vulnerabilities.
|
||||
*/
|
||||
#define ARCH_CAP_SBDR_SSDP_NO BIT(13) /*
|
||||
* Not susceptible to SBDR and SSDP
|
||||
* variants of Processor MMIO stale data
|
||||
* vulnerabilities.
|
||||
*/
|
||||
#define ARCH_CAP_FBSDP_NO BIT(14) /*
|
||||
* Not susceptible to FBSDP variant of
|
||||
* Processor MMIO stale data
|
||||
* vulnerabilities.
|
||||
*/
|
||||
#define ARCH_CAP_PSDP_NO BIT(15) /*
|
||||
* Not susceptible to PSDP variant of
|
||||
* Processor MMIO stale data
|
||||
* vulnerabilities.
|
||||
*/
|
||||
#define ARCH_CAP_FB_CLEAR BIT(17) /*
|
||||
* VERW clears CPU fill buffer
|
||||
* even on MDS_NO CPUs.
|
||||
*/
|
||||
#define ARCH_CAP_FB_CLEAR_CTRL BIT(18) /*
|
||||
* MSR_IA32_MCU_OPT_CTRL[FB_CLEAR_DIS]
|
||||
* bit available to control VERW
|
||||
* behavior.
|
||||
*/
|
||||
#define ARCH_CAP_RRSBA BIT(19) /*
|
||||
* Indicates RET may use predictors
|
||||
* other than the RSB. With eIBRS
|
||||
* enabled predictions in kernel mode
|
||||
* are restricted to targets in
|
||||
* kernel.
|
||||
*/
|
||||
#define ARCH_CAP_PBRSB_NO BIT(24) /*
|
||||
* Not susceptible to Post-Barrier
|
||||
* Return Stack Buffer Predictions.
|
||||
*/
|
||||
|
||||
#define MSR_IA32_FLUSH_CMD 0x0000010b
|
||||
#define L1D_FLUSH BIT(0) /*
|
||||
@@ -131,6 +169,7 @@
|
||||
/* SRBDS support */
|
||||
#define MSR_IA32_MCU_OPT_CTRL 0x00000123
|
||||
#define RNGDS_MITG_DIS BIT(0)
|
||||
#define FB_CLEAR_DIS BIT(3) /* CPU Fill buffer clear disable */
|
||||
|
||||
#define MSR_IA32_SYSENTER_CS 0x00000174
|
||||
#define MSR_IA32_SYSENTER_ESP 0x00000175
|
||||
@@ -450,6 +489,11 @@
|
||||
#define MSR_AMD64_CPUID_FN_1 0xc0011004
|
||||
#define MSR_AMD64_LS_CFG 0xc0011020
|
||||
#define MSR_AMD64_DC_CFG 0xc0011022
|
||||
|
||||
#define MSR_AMD64_DE_CFG 0xc0011029
|
||||
#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT 1
|
||||
#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE BIT_ULL(MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT)
|
||||
|
||||
#define MSR_AMD64_BU_CFG2 0xc001102a
|
||||
#define MSR_AMD64_IBSFETCHCTL 0xc0011030
|
||||
#define MSR_AMD64_IBSFETCHLINAD 0xc0011031
|
||||
@@ -482,6 +526,9 @@
|
||||
/* Fam 17h MSRs */
|
||||
#define MSR_F17H_IRPERF 0xc00000e9
|
||||
|
||||
#define MSR_ZEN2_SPECTRAL_CHICKEN 0xc00110e3
|
||||
#define MSR_ZEN2_SPECTRAL_CHICKEN_BIT BIT_ULL(1)
|
||||
|
||||
/* Fam 16h MSRs */
|
||||
#define MSR_F16H_L2I_PERF_CTL 0xc0010230
|
||||
#define MSR_F16H_L2I_PERF_CTR 0xc0010231
|
||||
@@ -523,9 +570,6 @@
|
||||
#define FAM10H_MMIO_CONF_BASE_MASK 0xfffffffULL
|
||||
#define FAM10H_MMIO_CONF_BASE_SHIFT 20
|
||||
#define MSR_FAM10H_NODE_ID 0xc001100c
|
||||
#define MSR_F10H_DECFG 0xc0011029
|
||||
#define MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT 1
|
||||
#define MSR_F10H_DECFG_LFENCE_SERIALIZE BIT_ULL(MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT)
|
||||
|
||||
/* K8 MSRs */
|
||||
#define MSR_K8_TOP_MEM1 0xc001001a
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
*
|
||||
* Written by Masami Hiramatsu <mhiramat@redhat.com>
|
||||
*/
|
||||
#include "../include/asm/insn.h"
|
||||
#include "../include/asm/insn.h" /* __ignore_sync_check__ */
|
||||
|
||||
/* Attribute tables are generated from opcode map */
|
||||
#include "inat-tables.c"
|
||||
|
||||
@@ -10,10 +10,13 @@
|
||||
#else
|
||||
#include <string.h>
|
||||
#endif
|
||||
#include "../include/asm/inat.h"
|
||||
#include "../include/asm/insn.h"
|
||||
#include "../include/asm/inat.h" /* __ignore_sync_check__ */
|
||||
#include "../include/asm/insn.h" /* __ignore_sync_check__ */
|
||||
|
||||
#include "../include/asm/emulate_prefix.h"
|
||||
#include <linux/errno.h>
|
||||
#include <linux/kconfig.h>
|
||||
|
||||
#include "../include/asm/emulate_prefix.h" /* __ignore_sync_check__ */
|
||||
|
||||
/* Verify next sizeof(t) bytes can be on the same instruction */
|
||||
#define validate_next(t, insn, n) \
|
||||
@@ -97,8 +100,12 @@ static void insn_get_emulate_prefix(struct insn *insn)
|
||||
* Populates the @insn->prefixes bitmap, and updates @insn->next_byte
|
||||
* to point to the (first) opcode. No effect if @insn->prefixes.got
|
||||
* is already set.
|
||||
*
|
||||
* * Returns:
|
||||
* 0: on success
|
||||
* < 0: on error
|
||||
*/
|
||||
void insn_get_prefixes(struct insn *insn)
|
||||
int insn_get_prefixes(struct insn *insn)
|
||||
{
|
||||
struct insn_field *prefixes = &insn->prefixes;
|
||||
insn_attr_t attr;
|
||||
@@ -106,7 +113,7 @@ void insn_get_prefixes(struct insn *insn)
|
||||
int i, nb;
|
||||
|
||||
if (prefixes->got)
|
||||
return;
|
||||
return 0;
|
||||
|
||||
insn_get_emulate_prefix(insn);
|
||||
|
||||
@@ -217,8 +224,10 @@ vex_end:
|
||||
|
||||
prefixes->got = 1;
|
||||
|
||||
return 0;
|
||||
|
||||
err_out:
|
||||
return;
|
||||
return -ENODATA;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -230,16 +239,25 @@ err_out:
|
||||
* If necessary, first collects any preceding (prefix) bytes.
|
||||
* Sets @insn->opcode.value = opcode1. No effect if @insn->opcode.got
|
||||
* is already 1.
|
||||
*
|
||||
* Returns:
|
||||
* 0: on success
|
||||
* < 0: on error
|
||||
*/
|
||||
void insn_get_opcode(struct insn *insn)
|
||||
int insn_get_opcode(struct insn *insn)
|
||||
{
|
||||
struct insn_field *opcode = &insn->opcode;
|
||||
int pfx_id, ret;
|
||||
insn_byte_t op;
|
||||
int pfx_id;
|
||||
|
||||
if (opcode->got)
|
||||
return;
|
||||
if (!insn->prefixes.got)
|
||||
insn_get_prefixes(insn);
|
||||
return 0;
|
||||
|
||||
if (!insn->prefixes.got) {
|
||||
ret = insn_get_prefixes(insn);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Get first opcode */
|
||||
op = get_next(insn_byte_t, insn);
|
||||
@@ -254,9 +272,13 @@ void insn_get_opcode(struct insn *insn)
|
||||
insn->attr = inat_get_avx_attribute(op, m, p);
|
||||
if ((inat_must_evex(insn->attr) && !insn_is_evex(insn)) ||
|
||||
(!inat_accept_vex(insn->attr) &&
|
||||
!inat_is_group(insn->attr)))
|
||||
insn->attr = 0; /* This instruction is bad */
|
||||
goto end; /* VEX has only 1 byte for opcode */
|
||||
!inat_is_group(insn->attr))) {
|
||||
/* This instruction is bad */
|
||||
insn->attr = 0;
|
||||
return -EINVAL;
|
||||
}
|
||||
/* VEX has only 1 byte for opcode */
|
||||
goto end;
|
||||
}
|
||||
|
||||
insn->attr = inat_get_opcode_attribute(op);
|
||||
@@ -267,13 +289,18 @@ void insn_get_opcode(struct insn *insn)
|
||||
pfx_id = insn_last_prefix_id(insn);
|
||||
insn->attr = inat_get_escape_attribute(op, pfx_id, insn->attr);
|
||||
}
|
||||
if (inat_must_vex(insn->attr))
|
||||
insn->attr = 0; /* This instruction is bad */
|
||||
|
||||
if (inat_must_vex(insn->attr)) {
|
||||
/* This instruction is bad */
|
||||
insn->attr = 0;
|
||||
return -EINVAL;
|
||||
}
|
||||
end:
|
||||
opcode->got = 1;
|
||||
return 0;
|
||||
|
||||
err_out:
|
||||
return;
|
||||
return -ENODATA;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -283,15 +310,25 @@ err_out:
|
||||
* Populates @insn->modrm and updates @insn->next_byte to point past the
|
||||
* ModRM byte, if any. If necessary, first collects the preceding bytes
|
||||
* (prefixes and opcode(s)). No effect if @insn->modrm.got is already 1.
|
||||
*
|
||||
* Returns:
|
||||
* 0: on success
|
||||
* < 0: on error
|
||||
*/
|
||||
void insn_get_modrm(struct insn *insn)
|
||||
int insn_get_modrm(struct insn *insn)
|
||||
{
|
||||
struct insn_field *modrm = &insn->modrm;
|
||||
insn_byte_t pfx_id, mod;
|
||||
int ret;
|
||||
|
||||
if (modrm->got)
|
||||
return;
|
||||
if (!insn->opcode.got)
|
||||
insn_get_opcode(insn);
|
||||
return 0;
|
||||
|
||||
if (!insn->opcode.got) {
|
||||
ret = insn_get_opcode(insn);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (inat_has_modrm(insn->attr)) {
|
||||
mod = get_next(insn_byte_t, insn);
|
||||
@@ -301,17 +338,22 @@ void insn_get_modrm(struct insn *insn)
|
||||
pfx_id = insn_last_prefix_id(insn);
|
||||
insn->attr = inat_get_group_attribute(mod, pfx_id,
|
||||
insn->attr);
|
||||
if (insn_is_avx(insn) && !inat_accept_vex(insn->attr))
|
||||
insn->attr = 0; /* This is bad */
|
||||
if (insn_is_avx(insn) && !inat_accept_vex(insn->attr)) {
|
||||
/* Bad insn */
|
||||
insn->attr = 0;
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (insn->x86_64 && inat_is_force64(insn->attr))
|
||||
insn->opnd_bytes = 8;
|
||||
|
||||
modrm->got = 1;
|
||||
return 0;
|
||||
|
||||
err_out:
|
||||
return;
|
||||
return -ENODATA;
|
||||
}
|
||||
|
||||
|
||||
@@ -325,11 +367,16 @@ err_out:
|
||||
int insn_rip_relative(struct insn *insn)
|
||||
{
|
||||
struct insn_field *modrm = &insn->modrm;
|
||||
int ret;
|
||||
|
||||
if (!insn->x86_64)
|
||||
return 0;
|
||||
if (!modrm->got)
|
||||
insn_get_modrm(insn);
|
||||
|
||||
if (!modrm->got) {
|
||||
ret = insn_get_modrm(insn);
|
||||
if (ret)
|
||||
return 0;
|
||||
}
|
||||
/*
|
||||
* For rip-relative instructions, the mod field (top 2 bits)
|
||||
* is zero and the r/m field (bottom 3 bits) is 0x5.
|
||||
@@ -343,15 +390,25 @@ int insn_rip_relative(struct insn *insn)
|
||||
*
|
||||
* If necessary, first collects the instruction up to and including the
|
||||
* ModRM byte.
|
||||
*
|
||||
* Returns:
|
||||
* 0: if decoding succeeded
|
||||
* < 0: otherwise.
|
||||
*/
|
||||
void insn_get_sib(struct insn *insn)
|
||||
int insn_get_sib(struct insn *insn)
|
||||
{
|
||||
insn_byte_t modrm;
|
||||
int ret;
|
||||
|
||||
if (insn->sib.got)
|
||||
return;
|
||||
if (!insn->modrm.got)
|
||||
insn_get_modrm(insn);
|
||||
return 0;
|
||||
|
||||
if (!insn->modrm.got) {
|
||||
ret = insn_get_modrm(insn);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (insn->modrm.nbytes) {
|
||||
modrm = (insn_byte_t)insn->modrm.value;
|
||||
if (insn->addr_bytes != 2 &&
|
||||
@@ -362,8 +419,10 @@ void insn_get_sib(struct insn *insn)
|
||||
}
|
||||
insn->sib.got = 1;
|
||||
|
||||
return 0;
|
||||
|
||||
err_out:
|
||||
return;
|
||||
return -ENODATA;
|
||||
}
|
||||
|
||||
|
||||
@@ -374,15 +433,25 @@ err_out:
|
||||
* If necessary, first collects the instruction up to and including the
|
||||
* SIB byte.
|
||||
* Displacement value is sign-expanded.
|
||||
*
|
||||
* * Returns:
|
||||
* 0: if decoding succeeded
|
||||
* < 0: otherwise.
|
||||
*/
|
||||
void insn_get_displacement(struct insn *insn)
|
||||
int insn_get_displacement(struct insn *insn)
|
||||
{
|
||||
insn_byte_t mod, rm, base;
|
||||
int ret;
|
||||
|
||||
if (insn->displacement.got)
|
||||
return;
|
||||
if (!insn->sib.got)
|
||||
insn_get_sib(insn);
|
||||
return 0;
|
||||
|
||||
if (!insn->sib.got) {
|
||||
ret = insn_get_sib(insn);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (insn->modrm.nbytes) {
|
||||
/*
|
||||
* Interpreting the modrm byte:
|
||||
@@ -425,9 +494,10 @@ void insn_get_displacement(struct insn *insn)
|
||||
}
|
||||
out:
|
||||
insn->displacement.got = 1;
|
||||
return 0;
|
||||
|
||||
err_out:
|
||||
return;
|
||||
return -ENODATA;
|
||||
}
|
||||
|
||||
/* Decode moffset16/32/64. Return 0 if failed */
|
||||
@@ -538,20 +608,30 @@ err_out:
|
||||
}
|
||||
|
||||
/**
|
||||
* insn_get_immediate() - Get the immediates of instruction
|
||||
* insn_get_immediate() - Get the immediate in an instruction
|
||||
* @insn: &struct insn containing instruction
|
||||
*
|
||||
* If necessary, first collects the instruction up to and including the
|
||||
* displacement bytes.
|
||||
* Basically, most of immediates are sign-expanded. Unsigned-value can be
|
||||
* get by bit masking with ((1 << (nbytes * 8)) - 1)
|
||||
* computed by bit masking with ((1 << (nbytes * 8)) - 1)
|
||||
*
|
||||
* Returns:
|
||||
* 0: on success
|
||||
* < 0: on error
|
||||
*/
|
||||
void insn_get_immediate(struct insn *insn)
|
||||
int insn_get_immediate(struct insn *insn)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (insn->immediate.got)
|
||||
return;
|
||||
if (!insn->displacement.got)
|
||||
insn_get_displacement(insn);
|
||||
return 0;
|
||||
|
||||
if (!insn->displacement.got) {
|
||||
ret = insn_get_displacement(insn);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (inat_has_moffset(insn->attr)) {
|
||||
if (!__get_moffset(insn))
|
||||
@@ -604,9 +684,10 @@ void insn_get_immediate(struct insn *insn)
|
||||
}
|
||||
done:
|
||||
insn->immediate.got = 1;
|
||||
return 0;
|
||||
|
||||
err_out:
|
||||
return;
|
||||
return -ENODATA;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -615,13 +696,58 @@ err_out:
|
||||
*
|
||||
* If necessary, first collects the instruction up to and including the
|
||||
* immediates bytes.
|
||||
*/
|
||||
void insn_get_length(struct insn *insn)
|
||||
*
|
||||
* Returns:
|
||||
* - 0 on success
|
||||
* - < 0 on error
|
||||
*/
|
||||
int insn_get_length(struct insn *insn)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (insn->length)
|
||||
return;
|
||||
if (!insn->immediate.got)
|
||||
insn_get_immediate(insn);
|
||||
return 0;
|
||||
|
||||
if (!insn->immediate.got) {
|
||||
ret = insn_get_immediate(insn);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
insn->length = (unsigned char)((unsigned long)insn->next_byte
|
||||
- (unsigned long)insn->kaddr);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* insn_decode() - Decode an x86 instruction
|
||||
* @insn: &struct insn to be initialized
|
||||
* @kaddr: address (in kernel memory) of instruction (or copy thereof)
|
||||
* @buf_len: length of the insn buffer at @kaddr
|
||||
* @m: insn mode, see enum insn_mode
|
||||
*
|
||||
* Returns:
|
||||
* 0: if decoding succeeded
|
||||
* < 0: otherwise.
|
||||
*/
|
||||
int insn_decode(struct insn *insn, const void *kaddr, int buf_len, enum insn_mode m)
|
||||
{
|
||||
int ret;
|
||||
|
||||
#define INSN_MODE_KERN (enum insn_mode)-1 /* __ignore_sync_check__ mode is only valid in the kernel */
|
||||
|
||||
if (m == INSN_MODE_KERN)
|
||||
insn_init(insn, kaddr, buf_len, IS_ENABLED(CONFIG_X86_64));
|
||||
else
|
||||
insn_init(insn, kaddr, buf_len, m == INSN_MODE_64);
|
||||
|
||||
ret = insn_get_length(insn);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (insn_complete(insn))
|
||||
return 0;
|
||||
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
#include <linux/linkage.h>
|
||||
#include <asm/errno.h>
|
||||
#include <asm/cpufeatures.h>
|
||||
#include <asm/alternative-asm.h>
|
||||
#include <asm/alternative.h>
|
||||
#include <asm/export.h>
|
||||
|
||||
.pushsection .noinstr.text, "ax"
|
||||
@@ -39,7 +39,7 @@ SYM_FUNC_START_WEAK(memcpy)
|
||||
rep movsq
|
||||
movl %edx, %ecx
|
||||
rep movsb
|
||||
ret
|
||||
RET
|
||||
SYM_FUNC_END(memcpy)
|
||||
SYM_FUNC_END_ALIAS(__memcpy)
|
||||
EXPORT_SYMBOL(memcpy)
|
||||
@@ -53,7 +53,7 @@ SYM_FUNC_START_LOCAL(memcpy_erms)
|
||||
movq %rdi, %rax
|
||||
movq %rdx, %rcx
|
||||
rep movsb
|
||||
ret
|
||||
RET
|
||||
SYM_FUNC_END(memcpy_erms)
|
||||
|
||||
SYM_FUNC_START_LOCAL(memcpy_orig)
|
||||
@@ -137,7 +137,7 @@ SYM_FUNC_START_LOCAL(memcpy_orig)
|
||||
movq %r9, 1*8(%rdi)
|
||||
movq %r10, -2*8(%rdi, %rdx)
|
||||
movq %r11, -1*8(%rdi, %rdx)
|
||||
retq
|
||||
RET
|
||||
.p2align 4
|
||||
.Lless_16bytes:
|
||||
cmpl $8, %edx
|
||||
@@ -149,7 +149,7 @@ SYM_FUNC_START_LOCAL(memcpy_orig)
|
||||
movq -1*8(%rsi, %rdx), %r9
|
||||
movq %r8, 0*8(%rdi)
|
||||
movq %r9, -1*8(%rdi, %rdx)
|
||||
retq
|
||||
RET
|
||||
.p2align 4
|
||||
.Lless_8bytes:
|
||||
cmpl $4, %edx
|
||||
@@ -162,7 +162,7 @@ SYM_FUNC_START_LOCAL(memcpy_orig)
|
||||
movl -4(%rsi, %rdx), %r8d
|
||||
movl %ecx, (%rdi)
|
||||
movl %r8d, -4(%rdi, %rdx)
|
||||
retq
|
||||
RET
|
||||
.p2align 4
|
||||
.Lless_3bytes:
|
||||
subl $1, %edx
|
||||
@@ -180,7 +180,7 @@ SYM_FUNC_START_LOCAL(memcpy_orig)
|
||||
movb %cl, (%rdi)
|
||||
|
||||
.Lend:
|
||||
retq
|
||||
RET
|
||||
SYM_FUNC_END(memcpy_orig)
|
||||
|
||||
.popsection
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
|
||||
#include <linux/linkage.h>
|
||||
#include <asm/cpufeatures.h>
|
||||
#include <asm/alternative-asm.h>
|
||||
#include <asm/alternative.h>
|
||||
#include <asm/export.h>
|
||||
|
||||
/*
|
||||
@@ -40,7 +40,7 @@ SYM_FUNC_START(__memset)
|
||||
movl %edx,%ecx
|
||||
rep stosb
|
||||
movq %r9,%rax
|
||||
ret
|
||||
RET
|
||||
SYM_FUNC_END(__memset)
|
||||
SYM_FUNC_END_ALIAS(memset)
|
||||
EXPORT_SYMBOL(memset)
|
||||
@@ -63,7 +63,7 @@ SYM_FUNC_START_LOCAL(memset_erms)
|
||||
movq %rdx,%rcx
|
||||
rep stosb
|
||||
movq %r9,%rax
|
||||
ret
|
||||
RET
|
||||
SYM_FUNC_END(memset_erms)
|
||||
|
||||
SYM_FUNC_START_LOCAL(memset_orig)
|
||||
@@ -125,7 +125,7 @@ SYM_FUNC_START_LOCAL(memset_orig)
|
||||
|
||||
.Lende:
|
||||
movq %r10,%rax
|
||||
ret
|
||||
RET
|
||||
|
||||
.Lbad_alignment:
|
||||
cmpq $7,%rdx
|
||||
|
||||
@@ -416,7 +416,7 @@ static int btf_dumper_int(const struct btf_type *t, __u8 bit_offset,
|
||||
*(char *)data);
|
||||
break;
|
||||
case BTF_INT_BOOL:
|
||||
jsonw_bool(jw, *(int *)data);
|
||||
jsonw_bool(jw, *(bool *)data);
|
||||
break;
|
||||
default:
|
||||
/* shouldn't happen */
|
||||
|
||||
@@ -271,6 +271,9 @@ int do_pin_any(int argc, char **argv, int (*get_fd)(int *, char ***))
|
||||
int err;
|
||||
int fd;
|
||||
|
||||
if (!REQ_ARGS(3))
|
||||
return -EINVAL;
|
||||
|
||||
fd = get_fd(&argc, &argv);
|
||||
if (fd < 0)
|
||||
return fd;
|
||||
|
||||
@@ -398,6 +398,16 @@ int main(int argc, char **argv)
|
||||
|
||||
setlinebuf(stdout);
|
||||
|
||||
#ifdef USE_LIBCAP
|
||||
/* Libcap < 2.63 hooks before main() to compute the number of
|
||||
* capabilities of the running kernel, and doing so it calls prctl()
|
||||
* which may fail and set errno to non-zero.
|
||||
* Let's reset errno to make sure this does not interfere with the
|
||||
* batch mode.
|
||||
*/
|
||||
errno = 0;
|
||||
#endif
|
||||
|
||||
last_do_help = do_help;
|
||||
pretty_output = false;
|
||||
json_output = false;
|
||||
|
||||
@@ -210,9 +210,16 @@ strip-libs = $(filter-out -l%,$(1))
|
||||
PERL_EMBED_LDOPTS = $(shell perl -MExtUtils::Embed -e ldopts 2>/dev/null)
|
||||
PERL_EMBED_LDFLAGS = $(call strip-libs,$(PERL_EMBED_LDOPTS))
|
||||
PERL_EMBED_LIBADD = $(call grep-libs,$(PERL_EMBED_LDOPTS))
|
||||
PERL_EMBED_CCOPTS = `perl -MExtUtils::Embed -e ccopts 2>/dev/null`
|
||||
PERL_EMBED_CCOPTS = $(shell perl -MExtUtils::Embed -e ccopts 2>/dev/null)
|
||||
FLAGS_PERL_EMBED=$(PERL_EMBED_CCOPTS) $(PERL_EMBED_LDOPTS)
|
||||
|
||||
ifeq ($(CC_NO_CLANG), 0)
|
||||
PERL_EMBED_LDOPTS := $(filter-out -specs=%,$(PERL_EMBED_LDOPTS))
|
||||
PERL_EMBED_CCOPTS := $(filter-out -flto=auto -ffat-lto-objects, $(PERL_EMBED_CCOPTS))
|
||||
PERL_EMBED_CCOPTS := $(filter-out -specs=%,$(PERL_EMBED_CCOPTS))
|
||||
FLAGS_PERL_EMBED += -Wno-compound-token-split-by-macro
|
||||
endif
|
||||
|
||||
$(OUTPUT)test-libperl.bin:
|
||||
$(BUILD) $(FLAGS_PERL_EMBED)
|
||||
|
||||
|
||||
@@ -1,16 +1,23 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
#include <openssl/evp.h>
|
||||
#include <openssl/sha.h>
|
||||
#include <openssl/md5.h>
|
||||
|
||||
int main(void)
|
||||
{
|
||||
MD5_CTX context;
|
||||
EVP_MD_CTX *mdctx;
|
||||
unsigned char md[MD5_DIGEST_LENGTH + SHA_DIGEST_LENGTH];
|
||||
unsigned char dat[] = "12345";
|
||||
unsigned int digest_len;
|
||||
|
||||
MD5_Init(&context);
|
||||
MD5_Update(&context, &dat[0], sizeof(dat));
|
||||
MD5_Final(&md[0], &context);
|
||||
mdctx = EVP_MD_CTX_new();
|
||||
if (!mdctx)
|
||||
return 0;
|
||||
|
||||
EVP_DigestInit_ex(mdctx, EVP_md5(), NULL);
|
||||
EVP_DigestUpdate(mdctx, &dat[0], sizeof(dat));
|
||||
EVP_DigestFinal_ex(mdctx, &md[0], &digest_len);
|
||||
EVP_MD_CTX_free(mdctx);
|
||||
|
||||
SHA1(&dat[0], sizeof(dat), &md[0]);
|
||||
|
||||
|
||||
@@ -543,6 +543,10 @@ static int calc_digits(int num)
|
||||
{
|
||||
int count = 0;
|
||||
|
||||
/* It takes a digit to represent zero */
|
||||
if (!num)
|
||||
return 1;
|
||||
|
||||
while (num != 0) {
|
||||
num /= 10;
|
||||
count++;
|
||||
|
||||
sysdrv/source/kernel/tools/include/linux/kconfig.h (new file, 67 lines)
@@ -0,0 +1,67 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
#ifndef _TOOLS_LINUX_KCONFIG_H
|
||||
#define _TOOLS_LINUX_KCONFIG_H
|
||||
|
||||
/* CONFIG_CC_VERSION_TEXT (Do not delete this comment. See help in Kconfig) */
|
||||
|
||||
#define __ARG_PLACEHOLDER_1 0,
|
||||
#define __take_second_arg(__ignored, val, ...) val
|
||||
|
||||
/*
|
||||
* The use of "&&" / "||" is limited in certain expressions.
|
||||
* The following enable to calculate "and" / "or" with macro expansion only.
|
||||
*/
|
||||
#define __and(x, y) ___and(x, y)
|
||||
#define ___and(x, y) ____and(__ARG_PLACEHOLDER_##x, y)
|
||||
#define ____and(arg1_or_junk, y) __take_second_arg(arg1_or_junk y, 0)
|
||||
|
||||
#define __or(x, y) ___or(x, y)
|
||||
#define ___or(x, y) ____or(__ARG_PLACEHOLDER_##x, y)
|
||||
#define ____or(arg1_or_junk, y) __take_second_arg(arg1_or_junk 1, y)
|
||||
|
||||
/*
|
||||
* Helper macros to use CONFIG_ options in C/CPP expressions. Note that
|
||||
* these only work with boolean and tristate options.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Getting something that works in C and CPP for an arg that may or may
|
||||
* not be defined is tricky. Here, if we have "#define CONFIG_BOOGER 1"
|
||||
* we match on the placeholder define, insert the "0," for arg1 and generate
|
||||
* the triplet (0, 1, 0). Then the last step cherry picks the 2nd arg (a one).
|
||||
* When CONFIG_BOOGER is not defined, we generate a (... 1, 0) pair, and when
|
||||
* the last step cherry picks the 2nd arg, we get a zero.
|
||||
*/
|
||||
#define __is_defined(x) ___is_defined(x)
|
||||
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
|
||||
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
|
||||
|
||||
/*
|
||||
* IS_BUILTIN(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y', 0
|
||||
* otherwise. For boolean options, this is equivalent to
|
||||
* IS_ENABLED(CONFIG_FOO).
|
||||
*/
|
||||
#define IS_BUILTIN(option) __is_defined(option)
|
||||
|
||||
/*
|
||||
* IS_MODULE(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'm', 0
|
||||
* otherwise.
|
||||
*/
|
||||
#define IS_MODULE(option) __is_defined(option##_MODULE)
|
||||
|
||||
/*
|
||||
* IS_REACHABLE(CONFIG_FOO) evaluates to 1 if the currently compiled
|
||||
* code can call a function defined in code compiled based on CONFIG_FOO.
|
||||
* This is similar to IS_ENABLED(), but returns false when invoked from
|
||||
* built-in code when CONFIG_FOO is set to 'm'.
|
||||
*/
|
||||
#define IS_REACHABLE(option) __or(IS_BUILTIN(option), \
|
||||
__and(IS_MODULE(option), __is_defined(MODULE)))
|
||||
|
||||
/*
|
||||
* IS_ENABLED(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y' or 'm',
|
||||
* 0 otherwise.
|
||||
*/
|
||||
#define IS_ENABLED(option) __or(IS_BUILTIN(option), IS_MODULE(option))
|
||||
|
||||
#endif /* _TOOLS_LINUX_KCONFIG_H */
|
||||
@@ -29,11 +29,19 @@ struct unwind_hint {
|
||||
*
|
||||
* UNWIND_HINT_TYPE_REGS_PARTIAL: Used in entry code to indicate that
|
||||
* sp_reg+sp_offset points to the iret return frame.
|
||||
*
|
||||
* UNWIND_HINT_FUNC: Generate the unwind metadata of a callable function.
|
||||
* Useful for code which doesn't have an ELF function annotation.
|
||||
*
|
||||
* UNWIND_HINT_ENTRY: machine entry without stack, SYSCALL/SYSENTER etc.
|
||||
*/
|
||||
#define UNWIND_HINT_TYPE_CALL 0
|
||||
#define UNWIND_HINT_TYPE_REGS 1
|
||||
#define UNWIND_HINT_TYPE_REGS_PARTIAL 2
|
||||
#define UNWIND_HINT_TYPE_RET_OFFSET 3
|
||||
#define UNWIND_HINT_TYPE_FUNC 3
|
||||
#define UNWIND_HINT_TYPE_ENTRY 4
|
||||
#define UNWIND_HINT_TYPE_SAVE 5
|
||||
#define UNWIND_HINT_TYPE_RESTORE 6
|
||||
|
||||
#ifdef CONFIG_STACK_VALIDATION
|
||||
|
||||
@@ -96,7 +104,7 @@ struct unwind_hint {
|
||||
* the debuginfo as necessary. It will also warn if it sees any
|
||||
* inconsistencies.
|
||||
*/
|
||||
.macro UNWIND_HINT sp_reg:req sp_offset=0 type:req end=0
|
||||
.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 end=0
|
||||
.Lunwind_hint_ip_\@:
|
||||
.pushsection .discard.unwind_hints
|
||||
/* struct unwind_hint */
|
||||
@@ -120,7 +128,7 @@ struct unwind_hint {
|
||||
#define STACK_FRAME_NON_STANDARD(func)
|
||||
#else
|
||||
#define ANNOTATE_INTRA_FUNCTION_CALL
|
||||
.macro UNWIND_HINT sp_reg:req sp_offset=0 type:req end=0
|
||||
.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 end=0
|
||||
.endm
|
||||
#endif
|
||||
|
||||
|
||||
@@ -2318,9 +2318,9 @@ static __attribute__((unused))
|
||||
int memcmp(const void *s1, const void *s2, size_t n)
|
||||
{
|
||||
size_t ofs = 0;
|
||||
char c1 = 0;
|
||||
int c1 = 0;
|
||||
|
||||
while (ofs < n && !(c1 = ((char *)s1)[ofs] - ((char *)s2)[ofs])) {
|
||||
while (ofs < n && !(c1 = ((unsigned char *)s1)[ofs] - ((unsigned char *)s2)[ofs])) {
|
||||
ofs++;
|
||||
}
|
||||
return c1;
|
||||
|
||||
@@ -9,8 +9,8 @@
|
||||
#include "../../../arch/alpha/include/uapi/asm/errno.h"
|
||||
#elif defined(__mips__)
|
||||
#include "../../../arch/mips/include/uapi/asm/errno.h"
|
||||
#elif defined(__xtensa__)
|
||||
#include "../../../arch/xtensa/include/uapi/asm/errno.h"
|
||||
#elif defined(__hppa__)
|
||||
#include "../../../arch/parisc/include/uapi/asm/errno.h"
|
||||
#else
|
||||
#include <asm-generic/errno.h>
|
||||
#endif
|
||||
|
||||
@@ -4180,7 +4180,8 @@ struct bpf_sock {
|
||||
__u32 src_ip4;
|
||||
__u32 src_ip6[4];
|
||||
__u32 src_port; /* host byte order */
|
||||
__u32 dst_port; /* network byte order */
|
||||
__be16 dst_port; /* network byte order */
|
||||
__u16 :16; /* zero padding */
|
||||
__u32 dst_ip4;
|
||||
__u32 dst_ip6[4];
|
||||
__u32 state;
|
||||
@@ -5006,7 +5007,10 @@ struct bpf_pidns_info {
|
||||
|
||||
/* User accessible data for SK_LOOKUP programs. Add new fields at the end. */
|
||||
struct bpf_sk_lookup {
|
||||
__bpf_md_ptr(struct bpf_sock *, sk); /* Selected socket */
|
||||
union {
|
||||
__bpf_md_ptr(struct bpf_sock *, sk); /* Selected socket */
|
||||
__u64 cookie; /* Non-zero if socket was selected in PROG_TEST_RUN */
|
||||
};
|
||||
|
||||
__u32 family; /* Protocol family (AF_INET, AF_INET6) */
|
||||
__u32 protocol; /* IP protocol (IPPROTO_TCP, IPPROTO_UDP) */
|
||||
|
||||
@@ -35,5 +35,9 @@ struct open_how {
|
||||
#define RESOLVE_IN_ROOT 0x10 /* Make all jumps to "/" and ".."
|
||||
be scoped inside the dirfd
|
||||
(similar to chroot(2)). */
|
||||
#define RESOLVE_CACHED 0x20 /* Only complete if resolution can be
|
||||
completed through cached lookup. May
|
||||
return -EAGAIN if that's not
|
||||
possible. */
|
||||
|
||||
#endif /* _UAPI_LINUX_OPENAT2_H */
|
||||
|
||||
@@ -1646,7 +1646,8 @@ Press any other key to refresh statistics immediately.
|
||||
.format(values))
|
||||
if len(pids) > 1:
|
||||
sys.exit('Error: Multiple processes found (pids: {}). Use "-p"'
|
||||
' to specify the desired pid'.format(" ".join(pids)))
|
||||
' to specify the desired pid'
|
||||
.format(" ".join(map(str, pids))))
|
||||
namespace.pid = pids[0]
|
||||
|
||||
argparser = argparse.ArgumentParser(description=description_text,
|
||||
|
||||
@@ -147,7 +147,7 @@ GLOBAL_SYM_COUNT = $(shell readelf -s --wide $(BPF_IN_SHARED) | \
|
||||
sort -u | wc -l)
|
||||
VERSIONED_SYM_COUNT = $(shell readelf --dyn-syms --wide $(OUTPUT)libbpf.so | \
|
||||
sed 's/\[.*\]//' | \
|
||||
awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$NF}' | \
|
||||
awk '/GLOBAL/ && /DEFAULT/ && !/UND|ABS/ {print $$NF}' | \
|
||||
grep -Eo '[^ ]+@LIBBPF_' | cut -d@ -f1 | sort -u | wc -l)
|
||||
|
||||
CMD_TARGETS = $(LIB_TARGET) $(PC_FILE)
|
||||
@@ -216,7 +216,7 @@ check_abi: $(OUTPUT)libbpf.so $(VERSION_SCRIPT)
|
||||
sort -u > $(OUTPUT)libbpf_global_syms.tmp; \
|
||||
readelf --dyn-syms --wide $(OUTPUT)libbpf.so | \
|
||||
sed 's/\[.*\]//' | \
|
||||
awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$NF}'| \
|
||||
awk '/GLOBAL/ && /DEFAULT/ && !/UND|ABS/ {print $$NF}'| \
|
||||
grep -Eo '[^ ]+@LIBBPF_' | cut -d@ -f1 | \
|
||||
sort -u > $(OUTPUT)libbpf_versioned_syms.tmp; \
|
||||
diff -u $(OUTPUT)libbpf_global_syms.tmp \
|
||||
|
||||
@@ -3652,7 +3652,7 @@ static int bpf_get_map_info_from_fdinfo(int fd, struct bpf_map_info *info)
|
||||
int bpf_map__reuse_fd(struct bpf_map *map, int fd)
|
||||
{
|
||||
struct bpf_map_info info = {};
|
||||
__u32 len = sizeof(info);
|
||||
__u32 len = sizeof(info), name_len;
|
||||
int new_fd, err;
|
||||
char *new_name;
|
||||
|
||||
@@ -3662,7 +3662,12 @@ int bpf_map__reuse_fd(struct bpf_map *map, int fd)
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
new_name = strdup(info.name);
|
||||
name_len = strlen(info.name);
|
||||
if (name_len == BPF_OBJ_NAME_LEN - 1 && strncmp(map->name, info.name, name_len) == 0)
|
||||
new_name = strdup(map->name);
|
||||
else
|
||||
new_name = strdup(info.name);
|
||||
|
||||
if (!new_name)
|
||||
return -errno;
|
||||
|
||||
@@ -5928,9 +5933,10 @@ bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
|
||||
*/
|
||||
prog = NULL;
|
||||
for (i = 0; i < obj->nr_programs; i++) {
|
||||
prog = &obj->programs[i];
|
||||
if (strcmp(prog->sec_name, sec_name) == 0)
|
||||
if (strcmp(obj->programs[i].sec_name, sec_name) == 0) {
|
||||
prog = &obj->programs[i];
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (!prog) {
|
||||
pr_warn("sec '%s': failed to find a BPF program\n", sec_name);
|
||||
@@ -5945,10 +5951,17 @@ bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
|
||||
insn_idx = rec->insn_off / BPF_INSN_SZ;
|
||||
prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
|
||||
if (!prog) {
|
||||
pr_warn("sec '%s': failed to find program at insn #%d for CO-RE offset relocation #%d\n",
|
||||
sec_name, insn_idx, i);
|
||||
err = -EINVAL;
|
||||
goto out;
|
||||
/* When __weak subprog is "overridden" by another instance
|
||||
* of the subprog from a different object file, linker still
|
||||
* appends all the .BTF.ext info that used to belong to that
|
||||
* eliminated subprogram.
|
||||
* This is similar to what x86-64 linker does for relocations.
|
||||
* So just ignore such relocations just like we ignore
|
||||
* subprog instructions when discovering subprograms.
|
||||
*/
|
||||
pr_debug("sec '%s': skipping CO-RE relocation #%d for insn #%d belonging to eliminated weak subprogram\n",
|
||||
sec_name, i, insn_idx);
|
||||
continue;
|
||||
}
|
||||
/* no need to apply CO-RE relocation if the program is
|
||||
* not going to be loaded
|
||||
|
||||
@@ -244,7 +244,7 @@ bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex)
|
||||
case BPF_MAP_TYPE_RINGBUF:
|
||||
key_size = 0;
|
||||
value_size = 0;
|
||||
max_entries = 4096;
|
||||
max_entries = sysconf(_SC_PAGE_SIZE);
|
||||
break;
|
||||
case BPF_MAP_TYPE_UNSPEC:
|
||||
case BPF_MAP_TYPE_HASH:
|
||||
|
||||
@@ -59,6 +59,7 @@ int ring_buffer__add(struct ring_buffer *rb, int map_fd,
|
||||
__u32 len = sizeof(info);
|
||||
struct epoll_event *e;
|
||||
struct ring *r;
|
||||
__u64 mmap_sz;
|
||||
void *tmp;
|
||||
int err;
|
||||
|
||||
@@ -97,8 +98,7 @@ int ring_buffer__add(struct ring_buffer *rb, int map_fd,
|
||||
r->mask = info.max_entries - 1;
|
||||
|
||||
/* Map writable consumer page */
|
||||
tmp = mmap(NULL, rb->page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
|
||||
map_fd, 0);
|
||||
tmp = mmap(NULL, rb->page_size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
|
||||
if (tmp == MAP_FAILED) {
|
||||
err = -errno;
|
||||
pr_warn("ringbuf: failed to mmap consumer page for map fd=%d: %d\n",
|
||||
@@ -111,8 +111,12 @@ int ring_buffer__add(struct ring_buffer *rb, int map_fd,
|
||||
* data size to allow simple reading of samples that wrap around the
|
||||
* end of a ring buffer. See kernel implementation for details.
|
||||
* */
|
||||
tmp = mmap(NULL, rb->page_size + 2 * info.max_entries, PROT_READ,
|
||||
MAP_SHARED, map_fd, rb->page_size);
|
||||
mmap_sz = rb->page_size + 2 * (__u64)info.max_entries;
|
||||
if (mmap_sz != (__u64)(size_t)mmap_sz) {
|
||||
pr_warn("ringbuf: ring buffer size (%u) is too big\n", info.max_entries);
|
||||
return -E2BIG;
|
||||
}
|
||||
tmp = mmap(NULL, (size_t)mmap_sz, PROT_READ, MAP_SHARED, map_fd, rb->page_size);
|
||||
if (tmp == MAP_FAILED) {
|
||||
err = -errno;
|
||||
ringbuf_unmap_ring(rb, r);
|
||||
|
||||
@@ -849,8 +849,6 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
|
||||
goto out_mmap_tx;
|
||||
}
|
||||
|
||||
ctx->prog_fd = -1;
|
||||
|
||||
if (!(xsk->config.libbpf_flags & XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD)) {
|
||||
err = xsk_setup_xdp_prog(xsk);
|
||||
if (err)
|
||||
@@ -931,11 +929,14 @@ void xsk_socket__delete(struct xsk_socket *xsk)
|
||||
|
||||
ctx = xsk->ctx;
|
||||
umem = ctx->umem;
|
||||
if (ctx->prog_fd != -1) {
|
||||
|
||||
if (ctx->refcount == 1) {
|
||||
xsk_delete_bpf_maps(xsk);
|
||||
close(ctx->prog_fd);
|
||||
}
|
||||
|
||||
xsk_put_ctx(ctx, true);
|
||||
|
||||
err = xsk_get_mmap_offsets(xsk->fd, &off);
|
||||
if (!err) {
|
||||
if (xsk->rx) {
|
||||
@@ -948,8 +949,6 @@ void xsk_socket__delete(struct xsk_socket *xsk)
|
||||
}
|
||||
}
|
||||
|
||||
xsk_put_ctx(ctx, true);
|
||||
|
||||
umem->refcount--;
|
||||
/* Do not close an fd that also has an associated umem connected
|
||||
* to it.
|
||||
|
||||
@@ -571,7 +571,6 @@ int perf_evlist__mmap_ops(struct perf_evlist *evlist,
|
||||
{
|
||||
struct perf_evsel *evsel;
|
||||
const struct perf_cpu_map *cpus = evlist->cpus;
|
||||
const struct perf_thread_map *threads = evlist->threads;
|
||||
|
||||
if (!ops || !ops->get || !ops->mmap)
|
||||
return -EINVAL;
|
||||
@@ -583,7 +582,7 @@ int perf_evlist__mmap_ops(struct perf_evlist *evlist,
|
||||
perf_evlist__for_each_entry(evlist, evsel) {
|
||||
if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
|
||||
evsel->sample_id == NULL &&
|
||||
perf_evsel__alloc_id(evsel, perf_cpu_map__nr(cpus), threads->nr) < 0)
|
||||
perf_evsel__alloc_id(evsel, evsel->fd->max_x, evsel->fd->max_y) < 0)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
|
||||
@@ -315,13 +315,15 @@ they mean, and suggestions for how to fix them.
|
||||
function tracing inserts additional calls, which is not obvious from the
|
||||
sources).
|
||||
|
||||
10. file.o: warning: func()+0x5c: alternative modifies stack
|
||||
10. file.o: warning: func()+0x5c: stack layout conflict in alternatives
|
||||
|
||||
This means that an alternative includes instructions that modify the
|
||||
stack. The problem is that there is only one ORC unwind table, this means
|
||||
that the ORC unwind entries must be valid for each of the alternatives.
|
||||
The easiest way to enforce this is to ensure alternatives do not contain
|
||||
any ORC entries, which in turn implies the above constraint.
|
||||
This means that in the use of the alternative() or ALTERNATIVE()
|
||||
macro, the code paths have conflicting modifications to the stack.
|
||||
The problem is that there is only one ORC unwind table, which means
|
||||
that the ORC unwind entries must be consistent for all possible
|
||||
instruction boundaries regardless of which code has been patched.
|
||||
This limitation can be overcome by massaging the alternatives with
|
||||
NOPs to shift the stack changes around so they no longer conflict.
|
||||
|
||||
11. file.o: warning: unannotated intra-function call
|
||||
|
||||
|
||||
@@ -46,10 +46,6 @@ ifeq ($(SRCARCH),x86)
|
||||
SUBCMD_ORC := y
|
||||
endif
|
||||
|
||||
ifeq ($(SUBCMD_ORC),y)
|
||||
CFLAGS += -DINSN_USE_ORC
|
||||
endif
|
||||
|
||||
export SUBCMD_CHECK SUBCMD_ORC
|
||||
export srctree OUTPUT CFLAGS SRCARCH AWK
|
||||
include $(srctree)/tools/build/Makefile.include
|
||||
|
||||
@@ -11,10 +11,6 @@
|
||||
#include "objtool.h"
|
||||
#include "cfi.h"
|
||||
|
||||
#ifdef INSN_USE_ORC
|
||||
#include <asm/orc_types.h>
|
||||
#endif
|
||||
|
||||
enum insn_type {
|
||||
INSN_JUMP_CONDITIONAL,
|
||||
INSN_JUMP_UNCONDITIONAL,
|
||||
@@ -30,6 +26,7 @@ enum insn_type {
|
||||
INSN_CLAC,
|
||||
INSN_STD,
|
||||
INSN_CLD,
|
||||
INSN_TRAP,
|
||||
INSN_OTHER,
|
||||
};
|
||||
|
||||
@@ -87,7 +84,13 @@ unsigned long arch_jump_destination(struct instruction *insn);
|
||||
unsigned long arch_dest_reloc_offset(int addend);
|
||||
|
||||
const char *arch_nop_insn(int len);
|
||||
const char *arch_ret_insn(int len);
|
||||
|
||||
int arch_decode_hint_reg(struct instruction *insn, u8 sp_reg);
|
||||
int arch_decode_hint_reg(u8 sp_reg, int *base);
|
||||
|
||||
bool arch_is_retpoline(struct symbol *sym);
|
||||
bool arch_is_rethunk(struct symbol *sym);
|
||||
|
||||
int arch_rewrite_retpolines(struct objtool_file *file);
|
||||
|
||||
#endif /* _ARCH_H */
|
||||
|
||||
@@ -16,6 +16,7 @@
|
||||
#include "../../arch.h"
|
||||
#include "../../warn.h"
|
||||
#include <asm/orc_types.h>
|
||||
#include "arch_elf.h"
|
||||
|
||||
static unsigned char op_to_cfi_reg[][2] = {
|
||||
{CFI_AX, CFI_R8},
|
||||
@@ -455,6 +456,11 @@ int arch_decode_instruction(const struct elf *elf, const struct section *sec,
|
||||
|
||||
break;
|
||||
|
||||
case 0xcc:
|
||||
/* int3 */
|
||||
*type = INSN_TRAP;
|
||||
break;
|
||||
|
||||
case 0xe3:
|
||||
/* jecxz/jrcxz */
|
||||
*type = INSN_JUMP_CONDITIONAL;
|
||||
@@ -563,8 +569,8 @@ void arch_initial_func_cfi_state(struct cfi_init_state *state)
|
||||
state->cfa.offset = 8;
|
||||
|
||||
/* initial RA (return address) */
|
||||
state->regs[16].base = CFI_CFA;
|
||||
state->regs[16].offset = -8;
|
||||
state->regs[CFI_RA].base = CFI_CFA;
|
||||
state->regs[CFI_RA].offset = -8;
|
||||
}
|
||||
|
||||
const char *arch_nop_insn(int len)
|
||||
@@ -585,34 +591,52 @@ const char *arch_nop_insn(int len)
|
||||
return nops[len-1];
|
||||
}
|
||||
|
||||
int arch_decode_hint_reg(struct instruction *insn, u8 sp_reg)
|
||||
{
|
||||
struct cfi_reg *cfa = &insn->cfi.cfa;
|
||||
#define BYTE_RET 0xC3
|
||||
|
||||
const char *arch_ret_insn(int len)
|
||||
{
|
||||
static const char ret[5][5] = {
|
||||
{ BYTE_RET },
|
||||
{ BYTE_RET, 0xcc },
|
||||
{ BYTE_RET, 0xcc, 0x90 },
|
||||
{ BYTE_RET, 0xcc, 0x66, 0x90 },
|
||||
{ BYTE_RET, 0xcc, 0x0f, 0x1f, 0x00 },
|
||||
};
|
||||
|
||||
if (len < 1 || len > 5) {
|
||||
WARN("invalid RET size: %d\n", len);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return ret[len-1];
|
||||
}
|
||||
|
||||
int arch_decode_hint_reg(u8 sp_reg, int *base)
|
||||
{
|
||||
switch (sp_reg) {
|
||||
case ORC_REG_UNDEFINED:
|
||||
cfa->base = CFI_UNDEFINED;
|
||||
*base = CFI_UNDEFINED;
|
||||
break;
|
||||
case ORC_REG_SP:
|
||||
cfa->base = CFI_SP;
|
||||
*base = CFI_SP;
|
||||
break;
|
||||
case ORC_REG_BP:
|
||||
cfa->base = CFI_BP;
|
||||
*base = CFI_BP;
|
||||
break;
|
||||
case ORC_REG_SP_INDIRECT:
|
||||
cfa->base = CFI_SP_INDIRECT;
|
||||
*base = CFI_SP_INDIRECT;
|
||||
break;
|
||||
case ORC_REG_R10:
|
||||
cfa->base = CFI_R10;
|
||||
*base = CFI_R10;
|
||||
break;
|
||||
case ORC_REG_R13:
|
||||
cfa->base = CFI_R13;
|
||||
*base = CFI_R13;
|
||||
break;
|
||||
case ORC_REG_DI:
|
||||
cfa->base = CFI_DI;
|
||||
*base = CFI_DI;
|
||||
break;
|
||||
case ORC_REG_DX:
|
||||
cfa->base = CFI_DX;
|
||||
*base = CFI_DX;
|
||||
break;
|
||||
default:
|
||||
return -1;
|
||||
@@ -620,3 +644,13 @@ int arch_decode_hint_reg(struct instruction *insn, u8 sp_reg)
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
bool arch_is_retpoline(struct symbol *sym)
|
||||
{
|
||||
return !strncmp(sym->name, "__x86_indirect_", 15);
|
||||
}
|
||||
|
||||
bool arch_is_rethunk(struct symbol *sym)
|
||||
{
|
||||
return !strcmp(sym->name, "__x86_return_thunk");
|
||||
}
|
||||
|
||||
@@ -10,7 +10,7 @@
|
||||
#define JUMP_ORIG_OFFSET 0
|
||||
#define JUMP_NEW_OFFSET 4
|
||||
|
||||
#define ALT_ENTRY_SIZE 13
|
||||
#define ALT_ENTRY_SIZE 12
|
||||
#define ALT_ORIG_OFFSET 0
|
||||
#define ALT_NEW_OFFSET 4
|
||||
#define ALT_FEATURE_OFFSET 8
|
||||
|
||||
@@ -18,7 +18,8 @@
|
||||
#include "builtin.h"
|
||||
#include "objtool.h"
|
||||
|
||||
bool no_fp, no_unreachable, retpoline, module, backtrace, uaccess, stats, validate_dup, vmlinux, mcount, noinstr;
|
||||
bool no_fp, no_unreachable, retpoline, module, backtrace, uaccess, stats,
|
||||
validate_dup, vmlinux, mcount, noinstr, sls, unret, rethunk;
|
||||
|
||||
static const char * const check_usage[] = {
|
||||
"objtool check [<options>] file.o",
|
||||
@@ -29,6 +30,8 @@ const struct option check_options[] = {
|
||||
OPT_BOOLEAN('f', "no-fp", &no_fp, "Skip frame pointer validation"),
|
||||
OPT_BOOLEAN('u', "no-unreachable", &no_unreachable, "Skip 'unreachable instruction' warnings"),
|
||||
OPT_BOOLEAN('r', "retpoline", &retpoline, "Validate retpoline assumptions"),
|
||||
OPT_BOOLEAN(0, "rethunk", &rethunk, "validate and annotate rethunk usage"),
|
||||
OPT_BOOLEAN(0, "unret", &unret, "validate entry unret placement"),
|
||||
OPT_BOOLEAN('m', "module", &module, "Indicates the object will be part of a kernel module"),
|
||||
OPT_BOOLEAN('b', "backtrace", &backtrace, "unwind on error"),
|
||||
OPT_BOOLEAN('a', "uaccess", &uaccess, "enable uaccess checking"),
|
||||
@@ -37,6 +40,7 @@ const struct option check_options[] = {
|
||||
OPT_BOOLEAN('n', "noinstr", &noinstr, "noinstr validation for vmlinux.o"),
|
||||
OPT_BOOLEAN('l', "vmlinux", &vmlinux, "vmlinux.o validation"),
|
||||
OPT_BOOLEAN('M', "mcount", &mcount, "generate __mcount_loc"),
|
||||
OPT_BOOLEAN('S', "sls", &sls, "validate straight-line-speculation"),
|
||||
OPT_END(),
|
||||
};
|
||||
|
||||
|
||||
@@ -51,11 +51,7 @@ int cmd_orc(int argc, const char **argv)
|
||||
if (list_empty(&file->insn_list))
|
||||
return 0;
|
||||
|
||||
ret = create_orc(file);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = create_orc_sections(file);
|
||||
ret = orc_create(file);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
||||
@@ -8,7 +8,8 @@
|
||||
#include <subcmd/parse-options.h>
|
||||
|
||||
extern const struct option check_options[];
|
||||
extern bool no_fp, no_unreachable, retpoline, module, backtrace, uaccess, stats, validate_dup, vmlinux, mcount, noinstr;
|
||||
extern bool no_fp, no_unreachable, retpoline, module, backtrace, uaccess, stats,
|
||||
validate_dup, vmlinux, mcount, noinstr, sls, unret, rethunk;
|
||||
|
||||
extern int cmd_check(int argc, const char **argv);
|
||||
extern int cmd_orc(int argc, const char **argv);
|
||||
|
||||
@@ -7,6 +7,7 @@
|
||||
#define _OBJTOOL_CFI_H
|
||||
|
||||
#include "cfi_regs.h"
|
||||
#include <linux/list.h>
|
||||
|
||||
#define CFI_UNDEFINED -1
|
||||
#define CFI_CFA -2
|
||||
@@ -24,6 +25,7 @@ struct cfi_init_state {
|
||||
};
|
||||
|
||||
struct cfi_state {
|
||||
struct hlist_node hash; /* must be first, cficmp() */
|
||||
struct cfi_reg regs[CFI_NUM_REGS];
|
||||
struct cfi_reg vals[CFI_NUM_REGS];
|
||||
struct cfi_reg cfa;
|
||||
|
||||
File diff suppressed because it is too large
@@ -19,11 +19,28 @@ struct insn_state {
|
||||
s8 instr;
|
||||
};
|
||||
|
||||
struct alt_group {
|
||||
/*
|
||||
* Pointer from a replacement group to the original group. NULL if it
|
||||
* *is* the original group.
|
||||
*/
|
||||
struct alt_group *orig_group;
|
||||
|
||||
/* First and last instructions in the group */
|
||||
struct instruction *first_insn, *last_insn;
|
||||
|
||||
/*
|
||||
* Byte-offset-addressed len-sized array of pointers to CFI structs.
|
||||
* This is shared with the other alt_groups in the same alternative.
|
||||
*/
|
||||
struct cfi_state **cfi;
|
||||
};
|
||||
|
||||
struct instruction {
|
||||
struct list_head list;
|
||||
struct hlist_node hash;
|
||||
struct list_head static_call_node;
|
||||
struct list_head mcount_loc_node;
|
||||
struct list_head call_node;
|
||||
struct section *sec;
|
||||
unsigned long offset;
|
||||
unsigned int len;
|
||||
@@ -31,24 +48,28 @@ struct instruction {
|
||||
unsigned long immediate;
|
||||
bool dead_end, ignore, ignore_alts;
|
||||
bool hint;
|
||||
bool save, restore;
|
||||
bool retpoline_safe;
|
||||
bool entry;
|
||||
s8 instr;
|
||||
u8 visited;
|
||||
u8 ret_offset;
|
||||
int alt_group;
|
||||
struct alt_group *alt_group;
|
||||
struct symbol *call_dest;
|
||||
struct instruction *jump_dest;
|
||||
struct instruction *first_jump_src;
|
||||
struct reloc *jump_table;
|
||||
struct reloc *reloc;
|
||||
struct list_head alts;
|
||||
struct symbol *func;
|
||||
struct list_head stack_ops;
|
||||
struct cfi_state cfi;
|
||||
#ifdef INSN_USE_ORC
|
||||
struct orc_entry orc;
|
||||
#endif
|
||||
struct cfi_state *cfi;
|
||||
};
|
||||
|
||||
#define VISITED_BRANCH 0x01
|
||||
#define VISITED_BRANCH_UACCESS 0x02
|
||||
#define VISITED_BRANCH_MASK 0x03
|
||||
#define VISITED_ENTRY 0x04
|
||||
|
||||
static inline bool is_static_jump(struct instruction *insn)
|
||||
{
|
||||
return insn->type == INSN_JUMP_CONDITIONAL ||
|
||||
|
||||
@@ -262,32 +262,6 @@ struct reloc *find_reloc_by_dest(const struct elf *elf, struct section *sec, uns
|
||||
return find_reloc_by_dest_range(elf, sec, offset, 1);
|
||||
}
|
||||
|
||||
void insn_to_reloc_sym_addend(struct section *sec, unsigned long offset,
|
||||
struct reloc *reloc)
|
||||
{
|
||||
if (sec->sym) {
|
||||
reloc->sym = sec->sym;
|
||||
reloc->addend = offset;
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* The Clang assembler strips section symbols, so we have to reference
|
||||
* the function symbol instead:
|
||||
*/
|
||||
reloc->sym = find_symbol_containing(sec, offset);
|
||||
if (!reloc->sym) {
|
||||
/*
|
||||
* Hack alert. This happens when we need to reference the NOP
|
||||
* pad insn immediately after the function.
|
||||
*/
|
||||
reloc->sym = find_symbol_containing(sec, offset - 1);
|
||||
}
|
||||
|
||||
if (reloc->sym)
|
||||
reloc->addend = offset - reloc->sym->offset;
|
||||
}
|
||||
|
||||
static int read_sections(struct elf *elf)
|
||||
{
|
||||
Elf_Scn *s = NULL;
|
||||
@@ -367,12 +341,41 @@ static int read_sections(struct elf *elf)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void elf_add_symbol(struct elf *elf, struct symbol *sym)
|
||||
{
|
||||
struct list_head *entry;
|
||||
struct rb_node *pnode;
|
||||
|
||||
sym->alias = sym;
|
||||
|
||||
sym->type = GELF_ST_TYPE(sym->sym.st_info);
|
||||
sym->bind = GELF_ST_BIND(sym->sym.st_info);
|
||||
|
||||
sym->offset = sym->sym.st_value;
|
||||
sym->len = sym->sym.st_size;
|
||||
|
||||
rb_add(&sym->sec->symbol_tree, &sym->node, symbol_to_offset);
|
||||
pnode = rb_prev(&sym->node);
|
||||
if (pnode)
|
||||
entry = &rb_entry(pnode, struct symbol, node)->list;
|
||||
else
|
||||
entry = &sym->sec->symbol_list;
|
||||
list_add(&sym->list, entry);
|
||||
elf_hash_add(elf->symbol_hash, &sym->hash, sym->idx);
|
||||
elf_hash_add(elf->symbol_name_hash, &sym->name_hash, str_hash(sym->name));
|
||||
|
||||
/*
|
||||
* Don't store empty STT_NOTYPE symbols in the rbtree. They
|
||||
* can exist within a function, confusing the sorting.
|
||||
*/
|
||||
if (!sym->len)
|
||||
rb_erase(&sym->node, &sym->sec->symbol_tree);
|
||||
}
|
||||
|
||||
static int read_symbols(struct elf *elf)
|
||||
{
|
||||
struct section *symtab, *symtab_shndx, *sec;
|
||||
struct symbol *sym, *pfunc;
|
||||
struct list_head *entry;
|
||||
struct rb_node *pnode;
|
||||
int symbols_nr, i;
|
||||
char *coldstr;
|
||||
Elf_Data *shndx_data = NULL;
|
||||
@@ -400,7 +403,6 @@ static int read_symbols(struct elf *elf)
|
||||
return -1;
|
||||
}
|
||||
memset(sym, 0, sizeof(*sym));
|
||||
sym->alias = sym;
|
||||
|
||||
sym->idx = i;
|
||||
|
||||
@@ -417,9 +419,6 @@ static int read_symbols(struct elf *elf)
|
||||
goto err;
|
||||
}
|
||||
|
||||
sym->type = GELF_ST_TYPE(sym->sym.st_info);
|
||||
sym->bind = GELF_ST_BIND(sym->sym.st_info);
|
||||
|
||||
if ((sym->sym.st_shndx > SHN_UNDEF &&
|
||||
sym->sym.st_shndx < SHN_LORESERVE) ||
|
||||
(shndx_data && sym->sym.st_shndx == SHN_XINDEX)) {
|
||||
@@ -432,32 +431,14 @@ static int read_symbols(struct elf *elf)
|
||||
sym->name);
|
||||
goto err;
|
||||
}
|
||||
if (sym->type == STT_SECTION) {
|
||||
if (GELF_ST_TYPE(sym->sym.st_info) == STT_SECTION) {
|
||||
sym->name = sym->sec->name;
|
||||
sym->sec->sym = sym;
|
||||
}
|
||||
} else
|
||||
sym->sec = find_section_by_index(elf, 0);
|
||||
|
||||
sym->offset = sym->sym.st_value;
|
||||
sym->len = sym->sym.st_size;
|
||||
|
||||
rb_add(&sym->sec->symbol_tree, &sym->node, symbol_to_offset);
|
||||
pnode = rb_prev(&sym->node);
|
||||
if (pnode)
|
||||
entry = &rb_entry(pnode, struct symbol, node)->list;
|
||||
else
|
||||
entry = &sym->sec->symbol_list;
|
||||
list_add(&sym->list, entry);
|
||||
elf_hash_add(elf->symbol_hash, &sym->hash, sym->idx);
|
||||
elf_hash_add(elf->symbol_name_hash, &sym->name_hash, str_hash(sym->name));
|
||||
|
||||
/*
|
||||
* Don't store empty STT_NOTYPE symbols in the rbtree. They
|
||||
* can exist within a function, confusing the sorting.
|
||||
*/
|
||||
if (!sym->len)
|
||||
rb_erase(&sym->node, &sym->sec->symbol_tree);
|
||||
elf_add_symbol(elf, sym);
|
||||
}
|
||||
|
||||
if (stats)
|
||||
@@ -524,12 +505,280 @@ err:
|
||||
return -1;
|
||||
}
|
||||
|
||||
void elf_add_reloc(struct elf *elf, struct reloc *reloc)
|
||||
{
|
||||
struct section *sec = reloc->sec;
|
||||
static struct section *elf_create_reloc_section(struct elf *elf,
|
||||
struct section *base,
|
||||
int reltype);
|
||||
|
||||
list_add_tail(&reloc->list, &sec->reloc_list);
|
||||
int elf_add_reloc(struct elf *elf, struct section *sec, unsigned long offset,
|
||||
unsigned int type, struct symbol *sym, s64 addend)
|
||||
{
|
||||
struct reloc *reloc;
|
||||
|
||||
if (!sec->reloc && !elf_create_reloc_section(elf, sec, SHT_RELA))
|
||||
return -1;
|
||||
|
||||
reloc = malloc(sizeof(*reloc));
|
||||
if (!reloc) {
|
||||
perror("malloc");
|
||||
return -1;
|
||||
}
|
||||
memset(reloc, 0, sizeof(*reloc));
|
||||
|
||||
reloc->sec = sec->reloc;
|
||||
reloc->offset = offset;
|
||||
reloc->type = type;
|
||||
reloc->sym = sym;
|
||||
reloc->addend = addend;
|
||||
|
||||
list_add_tail(&reloc->list, &sec->reloc->reloc_list);
|
||||
elf_hash_add(elf->reloc_hash, &reloc->hash, reloc_hash(reloc));
|
||||
|
||||
sec->reloc->changed = true;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Ensure that any reloc section containing references to @sym is marked
|
||||
* changed such that it will get re-generated in elf_rebuild_reloc_sections()
|
||||
* with the new symbol index.
|
||||
*/
|
||||
static void elf_dirty_reloc_sym(struct elf *elf, struct symbol *sym)
|
||||
{
|
||||
struct section *sec;
|
||||
|
||||
list_for_each_entry(sec, &elf->sections, list) {
|
||||
struct reloc *reloc;
|
||||
|
||||
if (sec->changed)
|
||||
continue;
|
||||
|
||||
list_for_each_entry(reloc, &sec->reloc_list, list) {
|
||||
if (reloc->sym == sym) {
|
||||
sec->changed = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* The libelf API is terrible; gelf_update_sym*() takes a data block relative
|
||||
* index value, *NOT* the symbol index. As such, iterate the data blocks and
|
||||
* adjust index until it fits.
|
||||
*
|
||||
* If no data block is found, allow adding a new data block provided the index
|
||||
* is only one past the end.
|
||||
*/
|
||||
static int elf_update_symbol(struct elf *elf, struct section *symtab,
|
||||
struct section *symtab_shndx, struct symbol *sym)
|
||||
{
|
||||
Elf32_Word shndx = sym->sec ? sym->sec->idx : SHN_UNDEF;
|
||||
Elf_Data *symtab_data = NULL, *shndx_data = NULL;
|
||||
Elf64_Xword entsize = symtab->sh.sh_entsize;
|
||||
int max_idx, idx = sym->idx;
|
||||
Elf_Scn *s, *t = NULL;
|
||||
bool is_special_shndx = sym->sym.st_shndx >= SHN_LORESERVE &&
|
||||
sym->sym.st_shndx != SHN_XINDEX;
|
||||
|
||||
if (is_special_shndx)
|
||||
shndx = sym->sym.st_shndx;
|
||||
|
||||
s = elf_getscn(elf->elf, symtab->idx);
|
||||
if (!s) {
|
||||
WARN_ELF("elf_getscn");
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (symtab_shndx) {
|
||||
t = elf_getscn(elf->elf, symtab_shndx->idx);
|
||||
if (!t) {
|
||||
WARN_ELF("elf_getscn");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
for (;;) {
|
||||
/* get next data descriptor for the relevant sections */
|
||||
symtab_data = elf_getdata(s, symtab_data);
|
||||
if (t)
|
||||
shndx_data = elf_getdata(t, shndx_data);
|
||||
|
||||
/* end-of-list */
|
||||
if (!symtab_data) {
|
||||
void *buf;
|
||||
|
||||
if (idx) {
|
||||
/* we don't do holes in symbol tables */
|
||||
WARN("index out of range");
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* if @idx == 0, it's the next contiguous entry, create it */
|
||||
symtab_data = elf_newdata(s);
|
||||
if (t)
|
||||
shndx_data = elf_newdata(t);
|
||||
|
||||
buf = calloc(1, entsize);
|
||||
if (!buf) {
|
||||
WARN("malloc");
|
||||
return -1;
|
||||
}
|
||||
|
||||
symtab_data->d_buf = buf;
|
||||
symtab_data->d_size = entsize;
|
||||
symtab_data->d_align = 1;
|
||||
symtab_data->d_type = ELF_T_SYM;
|
||||
|
||||
symtab->sh.sh_size += entsize;
|
||||
symtab->changed = true;
|
||||
|
||||
if (t) {
|
||||
shndx_data->d_buf = &sym->sec->idx;
|
||||
shndx_data->d_size = sizeof(Elf32_Word);
|
||||
shndx_data->d_align = sizeof(Elf32_Word);
|
||||
shndx_data->d_type = ELF_T_WORD;
|
||||
|
||||
symtab_shndx->sh.sh_size += sizeof(Elf32_Word);
|
||||
symtab_shndx->changed = true;
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
/* empty blocks should not happen */
|
||||
if (!symtab_data->d_size) {
|
||||
WARN("zero size data");
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* is this the right block? */
|
||||
max_idx = symtab_data->d_size / entsize;
|
||||
if (idx < max_idx)
|
||||
break;
|
||||
|
||||
/* adjust index and try again */
|
||||
idx -= max_idx;
|
||||
}
|
||||
|
||||
/* something went side-ways */
|
||||
if (idx < 0) {
|
||||
WARN("negative index");
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* setup extended section index magic and write the symbol */
|
||||
if ((shndx >= SHN_UNDEF && shndx < SHN_LORESERVE) || is_special_shndx) {
|
||||
sym->sym.st_shndx = shndx;
|
||||
if (!shndx_data)
|
||||
shndx = 0;
|
||||
} else {
|
||||
sym->sym.st_shndx = SHN_XINDEX;
|
||||
if (!shndx_data) {
|
||||
WARN("no .symtab_shndx");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
if (!gelf_update_symshndx(symtab_data, shndx_data, idx, &sym->sym, shndx)) {
|
||||
WARN_ELF("gelf_update_symshndx");
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct symbol *
|
||||
elf_create_section_symbol(struct elf *elf, struct section *sec)
|
||||
{
|
||||
struct section *symtab, *symtab_shndx;
|
||||
Elf32_Word first_non_local, new_idx;
|
||||
struct symbol *sym, *old;
|
||||
|
||||
symtab = find_section_by_name(elf, ".symtab");
|
||||
if (symtab) {
|
||||
symtab_shndx = find_section_by_name(elf, ".symtab_shndx");
|
||||
} else {
|
||||
WARN("no .symtab");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
sym = calloc(1, sizeof(*sym));
|
||||
if (!sym) {
|
||||
perror("malloc");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
sym->name = sec->name;
|
||||
sym->sec = sec;
|
||||
|
||||
// st_name 0
|
||||
sym->sym.st_info = GELF_ST_INFO(STB_LOCAL, STT_SECTION);
|
||||
// st_other 0
|
||||
// st_value 0
|
||||
// st_size 0
|
||||
|
||||
/*
|
||||
* Move the first global symbol, as per sh_info, into a new, higher
|
||||
* symbol index. This fees up a spot for a new local symbol.
|
||||
*/
|
||||
first_non_local = symtab->sh.sh_info;
|
||||
new_idx = symtab->sh.sh_size / symtab->sh.sh_entsize;
|
||||
old = find_symbol_by_index(elf, first_non_local);
|
||||
if (old) {
|
||||
old->idx = new_idx;
|
||||
|
||||
hlist_del(&old->hash);
|
||||
elf_hash_add(elf->symbol_hash, &old->hash, old->idx);
|
||||
|
||||
elf_dirty_reloc_sym(elf, old);
|
||||
|
||||
if (elf_update_symbol(elf, symtab, symtab_shndx, old)) {
|
||||
WARN("elf_update_symbol move");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
new_idx = first_non_local;
|
||||
}
|
||||
|
||||
sym->idx = new_idx;
|
||||
if (elf_update_symbol(elf, symtab, symtab_shndx, sym)) {
|
||||
WARN("elf_update_symbol");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* Either way, we added a LOCAL symbol.
|
||||
*/
|
||||
symtab->sh.sh_info += 1;
|
||||
|
||||
elf_add_symbol(elf, sym);
|
||||
|
||||
return sym;
|
||||
}
|
||||
|
||||
int elf_add_reloc_to_insn(struct elf *elf, struct section *sec,
|
||||
unsigned long offset, unsigned int type,
|
||||
struct section *insn_sec, unsigned long insn_off)
|
||||
{
|
||||
struct symbol *sym = insn_sec->sym;
|
||||
int addend = insn_off;
|
||||
|
||||
if (!sym) {
|
||||
/*
|
||||
* Due to how weak functions work, we must use section based
|
||||
* relocations. Symbol based relocations would result in the
|
||||
* weak and non-weak function annotations being overlaid on the
|
||||
* non-weak function after linking.
|
||||
*/
|
||||
sym = elf_create_section_symbol(elf, insn_sec);
|
||||
if (!sym)
|
||||
return -1;
|
||||
|
||||
insn_sec->sym = sym;
|
||||
}
|
||||
|
||||
return elf_add_reloc(elf, sec, offset, type, sym, addend);
|
||||
}
|
||||
|
||||
static int read_rel_reloc(struct section *sec, int i, struct reloc *reloc, unsigned int *symndx)
|
||||
@@ -609,7 +858,9 @@ static int read_relocs(struct elf *elf)
|
||||
return -1;
|
||||
}
|
||||
|
||||
elf_add_reloc(elf, reloc);
|
||||
list_add_tail(&reloc->list, &sec->reloc_list);
|
||||
elf_hash_add(elf->reloc_hash, &reloc->hash, reloc_hash(reloc));
|
||||
|
||||
nr_reloc++;
|
||||
}
|
||||
max_reloc = max(max_reloc, nr_reloc);
|
||||
@@ -687,13 +938,49 @@ err:
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int elf_add_string(struct elf *elf, struct section *strtab, char *str)
|
||||
{
|
||||
Elf_Data *data;
|
||||
Elf_Scn *s;
|
||||
int len;
|
||||
|
||||
if (!strtab)
|
||||
strtab = find_section_by_name(elf, ".strtab");
|
||||
if (!strtab) {
|
||||
WARN("can't find .strtab section");
|
||||
return -1;
|
||||
}
|
||||
|
||||
s = elf_getscn(elf->elf, strtab->idx);
|
||||
if (!s) {
|
||||
WARN_ELF("elf_getscn");
|
||||
return -1;
|
||||
}
|
||||
|
||||
data = elf_newdata(s);
|
||||
if (!data) {
|
||||
WARN_ELF("elf_newdata");
|
||||
return -1;
|
||||
}
|
||||
|
||||
data->d_buf = str;
|
||||
data->d_size = strlen(str) + 1;
|
||||
data->d_align = 1;
|
||||
data->d_type = ELF_T_SYM;
|
||||
|
||||
len = strtab->len;
|
||||
strtab->len += data->d_size;
|
||||
strtab->changed = true;
|
||||
|
||||
return len;
|
||||
}
|
||||
|
||||
struct section *elf_create_section(struct elf *elf, const char *name,
|
||||
unsigned int sh_flags, size_t entsize, int nr)
|
||||
{
|
||||
struct section *sec, *shstrtab;
|
||||
size_t size = entsize * nr;
|
||||
Elf_Scn *s;
|
||||
Elf_Data *data;
|
||||
|
||||
sec = malloc(sizeof(*sec));
|
||||
if (!sec) {
|
||||
@@ -750,7 +1037,6 @@ struct section *elf_create_section(struct elf *elf, const char *name,
|
||||
sec->sh.sh_addralign = 1;
|
||||
sec->sh.sh_flags = SHF_ALLOC | sh_flags;
|
||||
|
||||
|
||||
/* Add section name to .shstrtab (or .strtab for Clang) */
|
||||
shstrtab = find_section_by_name(elf, ".shstrtab");
|
||||
if (!shstrtab)
|
||||
@@ -759,27 +1045,9 @@ struct section *elf_create_section(struct elf *elf, const char *name,
|
||||
WARN("can't find .shstrtab or .strtab section");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
s = elf_getscn(elf->elf, shstrtab->idx);
|
||||
if (!s) {
|
||||
WARN_ELF("elf_getscn");
|
||||
sec->sh.sh_name = elf_add_string(elf, shstrtab, sec->name);
|
||||
if (sec->sh.sh_name == -1)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
data = elf_newdata(s);
|
||||
if (!data) {
|
||||
WARN_ELF("elf_newdata");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
data->d_buf = sec->name;
|
||||
data->d_size = strlen(name) + 1;
|
||||
data->d_align = 1;
|
||||
|
||||
sec->sh.sh_name = shstrtab->len;
|
||||
|
||||
shstrtab->len += strlen(name) + 1;
|
||||
shstrtab->changed = true;
|
||||
|
||||
list_add_tail(&sec->list, &elf->sections);
|
||||
elf_hash_add(elf->section_hash, &sec->hash, sec->idx);
|
||||
@@ -850,7 +1118,7 @@ static struct section *elf_create_rela_reloc_section(struct elf *elf, struct sec
|
||||
return sec;
|
||||
}
|
||||
|
||||
struct section *elf_create_reloc_section(struct elf *elf,
|
||||
static struct section *elf_create_reloc_section(struct elf *elf,
|
||||
struct section *base,
|
||||
int reltype)
|
||||
{
|
||||
@@ -920,14 +1188,11 @@ static int elf_rebuild_rela_reloc_section(struct section *sec, int nr)
|
||||
return 0;
|
||||
}
|
||||
|
||||
int elf_rebuild_reloc_section(struct elf *elf, struct section *sec)
|
||||
static int elf_rebuild_reloc_section(struct elf *elf, struct section *sec)
|
||||
{
|
||||
struct reloc *reloc;
|
||||
int nr;
|
||||
|
||||
sec->changed = true;
|
||||
elf->changed = true;
|
||||
|
||||
nr = 0;
|
||||
list_for_each_entry(reloc, &sec->reloc_list, list)
|
||||
nr++;
|
||||
@@ -991,9 +1256,15 @@ int elf_write(struct elf *elf)
|
||||
struct section *sec;
|
||||
Elf_Scn *s;
|
||||
|
||||
/* Update section headers for changed sections: */
|
||||
/* Update changed relocation sections and section headers: */
|
||||
list_for_each_entry(sec, &elf->sections, list) {
|
||||
if (sec->changed) {
|
||||
if (sec->base &&
|
||||
elf_rebuild_reloc_section(elf, sec)) {
|
||||
WARN("elf_rebuild_reloc_section");
|
||||
return -1;
|
||||
}
|
||||
|
||||
s = elf_getscn(elf->elf, sec->idx);
|
||||
if (!s) {
|
||||
WARN_ELF("elf_getscn");
|
||||
@@ -1005,6 +1276,7 @@ int elf_write(struct elf *elf)
|
||||
}
|
||||
|
||||
sec->changed = false;
|
||||
elf->changed = true;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -55,8 +55,12 @@ struct symbol {
|
||||
unsigned long offset;
|
||||
unsigned int len;
|
||||
struct symbol *pfunc, *cfunc, *alias;
|
||||
bool uaccess_safe;
|
||||
bool static_call_tramp;
|
||||
u8 uaccess_safe : 1;
|
||||
u8 static_call_tramp : 1;
|
||||
u8 retpoline_thunk : 1;
|
||||
u8 return_thunk : 1;
|
||||
u8 fentry : 1;
|
||||
u8 kcov : 1;
|
||||
};
|
||||
|
||||
struct reloc {
|
||||
@@ -70,7 +74,7 @@ struct reloc {
|
||||
struct symbol *sym;
|
||||
unsigned long offset;
|
||||
unsigned int type;
|
||||
int addend;
|
||||
s64 addend;
|
||||
int idx;
|
||||
bool jump_table_start;
|
||||
};
|
||||
@@ -122,8 +126,13 @@ static inline u32 reloc_hash(struct reloc *reloc)
|
||||
|
||||
struct elf *elf_open_read(const char *name, int flags);
|
||||
struct section *elf_create_section(struct elf *elf, const char *name, unsigned int sh_flags, size_t entsize, int nr);
|
||||
struct section *elf_create_reloc_section(struct elf *elf, struct section *base, int reltype);
|
||||
void elf_add_reloc(struct elf *elf, struct reloc *reloc);
|
||||
|
||||
int elf_add_reloc(struct elf *elf, struct section *sec, unsigned long offset,
|
||||
unsigned int type, struct symbol *sym, s64 addend);
|
||||
int elf_add_reloc_to_insn(struct elf *elf, struct section *sec,
|
||||
unsigned long offset, unsigned int type,
|
||||
struct section *insn_sec, unsigned long insn_off);
|
||||
|
||||
int elf_write_insn(struct elf *elf, struct section *sec,
|
||||
unsigned long offset, unsigned int len,
|
||||
const char *insn);
|
||||
@@ -140,9 +149,6 @@ struct reloc *find_reloc_by_dest(const struct elf *elf, struct section *sec, uns
|
||||
struct reloc *find_reloc_by_dest_range(const struct elf *elf, struct section *sec,
|
||||
unsigned long offset, unsigned int len);
|
||||
struct symbol *find_func_containing(struct section *sec, unsigned long offset);
|
||||
void insn_to_reloc_sym_addend(struct section *sec, unsigned long offset,
|
||||
struct reloc *reloc);
|
||||
int elf_rebuild_reloc_section(struct elf *elf, struct section *sec);
|
||||
|
||||
#define for_each_sec(file, sec) \
|
||||
list_for_each_entry(sec, &file->elf->sections, list)
|
||||
|
||||
@@ -61,6 +61,8 @@ struct objtool_file *objtool_open_read(const char *_objname)
|
||||
|
||||
INIT_LIST_HEAD(&file.insn_list);
|
||||
hash_init(file.insn_hash);
|
||||
INIT_LIST_HEAD(&file.retpoline_call_list);
|
||||
INIT_LIST_HEAD(&file.return_thunk_list);
|
||||
INIT_LIST_HEAD(&file.static_call_list);
|
||||
INIT_LIST_HEAD(&file.mcount_loc_list);
|
||||
file.c_file = !vmlinux && find_section_by_name(file.elf, ".comment");
|
||||
|
||||
@@ -18,6 +18,8 @@ struct objtool_file {
|
||||
struct elf *elf;
|
||||
struct list_head insn_list;
|
||||
DECLARE_HASHTABLE(insn_hash, 20);
|
||||
struct list_head retpoline_call_list;
|
||||
struct list_head return_thunk_list;
|
||||
struct list_head static_call_list;
|
||||
struct list_head mcount_loc_list;
|
||||
bool ignore_unreachables, c_file, hints, rodata;
|
||||
@@ -27,7 +29,6 @@ struct objtool_file *objtool_open_read(const char *_objname);
|
||||
|
||||
int check(struct objtool_file *file);
|
||||
int orc_dump(const char *objname);
|
||||
int create_orc(struct objtool_file *file);
|
||||
int create_orc_sections(struct objtool_file *file);
|
||||
int orc_create(struct objtool_file *file);
|
||||
|
||||
#endif /* _OBJTOOL_H */
|
||||
|
||||
@@ -12,205 +12,231 @@
|
||||
#include "check.h"
|
||||
#include "warn.h"
|
||||
|
||||
int create_orc(struct objtool_file *file)
|
||||
static int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi,
|
||||
struct instruction *insn)
|
||||
{
|
||||
struct instruction *insn;
|
||||
struct cfi_reg *bp = &cfi->regs[CFI_BP];
|
||||
|
||||
for_each_insn(file, insn) {
|
||||
struct orc_entry *orc = &insn->orc;
|
||||
struct cfi_reg *cfa = &insn->cfi.cfa;
|
||||
struct cfi_reg *bp = &insn->cfi.regs[CFI_BP];
|
||||
memset(orc, 0, sizeof(*orc));
|
||||
|
||||
if (!insn->sec->text)
|
||||
continue;
|
||||
|
||||
orc->end = insn->cfi.end;
|
||||
|
||||
if (cfa->base == CFI_UNDEFINED) {
|
||||
orc->sp_reg = ORC_REG_UNDEFINED;
|
||||
continue;
|
||||
}
|
||||
|
||||
switch (cfa->base) {
|
||||
case CFI_SP:
|
||||
orc->sp_reg = ORC_REG_SP;
|
||||
break;
|
||||
case CFI_SP_INDIRECT:
|
||||
orc->sp_reg = ORC_REG_SP_INDIRECT;
|
||||
break;
|
||||
case CFI_BP:
|
||||
orc->sp_reg = ORC_REG_BP;
|
||||
break;
|
||||
case CFI_BP_INDIRECT:
|
||||
orc->sp_reg = ORC_REG_BP_INDIRECT;
|
||||
break;
|
||||
case CFI_R10:
|
||||
orc->sp_reg = ORC_REG_R10;
|
||||
break;
|
||||
case CFI_R13:
|
||||
orc->sp_reg = ORC_REG_R13;
|
||||
break;
|
||||
case CFI_DI:
|
||||
orc->sp_reg = ORC_REG_DI;
|
||||
break;
|
||||
case CFI_DX:
|
||||
orc->sp_reg = ORC_REG_DX;
|
||||
break;
|
||||
default:
|
||||
WARN_FUNC("unknown CFA base reg %d",
|
||||
insn->sec, insn->offset, cfa->base);
|
||||
return -1;
|
||||
}
|
||||
|
||||
switch(bp->base) {
|
||||
case CFI_UNDEFINED:
|
||||
orc->bp_reg = ORC_REG_UNDEFINED;
|
||||
break;
|
||||
case CFI_CFA:
|
||||
orc->bp_reg = ORC_REG_PREV_SP;
|
||||
break;
|
||||
case CFI_BP:
|
||||
orc->bp_reg = ORC_REG_BP;
|
||||
break;
|
||||
default:
|
||||
WARN_FUNC("unknown BP base reg %d",
|
||||
insn->sec, insn->offset, bp->base);
|
||||
return -1;
|
||||
}
|
||||
|
||||
orc->sp_offset = cfa->offset;
|
||||
orc->bp_offset = bp->offset;
|
||||
orc->type = insn->cfi.type;
|
||||
if (!cfi) {
|
||||
orc->end = 0;
|
||||
orc->sp_reg = ORC_REG_UNDEFINED;
|
||||
return 0;
|
||||
}
|
||||
|
||||
orc->end = cfi->end;
|
||||
|
||||
if (cfi->cfa.base == CFI_UNDEFINED) {
|
||||
orc->sp_reg = ORC_REG_UNDEFINED;
|
||||
return 0;
|
||||
}
|
||||
|
||||
switch (cfi->cfa.base) {
|
||||
case CFI_SP:
|
||||
orc->sp_reg = ORC_REG_SP;
|
||||
break;
|
||||
case CFI_SP_INDIRECT:
|
||||
orc->sp_reg = ORC_REG_SP_INDIRECT;
|
||||
break;
|
||||
case CFI_BP:
|
||||
orc->sp_reg = ORC_REG_BP;
|
||||
break;
|
||||
case CFI_BP_INDIRECT:
|
||||
orc->sp_reg = ORC_REG_BP_INDIRECT;
|
||||
break;
|
||||
case CFI_R10:
|
||||
orc->sp_reg = ORC_REG_R10;
|
||||
break;
|
||||
case CFI_R13:
|
||||
orc->sp_reg = ORC_REG_R13;
|
||||
break;
|
||||
case CFI_DI:
|
||||
orc->sp_reg = ORC_REG_DI;
|
||||
break;
|
||||
case CFI_DX:
|
||||
orc->sp_reg = ORC_REG_DX;
|
||||
break;
|
||||
default:
|
||||
WARN_FUNC("unknown CFA base reg %d",
|
||||
insn->sec, insn->offset, cfi->cfa.base);
|
||||
return -1;
|
||||
}
|
||||
|
||||
switch (bp->base) {
|
||||
case CFI_UNDEFINED:
|
||||
orc->bp_reg = ORC_REG_UNDEFINED;
|
||||
break;
|
||||
case CFI_CFA:
|
||||
orc->bp_reg = ORC_REG_PREV_SP;
|
||||
break;
|
||||
case CFI_BP:
|
||||
orc->bp_reg = ORC_REG_BP;
|
||||
break;
|
||||
default:
|
||||
WARN_FUNC("unknown BP base reg %d",
|
||||
insn->sec, insn->offset, bp->base);
|
||||
return -1;
|
||||
}
|
||||
|
||||
orc->sp_offset = cfi->cfa.offset;
|
||||
orc->bp_offset = bp->offset;
|
||||
orc->type = cfi->type;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int create_orc_entry(struct elf *elf, struct section *u_sec, struct section *ip_relocsec,
|
||||
unsigned int idx, struct section *insn_sec,
|
||||
unsigned long insn_off, struct orc_entry *o)
|
||||
static int write_orc_entry(struct elf *elf, struct section *orc_sec,
|
||||
struct section *ip_sec, unsigned int idx,
|
||||
struct section *insn_sec, unsigned long insn_off,
|
||||
struct orc_entry *o)
|
||||
{
|
||||
struct orc_entry *orc;
|
||||
struct reloc *reloc;
|
||||
|
||||
/* populate ORC data */
|
||||
orc = (struct orc_entry *)u_sec->data->d_buf + idx;
|
||||
orc = (struct orc_entry *)orc_sec->data->d_buf + idx;
|
||||
memcpy(orc, o, sizeof(*orc));
|
||||
|
||||
/* populate reloc for ip */
|
||||
reloc = malloc(sizeof(*reloc));
|
||||
if (!reloc) {
|
||||
perror("malloc");
|
||||
if (elf_add_reloc_to_insn(elf, ip_sec, idx * sizeof(int), R_X86_64_PC32,
|
||||
insn_sec, insn_off))
|
||||
return -1;
|
||||
}
|
||||
memset(reloc, 0, sizeof(*reloc));
|
||||
|
||||
insn_to_reloc_sym_addend(insn_sec, insn_off, reloc);
|
||||
if (!reloc->sym) {
|
||||
WARN("missing symbol for insn at offset 0x%lx",
|
||||
insn_off);
|
||||
return -1;
|
||||
}
|
||||
|
||||
reloc->type = R_X86_64_PC32;
|
||||
reloc->offset = idx * sizeof(int);
|
||||
reloc->sec = ip_relocsec;
|
||||
|
||||
elf_add_reloc(elf, reloc);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int create_orc_sections(struct objtool_file *file)
|
||||
{
|
||||
struct instruction *insn, *prev_insn;
|
||||
struct section *sec, *u_sec, *ip_relocsec;
|
||||
unsigned int idx;
|
||||
struct orc_list_entry {
|
||||
struct list_head list;
|
||||
struct orc_entry orc;
|
||||
struct section *insn_sec;
|
||||
unsigned long insn_off;
|
||||
};
|
||||
|
||||
struct orc_entry empty = {
|
||||
.sp_reg = ORC_REG_UNDEFINED,
|
||||
static int orc_list_add(struct list_head *orc_list, struct orc_entry *orc,
|
||||
struct section *sec, unsigned long offset)
|
||||
{
|
||||
struct orc_list_entry *entry = malloc(sizeof(*entry));
|
||||
|
||||
if (!entry) {
|
||||
WARN("malloc failed");
|
||||
return -1;
|
||||
}
|
||||
|
||||
entry->orc = *orc;
|
||||
entry->insn_sec = sec;
|
||||
entry->insn_off = offset;
|
||||
|
||||
list_add_tail(&entry->list, orc_list);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static unsigned long alt_group_len(struct alt_group *alt_group)
|
||||
{
|
||||
return alt_group->last_insn->offset +
|
||||
alt_group->last_insn->len -
|
||||
alt_group->first_insn->offset;
|
||||
}
|
||||
|
||||
int orc_create(struct objtool_file *file)
|
||||
{
|
||||
struct section *sec, *orc_sec;
|
||||
unsigned int nr = 0, idx = 0;
|
||||
struct orc_list_entry *entry;
|
||||
struct list_head orc_list;
|
||||
|
||||
struct orc_entry null = {
|
||||
.sp_reg = ORC_REG_UNDEFINED,
|
||||
.bp_reg = ORC_REG_UNDEFINED,
|
||||
.type = UNWIND_HINT_TYPE_CALL,
|
||||
};
|
||||
|
||||
/* Build a deduplicated list of ORC entries: */
|
||||
INIT_LIST_HEAD(&orc_list);
|
||||
for_each_sec(file, sec) {
|
||||
struct orc_entry orc, prev_orc = {0};
|
||||
struct instruction *insn;
|
||||
bool empty = true;
|
||||
|
||||
if (!sec->text)
|
||||
continue;
|
||||
|
||||
sec_for_each_insn(file, sec, insn) {
|
||||
struct alt_group *alt_group = insn->alt_group;
|
||||
int i;
|
||||
|
||||
if (!alt_group) {
|
||||
if (init_orc_entry(&orc, insn->cfi, insn))
|
||||
return -1;
|
||||
if (!memcmp(&prev_orc, &orc, sizeof(orc)))
|
||||
continue;
|
||||
if (orc_list_add(&orc_list, &orc, sec,
|
||||
insn->offset))
|
||||
return -1;
|
||||
nr++;
|
||||
prev_orc = orc;
|
||||
empty = false;
|
||||
continue;
|
||||
}
|
||||
|
||||
/*
|
||||
* Alternatives can have different stack layout
|
||||
* possibilities (but they shouldn't conflict).
|
||||
* Instead of traversing the instructions, use the
|
||||
* alt_group's flattened byte-offset-addressed CFI
|
||||
* array.
|
||||
*/
|
||||
for (i = 0; i < alt_group_len(alt_group); i++) {
|
||||
struct cfi_state *cfi = alt_group->cfi[i];
|
||||
if (!cfi)
|
||||
continue;
|
||||
/* errors are reported on the original insn */
|
||||
if (init_orc_entry(&orc, cfi, insn))
|
||||
return -1;
|
||||
if (!memcmp(&prev_orc, &orc, sizeof(orc)))
|
||||
continue;
|
||||
if (orc_list_add(&orc_list, &orc, insn->sec,
|
||||
insn->offset + i))
|
||||
return -1;
|
||||
nr++;
|
||||
prev_orc = orc;
|
||||
empty = false;
|
||||
}
|
||||
|
||||
/* Skip to the end of the alt_group */
|
||||
insn = alt_group->last_insn;
|
||||
}
|
||||
|
||||
/* Add a section terminator */
|
||||
if (!empty) {
|
||||
orc_list_add(&orc_list, &null, sec, sec->len);
|
||||
nr++;
|
||||
}
|
||||
}
|
||||
if (!nr)
|
||||
return 0;
|
||||
|
||||
/* Create .orc_unwind, .orc_unwind_ip and .rela.orc_unwind_ip sections: */
|
||||
sec = find_section_by_name(file->elf, ".orc_unwind");
|
||||
if (sec) {
|
||||
WARN("file already has .orc_unwind section, skipping");
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* count the number of needed orcs */
|
||||
idx = 0;
|
||||
for_each_sec(file, sec) {
|
||||
if (!sec->text)
|
||||
continue;
|
||||
|
||||
prev_insn = NULL;
|
||||
sec_for_each_insn(file, sec, insn) {
|
||||
if (!prev_insn ||
|
||||
memcmp(&insn->orc, &prev_insn->orc,
|
||||
sizeof(struct orc_entry))) {
|
||||
idx++;
|
||||
}
|
||||
prev_insn = insn;
|
||||
}
|
||||
|
||||
/* section terminator */
|
||||
if (prev_insn)
|
||||
idx++;
|
||||
}
|
||||
if (!idx)
|
||||
orc_sec = elf_create_section(file->elf, ".orc_unwind", 0,
|
||||
sizeof(struct orc_entry), nr);
|
||||
if (!orc_sec)
|
||||
return -1;
|
||||
|
||||
|
||||
/* create .orc_unwind_ip and .rela.orc_unwind_ip sections */
|
||||
sec = elf_create_section(file->elf, ".orc_unwind_ip", 0, sizeof(int), idx);
|
||||
sec = elf_create_section(file->elf, ".orc_unwind_ip", 0, sizeof(int), nr);
|
||||
if (!sec)
|
||||
return -1;
|
||||
|
||||
ip_relocsec = elf_create_reloc_section(file->elf, sec, SHT_RELA);
|
||||
if (!ip_relocsec)
|
||||
return -1;
|
||||
|
||||
/* create .orc_unwind section */
|
||||
u_sec = elf_create_section(file->elf, ".orc_unwind", 0,
|
||||
sizeof(struct orc_entry), idx);
|
||||
|
||||
/* populate sections */
|
||||
idx = 0;
|
||||
for_each_sec(file, sec) {
|
||||
if (!sec->text)
|
||||
continue;
|
||||
|
||||
prev_insn = NULL;
|
||||
sec_for_each_insn(file, sec, insn) {
|
||||
if (!prev_insn || memcmp(&insn->orc, &prev_insn->orc,
|
||||
sizeof(struct orc_entry))) {
|
||||
|
||||
if (create_orc_entry(file->elf, u_sec, ip_relocsec, idx,
|
||||
insn->sec, insn->offset,
|
||||
&insn->orc))
|
||||
return -1;
|
||||
|
||||
idx++;
|
||||
}
|
||||
prev_insn = insn;
|
||||
}
|
||||
|
||||
/* section terminator */
|
||||
if (prev_insn) {
|
||||
if (create_orc_entry(file->elf, u_sec, ip_relocsec, idx,
|
||||
prev_insn->sec,
|
||||
prev_insn->offset + prev_insn->len,
|
||||
&empty))
|
||||
return -1;
|
||||
|
||||
idx++;
|
||||
}
|
||||
/* Write ORC entries to sections: */
|
||||
list_for_each_entry(entry, &orc_list, list) {
|
||||
if (write_orc_entry(file->elf, orc_sec, sec, idx++,
|
||||
entry->insn_sec, entry->insn_off,
|
||||
&entry->orc))
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (elf_rebuild_reloc_section(file->elf, ip_relocsec))
|
||||
return -1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -55,6 +55,13 @@ void __weak arch_handle_alternative(unsigned short feature, struct special_alt *
|
||||
{
|
||||
}
|
||||
|
||||
static void reloc_to_sec_off(struct reloc *reloc, struct section **sec,
|
||||
unsigned long *off)
|
||||
{
|
||||
*sec = reloc->sym->sec;
|
||||
*off = reloc->sym->offset + reloc->addend;
|
||||
}
|
||||
|
||||
static int get_alt_entry(struct elf *elf, struct special_entry *entry,
|
||||
struct section *sec, int idx,
|
||||
struct special_alt *alt)
|
||||
@@ -87,14 +94,8 @@ static int get_alt_entry(struct elf *elf, struct special_entry *entry,
|
||||
WARN_FUNC("can't find orig reloc", sec, offset + entry->orig);
|
||||
return -1;
|
||||
}
|
||||
if (orig_reloc->sym->type != STT_SECTION) {
|
||||
WARN_FUNC("don't know how to handle non-section reloc symbol %s",
|
||||
sec, offset + entry->orig, orig_reloc->sym->name);
|
||||
return -1;
|
||||
}
|
||||
|
||||
alt->orig_sec = orig_reloc->sym->sec;
|
||||
alt->orig_off = orig_reloc->addend;
|
||||
reloc_to_sec_off(orig_reloc, &alt->orig_sec, &alt->orig_off);
|
||||
|
||||
if (!entry->group || alt->new_len) {
|
||||
new_reloc = find_reloc_by_dest(elf, sec, offset + entry->new);
|
||||
@@ -104,8 +105,7 @@ static int get_alt_entry(struct elf *elf, struct special_entry *entry,
|
||||
return -1;
|
||||
}
|
||||
|
||||
alt->new_sec = new_reloc->sym->sec;
|
||||
alt->new_off = (unsigned int)new_reloc->addend;
|
||||
reloc_to_sec_off(new_reloc, &alt->new_sec, &alt->new_off);
|
||||
|
||||
/* _ASM_EXTABLE_EX hack */
|
||||
if (alt->new_off >= 0x7ffffff0)
|
||||
@@ -152,7 +152,9 @@ int special_get_alts(struct elf *elf, struct list_head *alts)
|
||||
memset(alt, 0, sizeof(*alt));
|
||||
|
||||
ret = get_alt_entry(elf, entry, sec, idx, alt);
|
||||
if (ret)
|
||||
if (ret > 0)
|
||||
continue;
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
list_add_tail(&alt->list, alts);
|
||||
|
||||
@@ -16,11 +16,14 @@ arch/x86/include/asm/emulate_prefix.h
|
||||
arch/x86/lib/x86-opcode-map.txt
|
||||
arch/x86/tools/gen-insn-attr-x86.awk
|
||||
include/linux/static_call_types.h
|
||||
arch/x86/include/asm/inat.h -I '^#include [\"<]\(asm/\)*inat_types.h[\">]'
|
||||
arch/x86/include/asm/insn.h -I '^#include [\"<]\(asm/\)*inat.h[\">]'
|
||||
arch/x86/lib/inat.c -I '^#include [\"<]\(../include/\)*asm/insn.h[\">]'
|
||||
arch/x86/lib/insn.c -I '^#include [\"<]\(../include/\)*asm/in\(at\|sn\).h[\">]' -I '^#include [\"<]\(../include/\)*asm/emulate_prefix.h[\">]'
|
||||
"
|
||||
|
||||
SYNC_CHECK_FILES='
|
||||
arch/x86/include/asm/inat.h
|
||||
arch/x86/include/asm/insn.h
|
||||
arch/x86/lib/inat.c
|
||||
arch/x86/lib/insn.c
|
||||
'
|
||||
fi
|
||||
|
||||
check_2 () {
|
||||
@@ -63,3 +66,9 @@ while read -r file_entry; do
|
||||
done <<EOF
|
||||
$FILES
|
||||
EOF
|
||||
|
||||
if [ "$SRCARCH" = "x86" ]; then
|
||||
for i in $SYNC_CHECK_FILES; do
|
||||
check $i '-I "^.*\/\*.*__ignore_sync_check__.*\*\/.*$"'
|
||||
done
|
||||
fi
|
||||
|
||||
@@ -25,12 +25,7 @@ int __weak orc_dump(const char *_objname)
|
||||
UNSUPPORTED("orc");
|
||||
}
|
||||
|
||||
int __weak create_orc(struct objtool_file *file)
|
||||
{
|
||||
UNSUPPORTED("orc");
|
||||
}
|
||||
|
||||
int __weak create_orc_sections(struct objtool_file *file)
|
||||
int __weak orc_create(struct objtool_file *file)
|
||||
{
|
||||
UNSUPPORTED("orc");
|
||||
}
|
||||
|
||||
@@ -222,18 +222,33 @@ ifdef PARSER_DEBUG
|
||||
endif
|
||||
|
||||
# Try different combinations to accommodate systems that only have
|
||||
# python[2][-config] in weird combinations but always preferring
|
||||
# python2 and python2-config as per pep-0394. If python2 or python
|
||||
# aren't found, then python3 is used.
|
||||
PYTHON_AUTO := python
|
||||
PYTHON_AUTO := $(if $(call get-executable,python3),python3,$(PYTHON_AUTO))
|
||||
PYTHON_AUTO := $(if $(call get-executable,python),python,$(PYTHON_AUTO))
|
||||
PYTHON_AUTO := $(if $(call get-executable,python2),python2,$(PYTHON_AUTO))
|
||||
override PYTHON := $(call get-executable-or-default,PYTHON,$(PYTHON_AUTO))
|
||||
PYTHON_AUTO_CONFIG := \
|
||||
$(if $(call get-executable,$(PYTHON)-config),$(PYTHON)-config,python-config)
|
||||
override PYTHON_CONFIG := \
|
||||
$(call get-executable-or-default,PYTHON_CONFIG,$(PYTHON_AUTO_CONFIG))
|
||||
# python[2][3]-config in weird combinations in the following order of
|
||||
# priority from lowest to highest:
|
||||
# * python3-config
|
||||
# * python-config
|
||||
# * python2-config as per pep-0394.
|
||||
# * $(PYTHON)-config (If PYTHON is user supplied but PYTHON_CONFIG isn't)
|
||||
#
|
||||
PYTHON_AUTO := python-config
|
||||
PYTHON_AUTO := $(if $(call get-executable,python3-config),python3-config,$(PYTHON_AUTO))
|
||||
PYTHON_AUTO := $(if $(call get-executable,python-config),python-config,$(PYTHON_AUTO))
|
||||
PYTHON_AUTO := $(if $(call get-executable,python2-config),python2-config,$(PYTHON_AUTO))
|
||||
|
||||
# If PYTHON is defined but PYTHON_CONFIG isn't, then take $(PYTHON)-config as if it was the user
|
||||
# supplied value for PYTHON_CONFIG. Because it's "user supplied", error out if it doesn't exist.
|
||||
ifdef PYTHON
|
||||
ifndef PYTHON_CONFIG
|
||||
PYTHON_CONFIG_AUTO := $(call get-executable,$(PYTHON)-config)
|
||||
PYTHON_CONFIG := $(if $(PYTHON_CONFIG_AUTO),$(PYTHON_CONFIG_AUTO),\
|
||||
$(call $(error $(PYTHON)-config not found)))
|
||||
endif
|
||||
endif
|
||||
|
||||
# Select either auto detected python and python-config or use user supplied values if they are
|
||||
# defined. get-executable-or-default fails with an error if the first argument is supplied but
|
||||
# doesn't exist.
|
||||
override PYTHON_CONFIG := $(call get-executable-or-default,PYTHON_CONFIG,$(PYTHON_AUTO))
|
||||
override PYTHON := $(call get-executable-or-default,PYTHON,$(subst -config,,$(PYTHON_CONFIG)))
|
||||
|
||||
grep-libs = $(filter -l%,$(1))
|
||||
strip-libs = $(filter-out -l%,$(1))
|
||||
@@ -255,6 +270,9 @@ ifdef PYTHON_CONFIG
|
||||
PYTHON_EMBED_LIBADD := $(call grep-libs,$(PYTHON_EMBED_LDOPTS)) -lutil
|
||||
PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --includes 2>/dev/null)
|
||||
FLAGS_PYTHON_EMBED := $(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS)
|
||||
ifeq ($(CC_NO_CLANG), 0)
|
||||
PYTHON_EMBED_CCOPTS := $(filter-out -ffat-lto-objects, $(PYTHON_EMBED_CCOPTS))
|
||||
endif
|
||||
endif
|
||||
|
||||
FEATURE_CHECK_CFLAGS-libpython := $(PYTHON_EMBED_CCOPTS)
|
||||
@@ -760,6 +778,9 @@ else
|
||||
LDFLAGS += $(PERL_EMBED_LDFLAGS)
|
||||
EXTLIBS += $(PERL_EMBED_LIBADD)
|
||||
CFLAGS += -DHAVE_LIBPERL_SUPPORT
|
||||
ifeq ($(CC_NO_CLANG), 0)
|
||||
CFLAGS += -Wno-compound-token-split-by-macro
|
||||
endif
|
||||
$(call detected,CONFIG_LIBPERL)
|
||||
endif
|
||||
endif
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
perf-y += header.o
|
||||
perf-y += machine.o
|
||||
perf-y += perf_regs.o
|
||||
perf-y += tsc.o
|
||||
perf-$(CONFIG_DWARF) += dwarf-regs.o
|
||||
|
||||
@@ -124,6 +124,12 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,
|
||||
evsel__set_sample_bit(arm_spe_evsel, TIME);
|
||||
evsel__set_sample_bit(arm_spe_evsel, TID);
|
||||
|
||||
/*
|
||||
* Set this only so that perf report knows that SPE generates memory info. It has no effect
|
||||
* on the opening of the event or the SPE data produced.
|
||||
*/
|
||||
evsel__set_sample_bit(arm_spe_evsel, DATA_SRC);
|
||||
|
||||
/* Add dummy event to keep tracking */
|
||||
err = parse_events(evlist, "dummy:u", NULL);
|
||||
if (err)
|
||||
|
||||
@@ -1,27 +0,0 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
#include "debug.h"
|
||||
#include "symbol.h"
|
||||
|
||||
/* On arm64, kernel text segment start at high memory address,
|
||||
* for example 0xffff 0000 8xxx xxxx. Modules start at a low memory
|
||||
* address, like 0xffff 0000 00ax xxxx. When only samll amount of
|
||||
* memory is used by modules, gap between end of module's text segment
|
||||
* and start of kernel text segment may be reach 2G.
|
||||
* Therefore do not fill this gap and do not assign it to the kernel dso map.
|
||||
*/
|
||||
|
||||
#define SYMBOL_LIMIT (1 << 12) /* 4K */
|
||||
|
||||
void arch__symbols__fixup_end(struct symbol *p, struct symbol *c)
|
||||
{
|
||||
if ((strchr(p->name, '[') && strchr(c->name, '[') == NULL) ||
|
||||
(strchr(p->name, '[') == NULL && strchr(c->name, '[')))
|
||||
/* Limit range of last symbol in module and kernel */
|
||||
p->end += SYMBOL_LIMIT;
|
||||
else
|
||||
p->end = c->start;
|
||||
pr_debug4("%s sym:%s end:%#lx\n", __func__, p->name, p->end);
|
||||
}
|
||||
@@ -34,19 +34,3 @@ int arch__fix_module_text_start(u64 *start, u64 *size, const char *name)
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* On s390 kernel text segment start is located at very low memory addresses,
|
||||
* for example 0x10000. Modules are located at very high memory addresses,
|
||||
* for example 0x3ff xxxx xxxx. The gap between end of kernel text segment
|
||||
* and beginning of first module's text segment is very big.
|
||||
* Therefore do not fill this gap and do not assign it to the kernel dso map.
|
||||
*/
|
||||
void arch__symbols__fixup_end(struct symbol *p, struct symbol *c)
|
||||
{
|
||||
if (strchr(p->name, '[') == NULL && strchr(c->name, '['))
|
||||
/* Last kernel symbol mapped to end of page */
|
||||
p->end = roundup(p->end, page_size);
|
||||
else
|
||||
p->end = c->start;
|
||||
pr_debug4("%s sym:%s end:%#lx\n", __func__, p->name, p->end);
|
||||
}
|
||||
|
||||
@@ -1656,7 +1656,7 @@ static int __bench_numa(const char *name)
|
||||
"GB/sec,", "total-speed", "GB/sec total speed");
|
||||
|
||||
if (g->p.show_details >= 2) {
|
||||
char tname[14 + 2 * 10 + 1];
|
||||
char tname[14 + 2 * 11 + 1];
|
||||
struct thread_data *td;
|
||||
for (p = 0; p < g->p.nr_proc; p++) {
|
||||
for (t = 0; t < g->p.nr_threads; t++) {
|
||||
|
||||
@@ -918,8 +918,8 @@ percent_rmt_hitm_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
|
||||
double per_left;
|
||||
double per_right;
|
||||
|
||||
per_left = PERCENT(left, lcl_hitm);
|
||||
per_right = PERCENT(right, lcl_hitm);
|
||||
per_left = PERCENT(left, rmt_hitm);
|
||||
per_right = PERCENT(right, rmt_hitm);
|
||||
|
||||
return per_left - per_right;
|
||||
}
|
||||
@@ -2694,9 +2694,7 @@ static int perf_c2c__report(int argc, const char **argv)
|
||||
"the input file to process"),
|
||||
OPT_INCR('N', "node-info", &c2c.node_info,
|
||||
"show extra node info in report (repeat for more info)"),
|
||||
#ifdef HAVE_SLANG_SUPPORT
|
||||
OPT_BOOLEAN(0, "stdio", &c2c.use_stdio, "Use the stdio interface"),
|
||||
#endif
|
||||
OPT_BOOLEAN(0, "stats", &c2c.stats_only,
|
||||
"Display only statistic tables (implies --stdio)"),
|
||||
OPT_BOOLEAN(0, "full-symbols", &c2c.symbol_full,
|
||||
@@ -2725,6 +2723,10 @@ static int perf_c2c__report(int argc, const char **argv)
|
||||
if (argc)
|
||||
usage_with_options(report_c2c_usage, options);
|
||||
|
||||
#ifndef HAVE_SLANG_SUPPORT
|
||||
c2c.use_stdio = true;
|
||||
#endif
|
||||
|
||||
if (c2c.stats_only)
|
||||
c2c.use_stdio = true;
|
||||
|
||||
|
||||
@@ -340,6 +340,7 @@ static int report__setup_sample_type(struct report *rep)
|
||||
struct perf_session *session = rep->session;
|
||||
u64 sample_type = evlist__combined_sample_type(session->evlist);
|
||||
bool is_pipe = perf_data__is_pipe(session->data);
|
||||
struct evsel *evsel;
|
||||
|
||||
if (session->itrace_synth_opts->callchain ||
|
||||
session->itrace_synth_opts->add_callchain ||
|
||||
@@ -394,6 +395,19 @@ static int report__setup_sample_type(struct report *rep)
|
||||
}
|
||||
|
||||
if (sort__mode == SORT_MODE__MEMORY) {
|
||||
/*
|
||||
* FIXUP: prior to kernel 5.18, Arm SPE missed to set
|
||||
* PERF_SAMPLE_DATA_SRC bit in sample type. For backward
|
||||
* compatibility, set the bit if it's an old perf data file.
|
||||
*/
|
||||
evlist__for_each_entry(session->evlist, evsel) {
|
||||
if (strstr(evsel->name, "arm_spe") &&
|
||||
!(sample_type & PERF_SAMPLE_DATA_SRC)) {
|
||||
evsel->core.attr.sample_type |= PERF_SAMPLE_DATA_SRC;
|
||||
sample_type |= PERF_SAMPLE_DATA_SRC;
|
||||
}
|
||||
}
|
||||
|
||||
if (!is_pipe && !(sample_type & PERF_SAMPLE_DATA_SRC)) {
|
||||
ui__error("Selected --mem-mode but no mem data. "
|
||||
"Did you call perf record without -d?\n");
|
||||
|
||||
@@ -75,6 +75,13 @@ include/uapi/asm-generic/mman-common.h
|
||||
include/uapi/asm-generic/unistd.h
|
||||
'
|
||||
|
||||
SYNC_CHECK_FILES='
|
||||
arch/x86/include/asm/inat.h
|
||||
arch/x86/include/asm/insn.h
|
||||
arch/x86/lib/inat.c
|
||||
arch/x86/lib/insn.c
|
||||
'
|
||||
|
||||
# These copies are under tools/perf/trace/beauty/ as they are not used to in
|
||||
# building object files only by scripts in tools/perf/trace/beauty/ to generate
|
||||
# tables that then gets included in .c files for things like id->string syscall
|
||||
@@ -129,6 +136,10 @@ for i in $FILES; do
|
||||
check $i -B
|
||||
done
|
||||
|
||||
for i in $SYNC_CHECK_FILES; do
|
||||
check $i '-I "^.*\/\*.*__ignore_sync_check__.*\*\/.*$"'
|
||||
done
|
||||
|
||||
# diff with extra ignore lines
|
||||
check arch/x86/lib/memcpy_64.S '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>" -I"^SYM_FUNC_START\(_LOCAL\)*(memcpy_\(erms\|orig\))"'
|
||||
check arch/x86/lib/memset_64.S '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>" -I"^SYM_FUNC_START\(_LOCAL\)*(memset_\(erms\|orig\))"'
|
||||
@@ -137,10 +148,6 @@ check include/uapi/linux/mman.h '-I "^#include <\(uapi/\)*asm/mman.h>"'
|
||||
check include/linux/build_bug.h '-I "^#\(ifndef\|endif\)\( \/\/\)* static_assert$"'
|
||||
check include/linux/ctype.h '-I "isdigit("'
|
||||
check lib/ctype.c '-I "^EXPORT_SYMBOL" -I "^#include <linux/export.h>" -B'
|
||||
check arch/x86/include/asm/inat.h '-I "^#include [\"<]\(asm/\)*inat_types.h[\">]"'
|
||||
check arch/x86/include/asm/insn.h '-I "^#include [\"<]\(asm/\)*inat.h[\">]"'
|
||||
check arch/x86/lib/inat.c '-I "^#include [\"<]\(../include/\)*asm/insn.h[\">]"'
|
||||
check arch/x86/lib/insn.c '-I "^#include [\"<]\(../include/\)*asm/in\(at\|sn\).h[\">]" -I "^#include [\"<]\(../include/\)*asm/emulate_prefix.h[\">]"'
|
||||
|
||||
# diff non-symmetric files
|
||||
check_2 tools/perf/arch/x86/entry/syscalls/syscall_64.tbl arch/x86/entry/syscalls/syscall_64.tbl
|
||||
|
||||
@@ -433,7 +433,7 @@ void pthread__unblock_sigwinch(void)
|
||||
static int libperf_print(enum libperf_print_level level,
|
||||
const char *fmt, va_list ap)
|
||||
{
|
||||
return eprintf(level, verbose, fmt, ap);
|
||||
return veprintf(level, verbose, fmt, ap);
|
||||
}
|
||||
|
||||
int main(int argc, const char **argv)
|
||||
|
||||
@@ -579,7 +579,7 @@ static int json_events(const char *fn,
|
||||
} else if (json_streq(map, field, "ExtSel")) {
|
||||
char *code = NULL;
|
||||
addfield(map, &code, "", "", val);
|
||||
eventcode |= strtoul(code, NULL, 0) << 21;
|
||||
eventcode |= strtoul(code, NULL, 0) << 8;
|
||||
free(code);
|
||||
} else if (json_streq(map, field, "EventName")) {
|
||||
addfield(map, &je.name, "", "", val);
|
||||
|
||||
@@ -2164,11 +2164,19 @@ struct sym_args {
|
||||
bool near;
|
||||
};
|
||||
|
||||
static bool kern_sym_name_match(const char *kname, const char *name)
|
||||
{
|
||||
size_t n = strlen(name);
|
||||
|
||||
return !strcmp(kname, name) ||
|
||||
(!strncmp(kname, name, n) && kname[n] == '\t');
|
||||
}
|
||||
|
||||
static bool kern_sym_match(struct sym_args *args, const char *name, char type)
|
||||
{
|
||||
/* A function with the same name, and global or the n'th found or any */
|
||||
return kallsyms__is_function(type) &&
|
||||
!strcmp(name, args->name) &&
|
||||
kern_sym_name_match(name, args->name) &&
|
||||
((args->global && isupper(type)) ||
|
||||
(args->selected && ++(args->cnt) == args->idx) ||
|
||||
(!args->global && !args->selected));
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
#define __PERF_DATA_H
|
||||
|
||||
#include <stdbool.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
enum perf_data_mode {
|
||||
PERF_DATA_MODE_WRITE,
|
||||
|
||||
@@ -20,8 +20,19 @@ static int __dso_id__cmp(struct dso_id *a, struct dso_id *b)
|
||||
if (a->ino > b->ino) return -1;
|
||||
if (a->ino < b->ino) return 1;
|
||||
|
||||
if (a->ino_generation > b->ino_generation) return -1;
|
||||
if (a->ino_generation < b->ino_generation) return 1;
|
||||
/*
|
||||
* Synthesized MMAP events have zero ino_generation, avoid comparing
|
||||
* them with MMAP events with actual ino_generation.
|
||||
*
|
||||
* I found it harmful because the mismatch resulted in a new
|
||||
* dso that did not have a build ID whereas the original dso did have a
|
||||
* build ID. The build ID was essential because the object was not found
|
||||
* otherwise. - Adrian
|
||||
*/
|
||||
if (a->ino_generation && b->ino_generation) {
|
||||
if (a->ino_generation > b->ino_generation) return -1;
|
||||
if (a->ino_generation < b->ino_generation) return 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -30,7 +30,11 @@
|
||||
|
||||
#define BUILD_ID_URANDOM /* different uuid for each run */
|
||||
|
||||
#ifdef HAVE_LIBCRYPTO
|
||||
// FIXME, remove this and fix the deprecation warnings before its removed and
|
||||
// We'll break for good here...
|
||||
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
|
||||
|
||||
#ifdef HAVE_LIBCRYPTO_SUPPORT
|
||||
|
||||
#define BUILD_ID_MD5
|
||||
#undef BUILD_ID_SHA /* does not seem to work well when linked with Java */
|
||||
@@ -247,6 +251,7 @@ jit_write_elf(int fd, uint64_t load_addr, const char *sym,
|
||||
Elf_Data *d;
|
||||
Elf_Scn *scn;
|
||||
Elf_Ehdr *ehdr;
|
||||
Elf_Phdr *phdr;
|
||||
Elf_Shdr *shdr;
|
||||
uint64_t eh_frame_base_offset;
|
||||
char *strsym = NULL;
|
||||
@@ -281,6 +286,19 @@ jit_write_elf(int fd, uint64_t load_addr, const char *sym,
|
||||
ehdr->e_version = EV_CURRENT;
|
||||
ehdr->e_shstrndx= unwinding ? 4 : 2; /* shdr index for section name */
|
||||
|
||||
/*
|
||||
* setup program header
|
||||
*/
|
||||
phdr = elf_newphdr(e, 1);
|
||||
phdr[0].p_type = PT_LOAD;
|
||||
phdr[0].p_offset = 0;
|
||||
phdr[0].p_vaddr = 0;
|
||||
phdr[0].p_paddr = 0;
|
||||
phdr[0].p_filesz = csize;
|
||||
phdr[0].p_memsz = csize;
|
||||
phdr[0].p_flags = PF_X | PF_R;
|
||||
phdr[0].p_align = 8;
|
||||
|
||||
/*
|
||||
* setup text section
|
||||
*/
|
||||
|
||||
@@ -50,8 +50,10 @@ int jit_add_debug_info(Elf *e, uint64_t code_addr, void *debug, int nr_debug_ent
|
||||
|
||||
#if GEN_ELF_CLASS == ELFCLASS64
|
||||
#define elf_newehdr elf64_newehdr
|
||||
#define elf_newphdr elf64_newphdr
|
||||
#define elf_getshdr elf64_getshdr
|
||||
#define Elf_Ehdr Elf64_Ehdr
|
||||
#define Elf_Phdr Elf64_Phdr
|
||||
#define Elf_Shdr Elf64_Shdr
|
||||
#define Elf_Sym Elf64_Sym
|
||||
#define ELF_ST_TYPE(a) ELF64_ST_TYPE(a)
|
||||
@@ -59,8 +61,10 @@ int jit_add_debug_info(Elf *e, uint64_t code_addr, void *debug, int nr_debug_ent
|
||||
#define ELF_ST_VIS(a) ELF64_ST_VISIBILITY(a)
|
||||
#else
|
||||
#define elf_newehdr elf32_newehdr
|
||||
#define elf_newphdr elf32_newphdr
|
||||
#define elf_getshdr elf32_getshdr
|
||||
#define Elf_Ehdr Elf32_Ehdr
|
||||
#define Elf_Phdr Elf32_Phdr
|
||||
#define Elf_Shdr Elf32_Shdr
|
||||
#define Elf_Sym Elf32_Sym
|
||||
#define ELF_ST_TYPE(a) ELF32_ST_TYPE(a)
|
||||
|
||||
@@ -3,8 +3,9 @@
|
||||
//
|
||||
#ifndef HAVE_GET_CURRENT_DIR_NAME
|
||||
#include "get_current_dir_name.h"
|
||||
#include <limits.h>
|
||||
#include <string.h>
|
||||
#include <unistd.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
/* Android's 'bionic' library, for one, doesn't have this */
|
||||
|
||||
|
||||
@@ -3279,6 +3279,7 @@ static const char * const intel_pt_info_fmts[] = {
|
||||
[INTEL_PT_SNAPSHOT_MODE] = " Snapshot mode %"PRId64"\n",
|
||||
[INTEL_PT_PER_CPU_MMAPS] = " Per-cpu maps %"PRId64"\n",
|
||||
[INTEL_PT_MTC_BIT] = " MTC bit %#"PRIx64"\n",
|
||||
[INTEL_PT_MTC_FREQ_BITS] = " MTC freq bits %#"PRIx64"\n",
|
||||
[INTEL_PT_TSC_CTC_N] = " TSC:CTC numerator %"PRIu64"\n",
|
||||
[INTEL_PT_TSC_CTC_D] = " TSC:CTC denominator %"PRIu64"\n",
|
||||
[INTEL_PT_CYC_BIT] = " CYC bit %#"PRIx64"\n",
|
||||
@@ -3293,8 +3294,12 @@ static void intel_pt_print_info(__u64 *arr, int start, int finish)
|
||||
if (!dump_trace)
|
||||
return;
|
||||
|
||||
for (i = start; i <= finish; i++)
|
||||
fprintf(stdout, intel_pt_info_fmts[i], arr[i]);
|
||||
for (i = start; i <= finish; i++) {
|
||||
const char *fmt = intel_pt_info_fmts[i];
|
||||
|
||||
if (fmt)
|
||||
fprintf(stdout, fmt, arr[i]);
|
||||
}
|
||||
}
|
||||
|
||||
static void intel_pt_print_info_str(const char *name, const char *str)
|
||||
|
||||
@@ -356,6 +356,12 @@ __add_event(struct list_head *list, int *idx,
|
||||
struct perf_cpu_map *cpus = pmu ? perf_cpu_map__get(pmu->cpus) :
|
||||
cpu_list ? perf_cpu_map__new(cpu_list) : NULL;
|
||||
|
||||
if (pmu)
|
||||
perf_pmu__warn_invalid_formats(pmu);
|
||||
|
||||
if (pmu && attr->type == PERF_TYPE_RAW)
|
||||
perf_pmu__warn_invalid_config(pmu, attr->config, name);
|
||||
|
||||
if (init_attr)
|
||||
event_attr_init(attr);
|
||||
|
||||
@@ -1442,7 +1448,9 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
|
||||
bool use_uncore_alias;
|
||||
LIST_HEAD(config_terms);
|
||||
|
||||
if (verbose > 1) {
|
||||
pmu = parse_state->fake_pmu ?: perf_pmu__find(name);
|
||||
|
||||
if (verbose > 1 && !(pmu && pmu->selectable)) {
|
||||
fprintf(stderr, "Attempting to add event pmu '%s' with '",
|
||||
name);
|
||||
if (head_config) {
|
||||
@@ -1455,7 +1463,6 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
|
||||
fprintf(stderr, "' that may result in non-fatal errors\n");
|
||||
}
|
||||
|
||||
pmu = parse_state->fake_pmu ?: perf_pmu__find(name);
|
||||
if (!pmu) {
|
||||
char *err_str;
|
||||
|
||||
|
||||
@@ -862,6 +862,23 @@ static struct perf_pmu *pmu_lookup(const char *name)
|
||||
return pmu;
|
||||
}
|
||||
|
||||
void perf_pmu__warn_invalid_formats(struct perf_pmu *pmu)
|
||||
{
|
||||
struct perf_pmu_format *format;
|
||||
|
||||
/* fake pmu doesn't have format list */
|
||||
if (pmu == &perf_pmu__fake)
|
||||
return;
|
||||
|
||||
list_for_each_entry(format, &pmu->format, list)
|
||||
if (format->value >= PERF_PMU_FORMAT_VALUE_CONFIG_END) {
|
||||
pr_warning("WARNING: '%s' format '%s' requires 'perf_event_attr::config%d'"
|
||||
"which is not supported by this version of perf!\n",
|
||||
pmu->name, format->name, format->value);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
static struct perf_pmu *pmu_find(const char *name)
|
||||
{
|
||||
struct perf_pmu *pmu;
|
||||
@@ -1716,3 +1733,36 @@ int perf_pmu__caps_parse(struct perf_pmu *pmu)
|
||||
|
||||
return nr_caps;
|
||||
}
|
||||
|
||||
void perf_pmu__warn_invalid_config(struct perf_pmu *pmu, __u64 config,
|
||||
char *name)
|
||||
{
|
||||
struct perf_pmu_format *format;
|
||||
__u64 masks = 0, bits;
|
||||
char buf[100];
|
||||
unsigned int i;
|
||||
|
||||
list_for_each_entry(format, &pmu->format, list) {
|
||||
if (format->value != PERF_PMU_FORMAT_VALUE_CONFIG)
|
||||
continue;
|
||||
|
||||
for_each_set_bit(i, format->bits, PERF_PMU_FORMAT_BITS)
|
||||
masks |= 1ULL << i;
|
||||
}
|
||||
|
||||
/*
|
||||
* Kernel doesn't export any valid format bits.
|
||||
*/
|
||||
if (masks == 0)
|
||||
return;
|
||||
|
||||
bits = config & ~masks;
|
||||
if (bits == 0)
|
||||
return;
|
||||
|
||||
bitmap_scnprintf((unsigned long *)&bits, sizeof(bits) * 8, buf, sizeof(buf));
|
||||
|
||||
pr_warning("WARNING: event '%s' not valid (bits %s of config "
|
||||
"'%llx' not supported by kernel)!\n",
|
||||
name ?: "N/A", buf, config);
|
||||
}
|
||||
|
||||
@@ -15,6 +15,7 @@ enum {
|
||||
PERF_PMU_FORMAT_VALUE_CONFIG,
|
||||
PERF_PMU_FORMAT_VALUE_CONFIG1,
|
||||
PERF_PMU_FORMAT_VALUE_CONFIG2,
|
||||
PERF_PMU_FORMAT_VALUE_CONFIG_END,
|
||||
};
|
||||
|
||||
#define PERF_PMU_FORMAT_BITS 64
|
||||
@@ -120,4 +121,8 @@ int perf_pmu__convert_scale(const char *scale, char **end, double *sval);
|
||||
|
||||
int perf_pmu__caps_parse(struct perf_pmu *pmu);
|
||||
|
||||
void perf_pmu__warn_invalid_config(struct perf_pmu *pmu, __u64 config,
|
||||
char *name);
|
||||
void perf_pmu__warn_invalid_formats(struct perf_pmu *pmu);
|
||||
|
||||
#endif /* __PMU_H */
|
||||
|
||||
@@ -27,8 +27,6 @@ num_dec [0-9]+
|
||||
|
||||
{num_dec} { return value(10); }
|
||||
config { return PP_CONFIG; }
|
||||
config1 { return PP_CONFIG1; }
|
||||
config2 { return PP_CONFIG2; }
|
||||
- { return '-'; }
|
||||
: { return ':'; }
|
||||
, { return ','; }
|
||||
|
||||
@@ -20,7 +20,7 @@ do { \
|
||||
|
||||
%}
|
||||
|
||||
%token PP_CONFIG PP_CONFIG1 PP_CONFIG2
|
||||
%token PP_CONFIG
|
||||
%token PP_VALUE PP_ERROR
|
||||
%type <num> PP_VALUE
|
||||
%type <bits> bit_term
|
||||
@@ -47,18 +47,11 @@ PP_CONFIG ':' bits
|
||||
$3));
|
||||
}
|
||||
|
|
||||
PP_CONFIG1 ':' bits
|
||||
PP_CONFIG PP_VALUE ':' bits
|
||||
{
|
||||
ABORT_ON(perf_pmu__new_format(format, name,
|
||||
PERF_PMU_FORMAT_VALUE_CONFIG1,
|
||||
$3));
|
||||
}
|
||||
|
|
||||
PP_CONFIG2 ':' bits
|
||||
{
|
||||
ABORT_ON(perf_pmu__new_format(format, name,
|
||||
PERF_PMU_FORMAT_VALUE_CONFIG2,
|
||||
$3));
|
||||
$2,
|
||||
$4));
|
||||
}
|
||||
|
||||
bits:
|
||||
|
||||
@@ -1760,8 +1760,10 @@ int parse_perf_probe_command(const char *cmd, struct perf_probe_event *pev)
|
||||
if (!pev->event && pev->point.function && pev->point.line
|
||||
&& !pev->point.lazy_line && !pev->point.offset) {
|
||||
if (asprintf(&pev->event, "%s_L%d", pev->point.function,
|
||||
pev->point.line) < 0)
|
||||
return -ENOMEM;
|
||||
pev->point.line) < 0) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
/* Copy arguments and ensure return probe has no C argument */
|
||||
|
||||
@@ -2056,6 +2056,7 @@ prefetch_event(char *buf, u64 head, size_t mmap_size,
|
||||
bool needs_swap, union perf_event *error)
|
||||
{
|
||||
union perf_event *event;
|
||||
u16 event_size;
|
||||
|
||||
/*
|
||||
* Ensure we have enough space remaining to read
|
||||
@@ -2068,15 +2069,23 @@ prefetch_event(char *buf, u64 head, size_t mmap_size,
|
||||
if (needs_swap)
|
||||
perf_event_header__bswap(&event->header);
|
||||
|
||||
if (head + event->header.size <= mmap_size)
|
||||
event_size = event->header.size;
|
||||
if (head + event_size <= mmap_size)
|
||||
return event;
|
||||
|
||||
/* We're not fetching the event so swap back again */
|
||||
if (needs_swap)
|
||||
perf_event_header__bswap(&event->header);
|
||||
|
||||
pr_debug("%s: head=%#" PRIx64 " event->header_size=%#x, mmap_size=%#zx:"
|
||||
" fuzzed or compressed perf.data?\n",__func__, head, event->header.size, mmap_size);
|
||||
/* Check if the event fits into the next mmapped buf. */
|
||||
if (event_size <= mmap_size - head % page_size) {
|
||||
/* Remap buf and fetch again. */
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* Invalid input. Event size should never exceed mmap_size. */
|
||||
pr_debug("%s: head=%#" PRIx64 " event->header.size=%#x, mmap_size=%#zx:"
|
||||
" fuzzed or compressed perf.data?\n", __func__, head, event_size, mmap_size);
|
||||
|
||||
return error;
|
||||
}
|
||||
|
||||
@@ -1,12 +1,14 @@
|
||||
from os import getenv
|
||||
from os import getenv, path
|
||||
from subprocess import Popen, PIPE
|
||||
from re import sub
|
||||
|
||||
cc = getenv("CC")
|
||||
cc_is_clang = b"clang version" in Popen([cc.split()[0], "-v"], stderr=PIPE).stderr.readline()
|
||||
src_feature_tests = getenv('srctree') + '/tools/build/feature'
|
||||
|
||||
def clang_has_option(option):
|
||||
return [o for o in Popen([cc, option], stderr=PIPE).stderr.readlines() if b"unknown argument" in o] == [ ]
|
||||
cc_output = Popen([cc, option, path.join(src_feature_tests, "test-hello.c") ], stderr=PIPE).stderr.readlines()
|
||||
return [o for o in cc_output if ((b"unknown argument" in o) or (b"is not supported" in o))] == [ ]
|
||||
|
||||
if cc_is_clang:
|
||||
from distutils.sysconfig import get_config_vars
|
||||
@@ -23,6 +25,8 @@ if cc_is_clang:
|
||||
vars[var] = sub("-fstack-protector-strong", "", vars[var])
|
||||
if not clang_has_option("-fno-semantic-interposition"):
|
||||
vars[var] = sub("-fno-semantic-interposition", "", vars[var])
|
||||
if not clang_has_option("-ffat-lto-objects"):
|
||||
vars[var] = sub("-ffat-lto-objects", "", vars[var])
|
||||
|
||||
from distutils.core import setup, Extension
|
||||
|
||||
|
||||
@@ -203,7 +203,7 @@ static void new_line_csv(struct perf_stat_config *config, void *ctx)
|
||||
|
||||
fputc('\n', os->fh);
|
||||
if (os->prefix)
|
||||
fprintf(os->fh, "%s%s", os->prefix, config->csv_sep);
|
||||
fprintf(os->fh, "%s", os->prefix);
|
||||
aggr_printout(config, os->evsel, os->id, os->nr);
|
||||
for (i = 0; i < os->nfields; i++)
|
||||
fputs(config->csv_sep, os->fh);
|
||||
|
||||
@@ -232,6 +232,33 @@ Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
return NULL;
}

static int elf_read_program_header(Elf *elf, u64 vaddr, GElf_Phdr *phdr)
{
size_t i, phdrnum;
u64 sz;

if (elf_getphdrnum(elf, &phdrnum))
return -1;

for (i = 0; i < phdrnum; i++) {
if (gelf_getphdr(elf, i, phdr) == NULL)
return -1;

if (phdr->p_type != PT_LOAD)
continue;

sz = max(phdr->p_memsz, phdr->p_filesz);
if (!sz)
continue;

if (vaddr >= phdr->p_vaddr && (vaddr < phdr->p_vaddr + sz))
return 0;
}

/* Not found any valid program header */
return -1;
}

static bool want_demangle(bool is_kernel_sym)
{
return is_kernel_sym ? symbol_conf.demangle_kernel : symbol_conf.demangle;
@@ -1181,6 +1208,7 @@ int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
sym.st_value);
used_opd = true;
}

/*
* When loading symbols in a data mapping, ABS symbols (which
* has a value of SHN_ABS in its st_shndx) failed at
@@ -1217,11 +1245,33 @@ int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
goto out_elf_end;
} else if ((used_opd && runtime_ss->adjust_symbols) ||
(!used_opd && syms_ss->adjust_symbols)) {
pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " "
"sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n", __func__,
(u64)sym.st_value, (u64)shdr.sh_addr,
(u64)shdr.sh_offset);
sym.st_value -= shdr.sh_addr - shdr.sh_offset;
GElf_Phdr phdr;

if (elf_read_program_header(syms_ss->elf,
(u64)sym.st_value, &phdr)) {
pr_debug4("%s: failed to find program header for "
"symbol: %s st_value: %#" PRIx64 "\n",
__func__, elf_name, (u64)sym.st_value);
pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " "
"sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n",
__func__, (u64)sym.st_value, (u64)shdr.sh_addr,
(u64)shdr.sh_offset);
/*
* Fail to find program header, let's rollback
* to use shdr.sh_addr and shdr.sh_offset to
* calibrate symbol's file address, though this
* is not necessary for normal C ELF file, we
* still need to handle java JIT symbols in this
* case.
*/
sym.st_value -= shdr.sh_addr - shdr.sh_offset;
} else {
pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " "
"p_vaddr: %#" PRIx64 " p_offset: %#" PRIx64 "\n",
__func__, (u64)sym.st_value, (u64)phdr.p_vaddr,
(u64)phdr.p_offset);
sym.st_value -= phdr.p_vaddr - phdr.p_offset;
}
}

demangled = demangle_sym(dso, kmodule, elf_name);
@@ -1245,7 +1295,7 @@ int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
* For misannotated, zeroed, ASM function sizes.
*/
if (nr > 0) {
symbols__fixup_end(&dso->symbols);
symbols__fixup_end(&dso->symbols, false);
symbols__fixup_duplicate(&dso->symbols);
if (kmap) {
/*
@@ -1952,8 +2002,8 @@ static int kcore_copy__compare_file(const char *from_dir, const char *to_dir,
* unusual. One significant peculiarity is that the mapping (start -> pgoff)
* is not the same for the kernel map and the modules map. That happens because
* the data is copied adjacently whereas the original kcore has gaps. Finally,
* kallsyms and modules files are compared with their copies to check that
* modules have not been loaded or unloaded while the copies were taking place.
* kallsyms file is compared with its copy to check that modules have not been
* loaded or unloaded while the copies were taking place.
*
* Return: %0 on success, %-1 on failure.
*/
@@ -2016,9 +2066,6 @@ int kcore_copy(const char *from_dir, const char *to_dir)
goto out_extract_close;
}

if (kcore_copy__compare_file(from_dir, to_dir, "modules"))
goto out_extract_close;

if (kcore_copy__compare_file(from_dir, to_dir, "kallsyms"))
goto out_extract_close;

@@ -101,11 +101,6 @@ static int prefix_underscores_count(const char *str)
return tail - str;
}

void __weak arch__symbols__fixup_end(struct symbol *p, struct symbol *c)
{
p->end = c->start;
}

const char * __weak arch__normalize_symbol_name(const char *name)
{
return name;
@@ -217,7 +212,8 @@ again:
}
}

void symbols__fixup_end(struct rb_root_cached *symbols)
/* Update zero-sized symbols using the address of the next symbol */
void symbols__fixup_end(struct rb_root_cached *symbols, bool is_kallsyms)
{
struct rb_node *nd, *prevnd = rb_first_cached(symbols);
struct symbol *curr, *prev;
@@ -231,8 +227,29 @@ void symbols__fixup_end(struct rb_root_cached *symbols)
prev = curr;
curr = rb_entry(nd, struct symbol, rb_node);

if (prev->end == prev->start || prev->end != curr->start)
arch__symbols__fixup_end(prev, curr);
/*
* On some architecture kernel text segment start is located at
* some low memory address, while modules are located at high
* memory addresses (or vice versa). The gap between end of
* kernel text segment and beginning of first module's text
* segment is very big. Therefore do not fill this gap and do
* not assign it to the kernel dso map (kallsyms).
*
* In kallsyms, it determines module symbols using '[' character
* like in:
* ffffffffc1937000 T hdmi_driver_init [snd_hda_codec_hdmi]
*/
if (prev->end == prev->start) {
/* Last kernel/module symbol mapped to end of page */
if (is_kallsyms && (!strchr(prev->name, '[') !=
!strchr(curr->name, '[')))
prev->end = roundup(prev->end + 4096, 4096);
else
prev->end = curr->start;

pr_debug4("%s sym:%s end:%#" PRIx64 "\n",
__func__, prev->name, prev->end);
}
}

/* Last entry */
@@ -1456,7 +1473,7 @@ int __dso__load_kallsyms(struct dso *dso, const char *filename,
if (kallsyms__delta(kmap, filename, &delta))
return -1;

symbols__fixup_end(&dso->symbols);
symbols__fixup_end(&dso->symbols, true);
symbols__fixup_duplicate(&dso->symbols);

if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
@@ -1651,7 +1668,7 @@ int dso__load_bfd_symbols(struct dso *dso, const char *debugfile)
#undef bfd_asymbol_section
#endif

symbols__fixup_end(&dso->symbols);
symbols__fixup_end(&dso->symbols, false);
symbols__fixup_duplicate(&dso->symbols);
dso->adjust_symbols = 1;

@@ -192,7 +192,7 @@ void __symbols__insert(struct rb_root_cached *symbols, struct symbol *sym,
bool kernel);
void symbols__insert(struct rb_root_cached *symbols, struct symbol *sym);
void symbols__fixup_duplicate(struct rb_root_cached *symbols);
void symbols__fixup_end(struct rb_root_cached *symbols);
void symbols__fixup_end(struct rb_root_cached *symbols, bool is_kallsyms);
void maps__fixup_end(struct maps *maps);

typedef int (*mapfn_t)(u64 start, u64 len, u64 pgoff, void *data);
@@ -230,7 +230,6 @@ const char *arch__normalize_symbol_name(const char *name);
#define SYMBOL_A 0
#define SYMBOL_B 1

void arch__symbols__fixup_end(struct symbol *p, struct symbol *c);
int arch__compare_symbol_names(const char *namea, const char *nameb);
int arch__compare_symbol_names_n(const char *namea, const char *nameb,
unsigned int n);

@@ -4189,6 +4189,7 @@ rapl_dram_energy_units_probe(int model, double rapl_energy_units)
case INTEL_FAM6_HASWELL_X: /* HSX */
case INTEL_FAM6_BROADWELL_X: /* BDX */
case INTEL_FAM6_XEON_PHI_KNL: /* KNL */
case INTEL_FAM6_ICELAKE_X: /* ICX */
return (rapl_dram_energy_units = 15.3 / 1000000);
default:
return (rapl_energy_units);

@@ -9,6 +9,7 @@ TARGETS += clone3
TARGETS += core
TARGETS += cpufreq
TARGETS += cpu-hotplug
TARGETS += damon
TARGETS += drivers/dma-buf
TARGETS += efivarfs
TARGETS += exec

@@ -33,7 +33,7 @@ bool validate_extra_context(struct extra_context *extra, char **err)
return false;

fprintf(stderr, "Validating EXTRA...\n");
term = GET_RESV_NEXT_HEAD(extra);
term = GET_RESV_NEXT_HEAD(&extra->head);
if (!term || term->magic || term->size) {
*err = "Missing terminator after EXTRA context";
return false;

@@ -10,7 +10,6 @@ test_dev_cgroup
/test_progs*
test_tcpbpf_user
!test_progs.h
!test_progs.c
test_verifier_log
feature
test_sock
@@ -38,4 +37,3 @@ test_cpp
/tools
/runqslower
/bench
!
@@ -4758,7 +4758,7 @@ static void do_test_pprint(int test_num)
ret = snprintf(pin_path, sizeof(pin_path), "%s/%s",
"/sys/fs/bpf", test->map_name);

if (CHECK(ret == sizeof(pin_path), "pin_path %s/%s is too long",
if (CHECK(ret >= sizeof(pin_path), "pin_path %s/%s is too long",
"/sys/fs/bpf", test->map_name)) {
err = -1;
goto done;

@@ -1,9 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */

#define _GNU_SOURCE
#include <netinet/in.h>
#include <arpa/inet.h>
#include <unistd.h>
#include <sched.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
@@ -21,6 +23,7 @@
enum bpf_linum_array_idx {
EGRESS_LINUM_IDX,
INGRESS_LINUM_IDX,
READ_SK_DST_PORT_LINUM_IDX,
__NR_BPF_LINUM_ARRAY_IDX,
};

@@ -43,8 +46,16 @@ static __u64 child_cg_id;
static int linum_map_fd;
static __u32 duration;

static __u32 egress_linum_idx = EGRESS_LINUM_IDX;
static __u32 ingress_linum_idx = INGRESS_LINUM_IDX;
static bool create_netns(void)
{
if (!ASSERT_OK(unshare(CLONE_NEWNET), "create netns"))
return false;

if (!ASSERT_OK(system("ip link set dev lo up"), "bring up lo"))
return false;

return true;
}

static void print_sk(const struct bpf_sock *sk, const char *prefix)
{
@@ -92,19 +103,24 @@ static void check_result(void)
{
struct bpf_tcp_sock srv_tp, cli_tp, listen_tp;
struct bpf_sock srv_sk, cli_sk, listen_sk;
__u32 ingress_linum, egress_linum;
__u32 idx, ingress_linum, egress_linum, linum;
int err;

err = bpf_map_lookup_elem(linum_map_fd, &egress_linum_idx,
&egress_linum);
idx = EGRESS_LINUM_IDX;
err = bpf_map_lookup_elem(linum_map_fd, &idx, &egress_linum);
CHECK(err == -1, "bpf_map_lookup_elem(linum_map_fd)",
"err:%d errno:%d\n", err, errno);

err = bpf_map_lookup_elem(linum_map_fd, &ingress_linum_idx,
&ingress_linum);
idx = INGRESS_LINUM_IDX;
err = bpf_map_lookup_elem(linum_map_fd, &idx, &ingress_linum);
CHECK(err == -1, "bpf_map_lookup_elem(linum_map_fd)",
"err:%d errno:%d\n", err, errno);

idx = READ_SK_DST_PORT_LINUM_IDX;
err = bpf_map_lookup_elem(linum_map_fd, &idx, &linum);
ASSERT_OK(err, "bpf_map_lookup_elem(linum_map_fd, READ_SK_DST_PORT_IDX)");
ASSERT_EQ(linum, 0, "failure in read_sk_dst_port on line");

memcpy(&srv_sk, &skel->bss->srv_sk, sizeof(srv_sk));
memcpy(&srv_tp, &skel->bss->srv_tp, sizeof(srv_tp));
memcpy(&cli_sk, &skel->bss->cli_sk, sizeof(cli_sk));
@@ -263,7 +279,7 @@ static void test(void)
char buf[DATA_LEN];

/* Prepare listen_fd */
listen_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0);
listen_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0xcafe, 0);
/* start_server() has logged the error details */
if (CHECK_FAIL(listen_fd == -1))
goto done;
@@ -331,8 +347,12 @@ done:

void test_sock_fields(void)
{
struct bpf_link *egress_link = NULL, *ingress_link = NULL;
int parent_cg_fd = -1, child_cg_fd = -1;
struct bpf_link *link;

/* Use a dedicated netns to have a fixed listen port */
if (!create_netns())
return;

/* Create a cgroup, get fd, and join it */
parent_cg_fd = test__join_cgroup(PARENT_CGROUP);
@@ -353,17 +373,20 @@ void test_sock_fields(void)
if (CHECK(!skel, "test_sock_fields__open_and_load", "failed\n"))
goto done;

egress_link = bpf_program__attach_cgroup(skel->progs.egress_read_sock_fields,
child_cg_fd);
if (CHECK(IS_ERR(egress_link), "attach_cgroup(egress)", "err:%ld\n",
PTR_ERR(egress_link)))
link = bpf_program__attach_cgroup(skel->progs.egress_read_sock_fields, child_cg_fd);
if (!ASSERT_OK_PTR(link, "attach_cgroup(egress_read_sock_fields)"))
goto done;
skel->links.egress_read_sock_fields = link;

ingress_link = bpf_program__attach_cgroup(skel->progs.ingress_read_sock_fields,
child_cg_fd);
if (CHECK(IS_ERR(ingress_link), "attach_cgroup(ingress)", "err:%ld\n",
PTR_ERR(ingress_link)))
link = bpf_program__attach_cgroup(skel->progs.ingress_read_sock_fields, child_cg_fd);
if (!ASSERT_OK_PTR(link, "attach_cgroup(ingress_read_sock_fields)"))
goto done;
skel->links.ingress_read_sock_fields = link;

link = bpf_program__attach_cgroup(skel->progs.read_sk_dst_port, child_cg_fd);
if (!ASSERT_OK_PTR(link, "attach_cgroup(read_sk_dst_port"))
goto done;
skel->links.read_sk_dst_port = link;

linum_map_fd = bpf_map__fd(skel->maps.linum_map);
sk_pkt_out_cnt_fd = bpf_map__fd(skel->maps.sk_pkt_out_cnt);
@@ -372,8 +395,7 @@ void test_sock_fields(void)
test();

done:
bpf_link__destroy(egress_link);
bpf_link__destroy(ingress_link);
test_sock_fields__detach(skel);
test_sock_fields__destroy(skel);
if (child_cg_fd != -1)
close(child_cg_fd);

@@ -94,7 +94,7 @@ typedef void (* (*signal_t)(int, void (*)(int)))(int);

typedef char * (*fn_ptr_arr1_t[10])(int **);

typedef char * (* const (* const fn_ptr_arr2_t[5])())(char * (*)(int));
typedef char * (* (* const fn_ptr_arr2_t[5])())(char * (*)(int));

struct struct_w_typedefs {
int_t a;

@@ -12,6 +12,7 @@
enum bpf_linum_array_idx {
EGRESS_LINUM_IDX,
INGRESS_LINUM_IDX,
READ_SK_DST_PORT_LINUM_IDX,
__NR_BPF_LINUM_ARRAY_IDX,
};

@@ -250,4 +251,48 @@ int ingress_read_sock_fields(struct __sk_buff *skb)
return CG_OK;
}

static __noinline bool sk_dst_port__load_word(struct bpf_sock *sk)
{
__u32 *word = (__u32 *)&sk->dst_port;
return word[0] == bpf_htonl(0xcafe0000);
}

static __noinline bool sk_dst_port__load_half(struct bpf_sock *sk)
{
__u16 *half = (__u16 *)&sk->dst_port;
return half[0] == bpf_htons(0xcafe);
}

static __noinline bool sk_dst_port__load_byte(struct bpf_sock *sk)
{
__u8 *byte = (__u8 *)&sk->dst_port;
return byte[0] == 0xca && byte[1] == 0xfe;
}

SEC("cgroup_skb/egress")
int read_sk_dst_port(struct __sk_buff *skb)
{
__u32 linum, linum_idx;
struct bpf_sock *sk;

linum_idx = READ_SK_DST_PORT_LINUM_IDX;

sk = skb->sk;
if (!sk)
RET_LOG();

/* Ignore everything but the SYN from the client socket */
if (sk->state != BPF_TCP_SYN_SENT)
return CG_OK;

if (!sk_dst_port__load_word(sk))
RET_LOG();
if (!sk_dst_port__load_half(sk))
RET_LOG();
if (!sk_dst_port__load_byte(sk))
RET_LOG();

return CG_OK;
}

char _license[] SEC("license") = "GPL";

@@ -100,7 +100,7 @@ struct bpf_test {
enum bpf_prog_type prog_type;
uint8_t flags;
void (*fill_helper)(struct bpf_test *self);
uint8_t runs;
int runs;
#define bpf_testdata_struct_t \
struct { \
uint32_t retval, retval_unpriv; \
@@ -1054,7 +1054,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv,

run_errs = 0;
run_successes = 0;
if (!alignment_prevented_execution && fd_prog >= 0) {
if (!alignment_prevented_execution && fd_prog >= 0 && test->runs >= 0) {
uint32_t expected_val;
int i;

@@ -239,6 +239,7 @@
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SK_LOOKUP,
.expected_attach_type = BPF_SK_LOOKUP,
.runs = -1,
},
/* invalid 8-byte reads from a 4-byte fields in bpf_sk_lookup */
{

@@ -901,3 +901,39 @@
.result_unpriv = REJECT,
.errstr_unpriv = "unknown func",
},
{
"reference tracking: try to leak released ptr reg",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_9, BPF_REG_0),

BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_MOV64_IMM(BPF_REG_2, 8),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_ringbuf_reserve),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),

BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_EMIT_CALL(BPF_FUNC_ringbuf_discard),
BPF_MOV64_IMM(BPF_REG_0, 0),

BPF_STX_MEM(BPF_DW, BPF_REG_9, BPF_REG_8, 0),
BPF_EXIT_INSN()
},
.fixup_map_array_48b = { 4 },
.fixup_map_ringbuf = { 11 },
.result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "R8 !read_ok"
},

@@ -121,7 +121,25 @@
.result = ACCEPT,
},
{
"sk_fullsock(skb->sk): sk->dst_port [narrow load]",
"sk_fullsock(skb->sk): sk->dst_port [word load] (backward compatibility)",
.insns = {
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, dst_port)),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
.result = ACCEPT,
},
{
"sk_fullsock(skb->sk): sk->dst_port [half load]",
.insns = {
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
@@ -139,7 +157,7 @@
.result = ACCEPT,
},
{
"sk_fullsock(skb->sk): sk->dst_port [load 2nd byte]",
"sk_fullsock(skb->sk): sk->dst_port [half load] (invalid)",
.insns = {
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
@@ -149,7 +167,64 @@
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, dst_port) + 1),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, dst_port) + 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
.result = REJECT,
.errstr = "invalid sock access",
},
{
"sk_fullsock(skb->sk): sk->dst_port [byte load]",
.insns = {
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_0, offsetof(struct bpf_sock, dst_port)),
BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_0, offsetof(struct bpf_sock, dst_port) + 1),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
.result = ACCEPT,
},
{
"sk_fullsock(skb->sk): sk->dst_port [byte load] (invalid)",
.insns = {
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, dst_port) + 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
.result = REJECT,
.errstr = "invalid sock access",
},
{
"sk_fullsock(skb->sk): past sk->dst_port [half load] (invalid)",
.insns = {
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_0, offsetofend(struct bpf_sock, dst_port)),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},

@@ -219,7 +219,7 @@ int cg_find_unified_root(char *root, size_t len)

int cg_create(const char *cgroup)
{
return mkdir(cgroup, 0644);
return mkdir(cgroup, 0755);
}

int cg_wait_for_proc_count(const char *cgroup, int count)
@@ -337,13 +337,13 @@ pid_t clone_into_cgroup(int cgroup_fd)
#ifdef CLONE_ARGS_SIZE_VER2
pid_t pid;

struct clone_args args = {
struct __clone_args args = {
.flags = CLONE_INTO_CGROUP,
.exit_signal = SIGCHLD,
.cgroup = cgroup_fd,
};

pid = sys_clone3(&args, sizeof(struct clone_args));
pid = sys_clone3(&args, sizeof(struct __clone_args));
/*
* Verify that this is a genuine test failure:
* ENOSYS -> clone3() not available

@@ -1,11 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0 */

#define _GNU_SOURCE
#include <linux/limits.h>
#include <linux/sched.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
#include <errno.h>
#include <signal.h>
@@ -674,6 +677,166 @@ cleanup:
return ret;
}

/*
* cgroup migration permission check should be performed based on the
* credentials at the time of open instead of write.
*/
static int test_cgcore_lesser_euid_open(const char *root)
{
const uid_t test_euid = 65534; /* usually nobody, any !root is fine */
int ret = KSFT_FAIL;
char *cg_test_a = NULL, *cg_test_b = NULL;
char *cg_test_a_procs = NULL, *cg_test_b_procs = NULL;
int cg_test_b_procs_fd = -1;
uid_t saved_uid;

cg_test_a = cg_name(root, "cg_test_a");
cg_test_b = cg_name(root, "cg_test_b");

if (!cg_test_a || !cg_test_b)
goto cleanup;

cg_test_a_procs = cg_name(cg_test_a, "cgroup.procs");
cg_test_b_procs = cg_name(cg_test_b, "cgroup.procs");

if (!cg_test_a_procs || !cg_test_b_procs)
goto cleanup;

if (cg_create(cg_test_a) || cg_create(cg_test_b))
goto cleanup;

if (cg_enter_current(cg_test_a))
goto cleanup;

if (chown(cg_test_a_procs, test_euid, -1) ||
chown(cg_test_b_procs, test_euid, -1))
goto cleanup;

saved_uid = geteuid();
if (seteuid(test_euid))
goto cleanup;

cg_test_b_procs_fd = open(cg_test_b_procs, O_RDWR);

if (seteuid(saved_uid))
goto cleanup;

if (cg_test_b_procs_fd < 0)
goto cleanup;

if (write(cg_test_b_procs_fd, "0", 1) >= 0 || errno != EACCES)
goto cleanup;

ret = KSFT_PASS;

cleanup:
cg_enter_current(root);
if (cg_test_b_procs_fd >= 0)
close(cg_test_b_procs_fd);
if (cg_test_b)
cg_destroy(cg_test_b);
if (cg_test_a)
cg_destroy(cg_test_a);
free(cg_test_b_procs);
free(cg_test_a_procs);
free(cg_test_b);
free(cg_test_a);
return ret;
}

struct lesser_ns_open_thread_arg {
const char *path;
int fd;
int err;
};

static int lesser_ns_open_thread_fn(void *arg)
{
struct lesser_ns_open_thread_arg *targ = arg;

targ->fd = open(targ->path, O_RDWR);
targ->err = errno;
return 0;
}

/*
* cgroup migration permission check should be performed based on the cgroup
* namespace at the time of open instead of write.
*/
static int test_cgcore_lesser_ns_open(const char *root)
{
static char stack[65536];
const uid_t test_euid = 65534; /* usually nobody, any !root is fine */
int ret = KSFT_FAIL;
char *cg_test_a = NULL, *cg_test_b = NULL;
char *cg_test_a_procs = NULL, *cg_test_b_procs = NULL;
int cg_test_b_procs_fd = -1;
struct lesser_ns_open_thread_arg targ = { .fd = -1 };
pid_t pid;
int status;

cg_test_a = cg_name(root, "cg_test_a");
cg_test_b = cg_name(root, "cg_test_b");

if (!cg_test_a || !cg_test_b)
goto cleanup;

cg_test_a_procs = cg_name(cg_test_a, "cgroup.procs");
cg_test_b_procs = cg_name(cg_test_b, "cgroup.procs");

if (!cg_test_a_procs || !cg_test_b_procs)
goto cleanup;

if (cg_create(cg_test_a) || cg_create(cg_test_b))
goto cleanup;

if (cg_enter_current(cg_test_b))
goto cleanup;

if (chown(cg_test_a_procs, test_euid, -1) ||
chown(cg_test_b_procs, test_euid, -1))
goto cleanup;

targ.path = cg_test_b_procs;
pid = clone(lesser_ns_open_thread_fn, stack + sizeof(stack),
CLONE_NEWCGROUP | CLONE_FILES | CLONE_VM | SIGCHLD,
&targ);
if (pid < 0)
goto cleanup;

if (waitpid(pid, &status, 0) < 0)
goto cleanup;

if (!WIFEXITED(status))
goto cleanup;

cg_test_b_procs_fd = targ.fd;
if (cg_test_b_procs_fd < 0)
goto cleanup;

if (cg_enter_current(cg_test_a))
goto cleanup;

if ((status = write(cg_test_b_procs_fd, "0", 1)) >= 0 || errno != ENOENT)
goto cleanup;

ret = KSFT_PASS;

cleanup:
cg_enter_current(root);
if (cg_test_b_procs_fd >= 0)
close(cg_test_b_procs_fd);
if (cg_test_b)
cg_destroy(cg_test_b);
if (cg_test_a)
cg_destroy(cg_test_a);
free(cg_test_b_procs);
free(cg_test_a_procs);
free(cg_test_b);
free(cg_test_a);
return ret;
}

#define T(x) { x, #x }
struct corecg_test {
int (*fn)(const char *root);
@@ -689,6 +852,8 @@ struct corecg_test {
T(test_cgcore_proc_migration),
T(test_cgcore_thread_migration),
T(test_cgcore_destroy),
T(test_cgcore_lesser_euid_open),
T(test_cgcore_lesser_ns_open),
};
#undef T

@@ -1,4 +1,4 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0

./with_stress.sh -s subsys -s fork ./test_core
./with_stress.sh -s subsys -s fork ${OUTPUT:-.}/test_core

@@ -172,6 +172,17 @@ flooding_filters_add()
local lsb
local i

# Prevent unwanted packets from entering the bridge and interfering
# with the test.
tc qdisc add dev br0 clsact
tc filter add dev br0 egress protocol all pref 1 handle 1 \
matchall skip_hw action drop
tc qdisc add dev $h1 clsact
tc filter add dev $h1 egress protocol all pref 1 handle 1 \
flower skip_hw dst_mac de:ad:be:ef:13:37 action pass
tc filter add dev $h1 egress protocol all pref 2 handle 2 \
matchall skip_hw action drop

tc qdisc add dev $rp2 clsact

for i in $(eval echo {1..$num_remotes}); do
@@ -194,6 +205,12 @@ flooding_filters_del()
done

tc qdisc del dev $rp2 clsact

tc filter del dev $h1 egress protocol all pref 2 handle 2 matchall
tc filter del dev $h1 egress protocol all pref 1 handle 1 flower
tc qdisc del dev $h1 clsact
tc filter del dev br0 egress protocol all pref 1 handle 1 matchall
tc qdisc del dev br0 clsact
}

flooding_check_packets()

@@ -185,7 +185,7 @@ setup_prepare()

tc filter add dev $eth0 ingress chain $(IS2 0 0) pref 1 \
protocol ipv4 flower skip_sw ip_proto udp dst_port 5201 \
action police rate 50mbit burst 64k \
action police rate 50mbit burst 64k conform-exceed drop/pipe \
action goto chain $(IS2 1 0)
}

@@ -21,7 +21,6 @@ check_error 'p:^/bar vfs_read' # NO_GROUP_NAME
check_error 'p:^12345678901234567890123456789012345678901234567890123456789012345/bar vfs_read' # GROUP_TOO_LONG

check_error 'p:^foo.1/bar vfs_read' # BAD_GROUP_NAME
check_error 'p:foo/^ vfs_read' # NO_EVENT_NAME
check_error 'p:foo/^12345678901234567890123456789012345678901234567890123456789012345 vfs_read' # EVENT_TOO_LONG
check_error 'p:foo/^bar.1 vfs_read' # BAD_EVENT_NAME

@@ -3,11 +3,11 @@ INCLUDES := -I../include -I../../
CFLAGS := $(CFLAGS) -g -O2 -Wall -D_GNU_SOURCE -pthread $(INCLUDES)
LDLIBS := -lpthread -lrt

HEADERS := \
LOCAL_HDRS := \
../include/futextest.h \
../include/atomic.h \
../include/logging.h
TEST_GEN_FILES := \
TEST_GEN_PROGS := \
futex_wait_timeout \
futex_wait_wouldblock \
futex_requeue_pi \
@@ -21,5 +21,3 @@ TEST_PROGS := run.sh
top_srcdir = ../../../../..
KSFT_KHDR_INSTALL := 1
include ../../lib.mk

$(TEST_GEN_FILES): $(HEADERS)

@@ -2,10 +2,10 @@
CFLAGS := $(CFLAGS) -Wall -D_GNU_SOURCE
LDLIBS += -lm

uname_M := $(shell uname -m 2>/dev/null || echo not)
ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/x86/ -e s/x86_64/x86/)
ARCH ?= $(shell uname -m 2>/dev/null || echo not)
ARCH_PROCESSED := $(shell echo $(ARCH) | sed -e s/i.86/x86/ -e s/x86_64/x86/)

ifeq (x86,$(ARCH))
ifeq (x86,$(ARCH_PROCESSED))
TEST_GEN_FILES := msr aperf
endif

@@ -73,20 +73,19 @@ void ucall_uninit(struct kvm_vm *vm)

void ucall(uint64_t cmd, int nargs, ...)
{
struct ucall uc = {
.cmd = cmd,
};
struct ucall uc = {};
va_list va;
int i;

WRITE_ONCE(uc.cmd, cmd);
nargs = nargs <= UCALL_MAX_ARGS ? nargs : UCALL_MAX_ARGS;

va_start(va, nargs);
for (i = 0; i < nargs; ++i)
uc.args[i] = va_arg(va, uint64_t);
WRITE_ONCE(uc.args[i], va_arg(va, uint64_t));
va_end(va);

*ucall_exit_mmio_addr = (vm_vaddr_t)&uc;
WRITE_ONCE(*ucall_exit_mmio_addr, (vm_vaddr_t)&uc);
}

uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc)

@@ -1253,6 +1253,6 @@ uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,

asm volatile("vmcall"
: "=a"(r)
: "b"(a0), "c"(a1), "d"(a2), "S"(a3));
: "a"(nr), "b"(a0), "c"(a1), "d"(a2), "S"(a3));
return r;
}

Some files were not shown because too many files have changed in this diff.