Merge tag 'pull-tcg-20241013' of https://gitlab.com/rth7680/qemu into staging

linux-user/i386: Emulate orig_ax
linux-user/vm86: Fix compilation with Clang
tcg: remove singlestep_enabled from DisasContextBase
accel/tcg: Add TCGCPUOps.tlb_fill_align
target/hppa: Handle alignment faults in hppa_get_physical_address
target/arm: Fix alignment fault priority in get_phys_addr_lpae

# -----BEGIN PGP SIGNATURE-----
#
# iQFRBAABCgA7FiEEekgeeIaLTbaoWgXAZN846K9+IV8FAmcMRU4dHHJpY2hhcmQu
# aGVuZGVyc29uQGxpbmFyby5vcmcACgkQZN846K9+IV9dSQf+MUJq//oig+bDeUlQ
# v3uBMFVi1DBYI1Y/xVODADpn8Ltv5s9v7N+/phi+St2W65OzGNYviHvq/abeyhdo
# M40LGtOvjO6Mns+Z9NKTobtT8n4ap4JJyoFjuXFTHkMMDiQ/v7FkEJJoS3W2bemi
# zmKYF/vWe3bwI+d3+dyaUjA92gSs+Hlj8uEVBlzn3ubA19ZdvtyfKURPQynrkwlo
# dFtAOFRFBU6vrlJSBElxUfYO4jC4Cng19EOrWvIsuKAkACuhiHgah10i3WKw8Asz
# 1iRUYXe0EOlX2RYNTD+Oj5j0cViRylirgPtIhEIPBuDP7m1Jy1JO4dVARUJBBU71
# Zd4Uuw==
# =EX+a
# -----END PGP SIGNATURE-----
# gpg: Signature made Sun 13 Oct 2024 23:10:22 BST
# gpg:                using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg:                issuer "[email protected]"
# gpg: Good signature from "Richard Henderson <[email protected]>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A  05C0 64DF 38E8 AF7E 215F

* tag 'pull-tcg-20241013' of https://gitlab.com/rth7680/qemu: (27 commits)
  target/arm: Fix alignment fault priority in get_phys_addr_lpae
  target/arm: Implement TCGCPUOps.tlb_fill_align
  target/arm: Move device detection earlier in get_phys_addr_lpae
  target/arm: Pass MemOp to get_phys_addr_lpae
  target/arm: Pass MemOp through get_phys_addr_twostage
  target/arm: Pass MemOp to get_phys_addr_nogpc
  target/arm: Pass MemOp to get_phys_addr_gpc
  target/arm: Pass MemOp to get_phys_addr_with_space_nogpc
  target/arm: Pass MemOp to get_phys_addr
  target/hppa: Implement TCGCPUOps.tlb_fill_align
  target/hppa: Handle alignment faults in hppa_get_physical_address
  target/hppa: Fix priority of T, D, and B page faults
  target/hppa: Perform access rights before protection id check
  target/hppa: Add MemOp argument to hppa_get_physical_address
  accel/tcg: Use the alignment test in tlb_fill_align
  accel/tcg: Add TCGCPUOps.tlb_fill_align
  include/exec/memop: Introduce memop_atomicity_bits
  include/exec/memop: Rename get_alignment_bits
  include/exec/memop: Move get_alignment_bits from tcg.h
  accel/tcg: Assert noreturn from write-only page for atomics
  ...

Signed-off-by: Peter Maydell <[email protected]>
pm215 committed Oct 14, 2024
2 parents b38d263 + e530581 commit 3860a2a
Showing 41 changed files with 463 additions and 358 deletions.
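
Before the per-file diffs, a quick illustration of the hook this series is built around. The call site in the accel/tcg/cputlb.c diff below invokes it as ops->tlb_fill_align(cpu, &full, addr, type, mmu_idx, memop, size, probe, ra) and, on success, installs the result with tlb_set_page_full(). A target implementation might therefore look roughly like the sketch that follows; the function name and body are illustrative only, not taken from the series.

    static bool my_cpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out,
                                      vaddr addr, MMUAccessType access_type,
                                      int mmu_idx, MemOp memop, int size,
                                      bool probe, uintptr_t retaddr)
    {
        /*
         * The point of the hook: the target decides whether the alignment
         * check happens before, after, or interleaved with the page walk.
         */
        if (addr & ((1 << memop_alignment_bits(memop)) - 1)) {
            if (probe) {
                return false;
            }
            /* queue the guest alignment exception here, then ... */
            cpu_loop_exit_restore(cs, retaddr);
        }

        /*
         * ... perform the translation; on failure, use the same
         * probe/raise pattern.  Identity mapping as a placeholder only.
         */
        out->phys_addr = addr & TARGET_PAGE_MASK;
        out->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        out->lg_page_size = TARGET_PAGE_BITS;
        out->attrs = MEMTXATTRS_UNSPECIFIED;
        return true;
    }

A target opts in by setting .tlb_fill_align in its TCGCPUOps; targets that do not are served by the legacy path in cputlb.c's tlb_fill_align(), which checks alignment before calling the old tlb_fill hook.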
160 changes: 82 additions & 78 deletions accel/tcg/cputlb.c
@@ -1221,22 +1221,35 @@ void tlb_set_page(CPUState *cpu, vaddr addr,
}

/*
* Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
* caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
* be discarded and looked up again (e.g. via tlb_entry()).
* Note: tlb_fill_align() can trigger a resize of the TLB.
* This means that all of the caller's prior references to the TLB table
* (e.g. CPUTLBEntry pointers) must be discarded and looked up again
* (e.g. via tlb_entry()).
*/
static void tlb_fill(CPUState *cpu, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
static bool tlb_fill_align(CPUState *cpu, vaddr addr, MMUAccessType type,
int mmu_idx, MemOp memop, int size,
bool probe, uintptr_t ra)
{
bool ok;
const TCGCPUOps *ops = cpu->cc->tcg_ops;
CPUTLBEntryFull full;

/*
* This is not a probe, so only valid return is success; failure
* should result in exception + longjmp to the cpu loop.
*/
ok = cpu->cc->tcg_ops->tlb_fill(cpu, addr, size,
access_type, mmu_idx, false, retaddr);
assert(ok);
if (ops->tlb_fill_align) {
if (ops->tlb_fill_align(cpu, &full, addr, type, mmu_idx,
memop, size, probe, ra)) {
tlb_set_page_full(cpu, mmu_idx, addr, &full);
return true;
}
} else {
/* Legacy behaviour is alignment before paging. */
if (addr & ((1u << memop_alignment_bits(memop)) - 1)) {
ops->do_unaligned_access(cpu, addr, type, mmu_idx, ra);
}
if (ops->tlb_fill(cpu, addr, size, type, mmu_idx, probe, ra)) {
return true;
}
}
assert(probe);
return false;
}

static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
@@ -1351,22 +1364,22 @@ static int probe_access_internal(CPUState *cpu, vaddr addr,

if (!tlb_hit_page(tlb_addr, page_addr)) {
if (!victim_tlb_hit(cpu, mmu_idx, index, access_type, page_addr)) {
if (!cpu->cc->tcg_ops->tlb_fill(cpu, addr, fault_size, access_type,
mmu_idx, nonfault, retaddr)) {
if (!tlb_fill_align(cpu, addr, access_type, mmu_idx,
0, fault_size, nonfault, retaddr)) {
/* Non-faulting page table read failed. */
*phost = NULL;
*pfull = NULL;
return TLB_INVALID_MASK;
}

/* TLB resize via tlb_fill may have moved the entry. */
/* TLB resize via tlb_fill_align may have moved the entry. */
index = tlb_index(cpu, mmu_idx, addr);
entry = tlb_entry(cpu, mmu_idx, addr);

/*
* With PAGE_WRITE_INV, we set TLB_INVALID_MASK immediately,
* to force the next access through tlb_fill. We've just
* called tlb_fill, so we know that this entry *is* valid.
* to force the next access through tlb_fill_align. We've just
* called tlb_fill_align, so we know that this entry *is* valid.
*/
flags &= ~TLB_INVALID_MASK;
}
@@ -1607,16 +1620,17 @@ typedef struct MMULookupLocals {
* mmu_lookup1: translate one page
* @cpu: generic cpu state
* @data: lookup parameters
* @memop: memory operation for the access, or 0
* @mmu_idx: virtual address context
* @access_type: load/store/code
* @ra: return address into tcg generated code, or 0
*
* Resolve the translation for the one page at @data.addr, filling in
* the rest of @data with the results. If the translation fails,
* tlb_fill will longjmp out. Return true if the softmmu tlb for
* tlb_fill_align will longjmp out. Return true if the softmmu tlb for
* @mmu_idx may have resized.
*/
static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data,
static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data, MemOp memop,
int mmu_idx, MMUAccessType access_type, uintptr_t ra)
{
vaddr addr = data->addr;
@@ -1631,7 +1645,8 @@ static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data,
if (!tlb_hit(tlb_addr, addr)) {
if (!victim_tlb_hit(cpu, mmu_idx, index, access_type,
addr & TARGET_PAGE_MASK)) {
tlb_fill(cpu, addr, data->size, access_type, mmu_idx, ra);
tlb_fill_align(cpu, addr, access_type, mmu_idx,
memop, data->size, false, ra);
maybe_resized = true;
index = tlb_index(cpu, mmu_idx, addr);
entry = tlb_entry(cpu, mmu_idx, addr);
@@ -1643,6 +1658,25 @@ static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data,
flags = tlb_addr & (TLB_FLAGS_MASK & ~TLB_FORCE_SLOW);
flags |= full->slow_flags[access_type];

if (likely(!maybe_resized)) {
/* Alignment has not been checked by tlb_fill_align. */
int a_bits = memop_alignment_bits(memop);

/*
* This alignment check differs from the one above, in that this is
* based on the atomicity of the operation. The intended use case is
* the ARM memory type field of each PTE, where access to pages with
* Device memory type require alignment.
*/
if (unlikely(flags & TLB_CHECK_ALIGNED)) {
int at_bits = memop_atomicity_bits(memop);
a_bits = MAX(a_bits, at_bits);
}
if (unlikely(addr & ((1 << a_bits) - 1))) {
cpu_unaligned_access(cpu, addr, access_type, mmu_idx, ra);
}
}

data->full = full;
data->flags = flags;
/* Compute haddr speculatively; depending on flags it might be invalid. */
@@ -1699,7 +1733,6 @@ static void mmu_watch_or_dirty(CPUState *cpu, MMULookupPageData *data,
static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
uintptr_t ra, MMUAccessType type, MMULookupLocals *l)
{
unsigned a_bits;
bool crosspage;
int flags;

@@ -1708,20 +1741,14 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,

tcg_debug_assert(l->mmu_idx < NB_MMU_MODES);

/* Handle CPU specific unaligned behaviour */
a_bits = get_alignment_bits(l->memop);
if (addr & ((1 << a_bits) - 1)) {
cpu_unaligned_access(cpu, addr, type, l->mmu_idx, ra);
}

l->page[0].addr = addr;
l->page[0].size = memop_size(l->memop);
l->page[1].addr = (addr + l->page[0].size - 1) & TARGET_PAGE_MASK;
l->page[1].size = 0;
crosspage = (addr ^ l->page[1].addr) & TARGET_PAGE_MASK;

if (likely(!crosspage)) {
mmu_lookup1(cpu, &l->page[0], l->mmu_idx, type, ra);
mmu_lookup1(cpu, &l->page[0], l->memop, l->mmu_idx, type, ra);

flags = l->page[0].flags;
if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) {
@@ -1740,8 +1767,8 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
* Lookup both pages, recognizing exceptions from either. If the
* second lookup potentially resized, refresh first CPUTLBEntryFull.
*/
mmu_lookup1(cpu, &l->page[0], l->mmu_idx, type, ra);
if (mmu_lookup1(cpu, &l->page[1], l->mmu_idx, type, ra)) {
mmu_lookup1(cpu, &l->page[0], l->memop, l->mmu_idx, type, ra);
if (mmu_lookup1(cpu, &l->page[1], 0, l->mmu_idx, type, ra)) {
uintptr_t index = tlb_index(cpu, l->mmu_idx, addr);
l->page[0].full = &cpu->neg.tlb.d[l->mmu_idx].fulltlb[index];
}
@@ -1760,31 +1787,6 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
tcg_debug_assert((flags & TLB_BSWAP) == 0);
}

/*
* This alignment check differs from the one above, in that this is
* based on the atomicity of the operation. The intended use case is
* the ARM memory type field of each PTE, where access to pages with
* Device memory type require alignment.
*/
if (unlikely(flags & TLB_CHECK_ALIGNED)) {
MemOp size = l->memop & MO_SIZE;

switch (l->memop & MO_ATOM_MASK) {
case MO_ATOM_NONE:
size = MO_8;
break;
case MO_ATOM_IFALIGN_PAIR:
case MO_ATOM_WITHIN16_PAIR:
size = size ? size - 1 : 0;
break;
default:
break;
}
if (addr & ((1 << size) - 1)) {
cpu_unaligned_access(cpu, addr, type, l->mmu_idx, ra);
}
}

return crosspage;
}

Expand All @@ -1797,34 +1799,18 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
{
uintptr_t mmu_idx = get_mmuidx(oi);
MemOp mop = get_memop(oi);
int a_bits = get_alignment_bits(mop);
uintptr_t index;
CPUTLBEntry *tlbe;
vaddr tlb_addr;
void *hostaddr;
CPUTLBEntryFull *full;
bool did_tlb_fill = false;

tcg_debug_assert(mmu_idx < NB_MMU_MODES);

/* Adjust the given return address. */
retaddr -= GETPC_ADJ;

/* Enforce guest required alignment. */
if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
/* ??? Maybe indicate atomic op to cpu_unaligned_access */
cpu_unaligned_access(cpu, addr, MMU_DATA_STORE,
mmu_idx, retaddr);
}

/* Enforce qemu required alignment. */
if (unlikely(addr & (size - 1))) {
/* We get here if guest alignment was not requested,
or was not enforced by cpu_unaligned_access above.
We might widen the access and emulate, but for now
mark an exception and exit the cpu loop. */
goto stop_the_world;
}

index = tlb_index(cpu, mmu_idx, addr);
tlbe = tlb_entry(cpu, mmu_idx, addr);

@@ -1833,8 +1819,9 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
if (!tlb_hit(tlb_addr, addr)) {
if (!victim_tlb_hit(cpu, mmu_idx, index, MMU_DATA_STORE,
addr & TARGET_PAGE_MASK)) {
tlb_fill(cpu, addr, size,
MMU_DATA_STORE, mmu_idx, retaddr);
tlb_fill_align(cpu, addr, MMU_DATA_STORE, mmu_idx,
mop, size, false, retaddr);
did_tlb_fill = true;
index = tlb_index(cpu, mmu_idx, addr);
tlbe = tlb_entry(cpu, mmu_idx, addr);
}
@@ -1848,15 +1835,32 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
* but addr_read will only be -1 if PAGE_READ was unset.
*/
if (unlikely(tlbe->addr_read == -1)) {
tlb_fill(cpu, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
tlb_fill_align(cpu, addr, MMU_DATA_LOAD, mmu_idx,
0, size, false, retaddr);
/*
* Since we don't support reads and writes to different
* addresses, and we do have the proper page loaded for
* write, this shouldn't ever return. But just in case,
* handle via stop-the-world.
* write, this shouldn't ever return.
*/
g_assert_not_reached();
}

/* Enforce guest required alignment, if not handled by tlb_fill_align. */
if (!did_tlb_fill && (addr & ((1 << memop_alignment_bits(mop)) - 1))) {
cpu_unaligned_access(cpu, addr, MMU_DATA_STORE, mmu_idx, retaddr);
}

/* Enforce qemu required alignment. */
if (unlikely(addr & (size - 1))) {
/*
* We get here if guest alignment was not requested, or was not
* enforced by cpu_unaligned_access or tlb_fill_align above.
* We might widen the access and emulate, but for now
* mark an exception and exit the cpu loop.
*/
goto stop_the_world;
}

/* Collect tlb flags for read. */
tlb_addr |= tlbe->addr_read;

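
An aside on the helpers used above: the MO_ATOM_* switch deleted from mmu_lookup() is what the commit list introduces as memop_atomicity_bits (include/exec/memop.h). Reconstructed from the removed code — treat this as a sketch, not the patch itself — the helper plausibly amounts to:

    /* Sketch reconstructed from the switch removed from mmu_lookup() above. */
    static inline unsigned memop_atomicity_bits(MemOp memop)
    {
        unsigned size = memop & MO_SIZE;

        switch (memop & MO_ATOM_MASK) {
        case MO_ATOM_NONE:
            size = MO_8;                  /* no atomicity required */
            break;
        case MO_ATOM_IFALIGN_PAIR:
        case MO_ATOM_WITHIN16_PAIR:
            size = size ? size - 1 : 0;   /* only the two halves need be atomic */
            break;
        default:
            break;
        }
        return size;
    }

With TLB_CHECK_ALIGNED set, mmu_lookup1() now takes MAX(memop_alignment_bits(memop), memop_atomicity_bits(memop)), matching the old behaviour for Arm pages of Device memory type.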
1 change: 0 additions & 1 deletion accel/tcg/translator.c
@@ -129,7 +129,6 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
db->is_jmp = DISAS_NEXT;
db->num_insns = 0;
db->max_insns = *max_insns;
db->singlestep_enabled = cflags & CF_SINGLE_STEP;
db->insn_start = NULL;
db->fake_insn = false;
db->host_addr[0] = host_pc;
4 changes: 2 additions & 2 deletions accel/tcg/user-exec.c
@@ -954,7 +954,7 @@ void page_reset_target_data(target_ulong start, target_ulong last) { }
static void *cpu_mmu_lookup(CPUState *cpu, vaddr addr,
MemOp mop, uintptr_t ra, MMUAccessType type)
{
int a_bits = get_alignment_bits(mop);
int a_bits = memop_alignment_bits(mop);
void *ret;

/* Enforce guest required alignment. */
@@ -1236,7 +1236,7 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
int size, uintptr_t retaddr)
{
MemOp mop = get_memop(oi);
int a_bits = get_alignment_bits(mop);
int a_bits = memop_alignment_bits(mop);
void *ret;

/* Enforce guest required alignment. */
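
The get_alignment_bits → memop_alignment_bits change here is a pure rename (the commit list also moves it from tcg.h to include/exec/memop.h). For readers without the header to hand, the pre-existing helper decodes MO_AMASK along these lines — an approximation from memory, not part of the patch:

    static inline unsigned memop_alignment_bits(MemOp memop)
    {
        unsigned a = memop & MO_AMASK;

        if (a == MO_UNALN) {
            a = 0;                  /* no alignment required */
        } else if (a == MO_ALIGN) {
            a = memop & MO_SIZE;    /* natural alignment for the access size */
        } else {
            a = a >> MO_ASHIFT;     /* explicit MO_ALIGN_2 .. MO_ALIGN_64 */
        }
        return a;
    }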
2 changes: 1 addition & 1 deletion configs/targets/i386-linux-user.mak
@@ -1,4 +1,4 @@
TARGET_ARCH=i386
TARGET_SYSTBL_ABI=i386
TARGET_SYSTBL=syscall_32.tbl
TARGET_XML_FILES= gdb-xml/i386-32bit.xml
TARGET_XML_FILES= gdb-xml/i386-32bit.xml gdb-xml/i386-32bit-linux.xml
2 changes: 1 addition & 1 deletion configs/targets/x86_64-linux-user.mak
@@ -2,4 +2,4 @@ TARGET_ARCH=x86_64
TARGET_BASE_ARCH=i386
TARGET_SYSTBL_ABI=common,64
TARGET_SYSTBL=syscall_64.tbl
TARGET_XML_FILES= gdb-xml/i386-64bit.xml
TARGET_XML_FILES= gdb-xml/i386-64bit.xml gdb-xml/i386-64bit-linux.xml
11 changes: 11 additions & 0 deletions gdb-xml/i386-32bit-linux.xml
@@ -0,0 +1,11 @@
<?xml version="1.0"?>
<!-- Copyright (C) 2010-2024 Free Software Foundation, Inc.
Copying and distribution of this file, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. -->

<!DOCTYPE feature SYSTEM "gdb-target.dtd">
<feature name="org.gnu.gdb.i386.linux">
<reg name="orig_eax" bitsize="32" type="int"/>
</feature>
11 changes: 11 additions & 0 deletions gdb-xml/i386-64bit-linux.xml
@@ -0,0 +1,11 @@
<?xml version="1.0"?>
<!-- Copyright (C) 2010-2024 Free Software Foundation, Inc.
Copying and distribution of this file, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. -->

<!DOCTYPE feature SYSTEM "gdb-target.dtd">
<feature name="org.gnu.gdb.i386.linux">
<reg name="orig_rax" bitsize="64" type="int"/>
</feature>
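
The two XML snippets above advertise the org.gnu.gdb.i386.linux feature so that gdb sees the Linux-specific orig_eax/orig_rax register (the syscall number saved at syscall entry, which gdb uses when restarting interrupted syscalls). A quick way to check it against the user-mode gdbstub, assuming a test binary ./a.out and port 1234:

    qemu-x86_64 -g 1234 ./a.out &
    gdb ./a.out -ex 'target remote :1234' -ex 'info registers orig_rax'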
13 changes: 12 additions & 1 deletion include/exec/cpu-common.h
@@ -238,6 +238,17 @@ static inline ArchCPU *env_archcpu(CPUArchState *env)
return (void *)env - sizeof(CPUState);
}

/**
* env_cpu_const(env)
* @env: The architecture environment
*
* Return the CPUState associated with the environment.
*/
static inline const CPUState *env_cpu_const(const CPUArchState *env)
{
return (void *)env - sizeof(CPUState);
}

/**
* env_cpu(env)
* @env: The architecture environment
@@ -246,7 +257,7 @@ static inline ArchCPU *env_archcpu(CPUArchState *env)
*/
static inline CPUState *env_cpu(CPUArchState *env)
{
return (void *)env - sizeof(CPUState);
return (CPUState *)env_cpu_const(env);
}

#ifndef CONFIG_USER_ONLY
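
The new env_cpu_const() mirrors env_cpu() for const-qualified environments, so const-correct helpers no longer have to cast. A minimal, hypothetical use:

    /* Illustrative only: reach read-only CPUState fields from a const env. */
    static inline int my_env_cpu_index(const CPUArchState *env)
    {
        const CPUState *cs = env_cpu_const(env);
        return cs->cpu_index;
    }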