refactor(asmjit): remove library in preparation for upcoming changes

notcpuid
2025-07-03 13:51:23 +03:00
parent eae3635040
commit 497ec443d5
152 changed files with 0 additions and 95470 deletions


@@ -1,55 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_A64_H_INCLUDED
#define ASMJIT_A64_H_INCLUDED
//! \addtogroup asmjit_a64
//!
//! ### Emitters
//!
//! - \ref a64::Assembler - AArch64 assembler (must read, provides examples).
//! - \ref a64::Builder - AArch64 builder.
//! - \ref a64::Compiler - AArch64 compiler.
//! - \ref a64::Emitter - AArch64 emitter (abstract).
//!
//! ### Supported Instructions
//!
//! - Emitters:
//! - \ref a64::EmitterExplicitT - Provides all instructions that use explicit operands, as well as utility
//! functions. The member functions provided are part of all AArch64 emitters.
//!
//! - Instruction representation:
//! - \ref a64::Inst::Id - instruction identifiers.
//!
//! ### Register Operands
//!
//! - \ref a64::Gp - General purpose register (abstracts 32-bit and 64-bit general purpose registers).
//! - \ref a64::Vec - Vector register (abstracts B, H, S, D, and Q NEON registers with possible element type and index).
//!
//! ### Memory Operands
//!
//! - \ref a64::Mem - AArch64 memory operand that provides support for all ARM addressing features including base,
//! index, pre/post increment, and ARM-specific shift addressing and index extending.
//!
//! ### Other
//!
//! - \ref arm::Shift - Shift operation and value.
//! - \ref arm::Utils - Utilities that can help during code generation for AArch32 and AArch64.
#include "./arm.h"
#include "asmjit-scope-begin.h"
#include "arm/a64assembler.h"
#include "arm/a64builder.h"
#include "arm/a64compiler.h"
#include "arm/a64emitter.h"
#include "arm/a64globals.h"
#include "arm/a64instdb.h"
#include "arm/a64operand.h"
#include "asmjit-scope-end.h"
#endif // ASMJIT_A64_H_INCLUDED
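As a usage sketch (assuming AsmJit's documented JitRuntime API; not part of the removed file), the umbrella header above was typically consumed like this:

#include <asmjit/a64.h>
using namespace asmjit;

int main() {
  JitRuntime rt;                                  // runtime that allocates executable memory
  CodeHolder code;
  code.init(rt.environment(), rt.cpuFeatures());  // match the host environment

  a64::Assembler a(&code);                        // encodes directly into CodeHolder
  a.mov(a64::w0, 42);                             // return value goes to w0
  a.ret(a64::x30);                                // return via the link register

  int (*fn)(void) = nullptr;
  if (rt.add(&fn, &code) != kErrorOk) return 1;   // relocate + make executable
  int result = fn();                              // result == 42
  rt.release(fn);
  return result == 42 ? 0 : 1;
}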


@@ -1,77 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_ARM_H_INCLUDED
#define ASMJIT_ARM_H_INCLUDED
//! \addtogroup asmjit_arm
//!
//! ### Namespaces
//!
//! - \ref arm - arm namespace provides common functionality for both AArch32 and AArch64 backends.
//! - \ref a32 - a32 namespace provides support for the AArch32 architecture. In addition it includes the
//! \ref arm namespace, so a single namespace is enough when targeting AArch32.
//! - \ref a64 - a64 namespace provides support for the AArch64 architecture. In addition it includes the
//! \ref arm namespace, so a single namespace is enough when targeting AArch64.
//!
//! ### Emitters
//!
//! - AArch32
//! - \ref a32::Assembler - AArch32 assembler (must read, provides examples).
//! - \ref a32::Builder - AArch32 builder.
//! - \ref a32::Compiler - AArch32 compiler.
//! - \ref a32::Emitter - AArch32 emitter (abstract).
//!
//! - AArch64
//! - \ref a64::Assembler - AArch64 assembler (must read, provides examples).
//! - \ref a64::Builder - AArch64 builder.
//! - \ref a64::Compiler - AArch64 compiler.
//! - \ref a64::Emitter - AArch64 emitter (abstract).
//!
//! ### Supported Instructions
//!
//! - AArch32:
//! - Emitters:
//! - \ref a32::EmitterExplicitT - Provides all instructions that use explicit operands, as well as
//! utility functions. The member functions provided are part of all AArch32 emitters.
//! - Instruction representation:
//! - \ref a32::Inst::Id - instruction identifiers.
//!
//! - AArch64:
//! - Emitters:
//! - \ref a64::EmitterExplicitT - Provides all instructions that use explicit operands, as well as
//! utility functions. The member functions provided are part of all AArch64 emitters.
//! - Instruction representation:
//! - \ref a64::Inst::Id - instruction identifiers.
//!
//! ### Register Operands
//!
//! - AArch32:
//! - \ref a32::Gp - 32-bit general purpose register used by AArch32.
//! - \ref a32::Vec - Vector (SIMD) register.
//!
//! - AArch64:
//! - \ref a64::Gp - 32-bit or 64-bit general purpose register used by AArch64.
//! - \ref a64::Vec - Vector (SIMD) register.
//!
//! ### Memory Operands
//!
//! - \ref arm::Mem - AArch32/AArch64 memory operand that provides support for all ARM addressing features
//! including base, index, pre/post increment, and ARM-specific shift addressing and index extending.
//!
//! ### Other
//!
//! - \ref arm::Shift - Shift operation and value (both AArch32 and AArch64).
//! - \ref arm::DataType - Data type that is part of an instruction in AArch32 mode.
//! - \ref arm::Utils - Utilities that can help during code generation for AArch32 and AArch64.
#include "core.h"
#include "asmjit-scope-begin.h"
#include "arm/armglobals.h"
#include "arm/armutils.h"
#include "asmjit-scope-end.h"
#endif // ASMJIT_ARM_H_INCLUDED
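A brief sketch of the namespace layering described above (assumes only the public headers shown in this diff):

using namespace asmjit;

a64::Gp dst = a64::x0;                  // a64 namespace: AArch64-specific registers
arm::Shift sh(arm::ShiftOp::kLSL, 4);   // arm namespace: shared by AArch32/AArch64
// Because a64 re-exports arm, the same type is also reachable as a64::Shift.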


@@ -1,82 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_ARM_A64ARCHTRAITS_P_H_INCLUDED
#define ASMJIT_ARM_A64ARCHTRAITS_P_H_INCLUDED
#include "../core/archtraits.h"
#include "../core/misc_p.h"
#include "../core/type.h"
#include "../arm/a64globals.h"
#include "../arm/a64operand.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
//! \cond INTERNAL
//! \addtogroup asmjit_a64
//! \{
static const constexpr ArchTraits a64ArchTraits = {
// SP/FP/LR/PC.
Gp::kIdSp, Gp::kIdFp, Gp::kIdLr, 0xFFu,
// Reserved.
{ 0u, 0u, 0u },
// HW stack alignment (AArch64 requires stack aligned to 16 bytes at HW level).
16u,
// Min/max stack offset - byte addressing is the worst, vec.q addressing the best.
4095, 65520,
// Supported register types.
0u | (1u << uint32_t(RegType::kGp32 ))
| (1u << uint32_t(RegType::kGp64 ))
| (1u << uint32_t(RegType::kVec8 ))
| (1u << uint32_t(RegType::kVec16 ))
| (1u << uint32_t(RegType::kVec32 ))
| (1u << uint32_t(RegType::kVec64 ))
| (1u << uint32_t(RegType::kVec128))
| (1u << uint32_t(RegType::kMask )),
// Instruction hints [Gp, Vec, Mask, Extra].
{{
InstHints::kPushPop,
InstHints::kPushPop,
InstHints::kNoHints,
InstHints::kNoHints
}},
// TypeIdToRegType.
#define V(index) (index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kInt8) ? RegType::kGp32 : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kUInt8) ? RegType::kGp32 : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kInt16) ? RegType::kGp32 : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kUInt16) ? RegType::kGp32 : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kInt32) ? RegType::kGp32 : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kUInt32) ? RegType::kGp32 : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kInt64) ? RegType::kGp64 : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kUInt64) ? RegType::kGp64 : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kIntPtr) ? RegType::kGp64 : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kUIntPtr) ? RegType::kGp64 : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kFloat32) ? RegType::kVec32 : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kFloat64) ? RegType::kVec64 : RegType::kNone)
{{ ASMJIT_LOOKUP_TABLE_32(V, 0) }},
#undef V
// Word names of 8-bit, 16-bit, 32-bit, and 64-bit quantities.
{
ArchTypeNameId::kByte,
ArchTypeNameId::kHWord,
ArchTypeNameId::kWord,
ArchTypeNameId::kXWord
}
};
//! \}
//! \endcond
ASMJIT_END_SUB_NAMESPACE
#endif // ASMJIT_ARM_A64ARCHTRAITS_P_H_INCLUDED
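To make the ternary chain above easier to read, this is the mapping it encodes (indices are relative to TypeId::_kBaseStart; an illustrative summary, not code from the removed file):

// kInt8..kUInt32    -> RegType::kGp32   (w registers)
// kInt64..kUIntPtr  -> RegType::kGp64   (x registers)
// kFloat32          -> RegType::kVec32  (s registers)
// kFloat64          -> RegType::kVec64  (d registers)
// any other TypeId  -> RegType::kNone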

File diff suppressed because it is too large.


@@ -1,61 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_ARM_A64ASSEMBLER_H_INCLUDED
#define ASMJIT_ARM_A64ASSEMBLER_H_INCLUDED
#include "../core/assembler.h"
#include "../arm/a64emitter.h"
#include "../arm/a64operand.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
//! \addtogroup asmjit_a64
//! \{
//! AArch64 assembler implementation.
class ASMJIT_VIRTAPI Assembler
: public BaseAssembler,
public EmitterExplicitT<Assembler> {
public:
using Base = BaseAssembler;
//! \name Construction & Destruction
//! \{
ASMJIT_API Assembler(CodeHolder* code = nullptr) noexcept;
ASMJIT_API ~Assembler() noexcept override;
//! \}
//! \name Emit
//! \{
ASMJIT_API Error _emit(InstId instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) override;
//! \}
//! \name Align
//! \{
ASMJIT_API Error align(AlignMode alignMode, uint32_t alignment) override;
//! \}
//! \name Events
//! \{
ASMJIT_API Error onAttach(CodeHolder& code) noexcept override;
ASMJIT_API Error onDetach(CodeHolder& code) noexcept override;
//! \}
};
//! \}
ASMJIT_END_SUB_NAMESPACE
#endif // ASMJIT_ARM_A64ASSEMBLER_H_INCLUDED


@@ -1,57 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#if !defined(ASMJIT_NO_AARCH64) && !defined(ASMJIT_NO_BUILDER)
#include "../arm/a64assembler.h"
#include "../arm/a64builder.h"
#include "../arm/a64emithelper_p.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
// a64::Builder - Construction & Destruction
// =========================================
Builder::Builder(CodeHolder* code) noexcept : BaseBuilder() {
_archMask = uint64_t(1) << uint32_t(Arch::kAArch64);
initEmitterFuncs(this);
if (code) {
code->attach(this);
}
}
Builder::~Builder() noexcept {}
// a64::Builder - Events
// =====================
Error Builder::onAttach(CodeHolder& code) noexcept {
ASMJIT_PROPAGATE(Base::onAttach(code));
_instructionAlignment = uint8_t(4);
updateEmitterFuncs(this);
return kErrorOk;
}
Error Builder::onDetach(CodeHolder& code) noexcept {
return Base::onDetach(code);
}
// a64::Builder - Finalize
// =======================
Error Builder::finalize() {
ASMJIT_PROPAGATE(runPasses());
Assembler a(_code);
a.addEncodingOptions(encodingOptions());
a.addDiagnosticOptions(diagnosticOptions());
return serializeTo(&a);
}
ASMJIT_END_SUB_NAMESPACE
#endif // !ASMJIT_NO_AARCH64 && !ASMJIT_NO_BUILDER


@@ -1,57 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_ARM_A64BUILDER_H_INCLUDED
#define ASMJIT_ARM_A64BUILDER_H_INCLUDED
#include "../core/api-config.h"
#ifndef ASMJIT_NO_BUILDER
#include "../core/builder.h"
#include "../arm/a64emitter.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
//! \addtogroup asmjit_a64
//! \{
//! AArch64 builder implementation.
class ASMJIT_VIRTAPI Builder
: public BaseBuilder,
public EmitterExplicitT<Builder> {
public:
ASMJIT_NONCOPYABLE(Builder)
using Base = BaseBuilder;
//! \name Construction & Destruction
//! \{
ASMJIT_API explicit Builder(CodeHolder* code = nullptr) noexcept;
ASMJIT_API ~Builder() noexcept override;
//! \}
//! \name Events
//! \{
ASMJIT_API Error onAttach(CodeHolder& code) noexcept override;
ASMJIT_API Error onDetach(CodeHolder& code) noexcept override;
//! \}
//! \name Finalize
//! \{
ASMJIT_API Error finalize() override;
//! \}
};
//! \}
ASMJIT_END_SUB_NAMESPACE
#endif // !ASMJIT_NO_BUILDER
#endif // ASMJIT_ARM_A64BUILDER_H_INCLUDED
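A hedged usage sketch: unlike the Assembler, the Builder records nodes that can be inspected or patched before finalize() serializes them through a temporary a64::Assembler (see the implementation above; rt is assumed to be an existing JitRuntime):

CodeHolder code;
code.init(rt.environment(), rt.cpuFeatures());

a64::Builder cb(&code);
cb.mov(a64::w0, 7);          // appends an InstNode instead of encoding bytes
cb.ret(a64::x30);
// nodes can be iterated here, e.g. starting at cb.firstNode()
cb.finalize();               // runPasses() + serializeTo(Assembler)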


@@ -1,72 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#if !defined(ASMJIT_NO_AARCH64) && !defined(ASMJIT_NO_COMPILER)
#include "../arm/a64assembler.h"
#include "../arm/a64compiler.h"
#include "../arm/a64emithelper_p.h"
#include "../arm/a64rapass_p.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
// a64::Compiler - Construction & Destruction
// ==========================================
Compiler::Compiler(CodeHolder* code) noexcept : BaseCompiler() {
_archMask = uint64_t(1) << uint32_t(Arch::kAArch64);
initEmitterFuncs(this);
if (code) {
code->attach(this);
}
}
Compiler::~Compiler() noexcept {}
// a64::Compiler - Events
// ======================
Error Compiler::onAttach(CodeHolder& code) noexcept {
ASMJIT_PROPAGATE(Base::onAttach(code));
Error err = addPassT<ARMRAPass>();
if (ASMJIT_UNLIKELY(err)) {
onDetach(code);
return err;
}
_instructionAlignment = uint8_t(4);
updateEmitterFuncs(this);
return kErrorOk;
}
Error Compiler::onDetach(CodeHolder& code) noexcept {
return Base::onDetach(code);
}
Error Compiler::onReinit(CodeHolder& code) noexcept {
Error err = Base::onReinit(code);
if (err == kErrorOk) {
err = addPassT<ARMRAPass>();
}
return err;
}
// a64::Compiler - Finalize
// ========================
Error Compiler::finalize() {
ASMJIT_PROPAGATE(runPasses());
Assembler a(_code);
a.addEncodingOptions(encodingOptions());
a.addDiagnosticOptions(diagnosticOptions());
return serializeTo(&a);
}
ASMJIT_END_SUB_NAMESPACE
#endif // !ASMJIT_NO_AARCH64 && !ASMJIT_NO_COMPILER


@@ -1,260 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_ARM_A64COMPILER_H_INCLUDED
#define ASMJIT_ARM_A64COMPILER_H_INCLUDED
#include "../core/api-config.h"
#ifndef ASMJIT_NO_COMPILER
#include "../core/compiler.h"
#include "../core/type.h"
#include "../arm/a64emitter.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
//! \addtogroup asmjit_a64
//! \{
//! AArch64 compiler implementation.
class ASMJIT_VIRTAPI Compiler
: public BaseCompiler,
public EmitterExplicitT<Compiler> {
public:
ASMJIT_NONCOPYABLE(Compiler)
using Base = BaseCompiler;
//! \name Construction & Destruction
//! \{
ASMJIT_API explicit Compiler(CodeHolder* code = nullptr) noexcept;
ASMJIT_API ~Compiler() noexcept override;
//! \}
//! \name Virtual Registers
//! \{
//! \cond INTERNAL
template<typename RegT, typename Type>
ASMJIT_INLINE_NODEBUG RegT _newRegInternal(const Type& type) {
RegT reg(Globals::NoInit);
_newReg(&reg, type, nullptr);
return reg;
}
template<typename RegT, typename Type>
ASMJIT_INLINE_NODEBUG RegT _newRegInternal(const Type& type, const char* s) {
#ifndef ASMJIT_NO_LOGGING
RegT reg(Globals::NoInit);
_newReg(&reg, type, s);
return reg;
#else
DebugUtils::unused(s);
return _newRegInternal<RegT>(type);
#endif
}
template<typename RegT, typename Type, typename... Args>
ASMJIT_INLINE_NODEBUG RegT _newRegInternal(const Type& type, const char* s, Args&&... args) {
#ifndef ASMJIT_NO_LOGGING
RegT reg(Globals::NoInit);
_newRegFmt(&reg, type, s, std::forward<Args>(args)...);
return reg;
#else
DebugUtils::unused(s, std::forward<Args>(args)...);
return _newRegInternal<RegT>(type);
#endif
}
//! \endcond
template<typename RegT, typename... Args>
ASMJIT_INLINE_NODEBUG RegT newSimilarReg(const RegT& ref, Args&&... args) {
return _newRegInternal<RegT>(ref, std::forward<Args>(args)...);
}
template<typename... Args>
ASMJIT_INLINE_NODEBUG Reg newReg(TypeId typeId, Args&&... args) { return _newRegInternal<Reg>(typeId, std::forward<Args>(args)...); }
template<typename... Args>
ASMJIT_INLINE_NODEBUG Gp newGp(TypeId typeId, Args&&... args) { return _newRegInternal<Gp>(typeId, std::forward<Args>(args)...); }
template<typename... Args>
ASMJIT_INLINE_NODEBUG Vec newVec(TypeId typeId, Args&&... args) { return _newRegInternal<Vec>(typeId, std::forward<Args>(args)...); }
template<typename... Args>
ASMJIT_INLINE_NODEBUG Gp newInt32(Args&&... args) { return _newRegInternal<Gp>(TypeId::kInt32, std::forward<Args>(args)...); }
template<typename... Args>
ASMJIT_INLINE_NODEBUG Gp newUInt32(Args&&... args) { return _newRegInternal<Gp>(TypeId::kUInt32, std::forward<Args>(args)...); }
template<typename... Args>
ASMJIT_INLINE_NODEBUG Gp newInt64(Args&&... args) { return _newRegInternal<Gp>(TypeId::kInt64, std::forward<Args>(args)...); }
template<typename... Args>
ASMJIT_INLINE_NODEBUG Gp newUInt64(Args&&... args) { return _newRegInternal<Gp>(TypeId::kUInt64, std::forward<Args>(args)...); }
template<typename... Args>
ASMJIT_INLINE_NODEBUG Gp newIntPtr(Args&&... args) { return _newRegInternal<Gp>(TypeId::kIntPtr, std::forward<Args>(args)...); }
template<typename... Args>
ASMJIT_INLINE_NODEBUG Gp newUIntPtr(Args&&... args) { return _newRegInternal<Gp>(TypeId::kUIntPtr, std::forward<Args>(args)...); }
template<typename... Args>
ASMJIT_INLINE_NODEBUG Gp newGp32(Args&&... args) { return _newRegInternal<Gp>(TypeId::kUInt32, std::forward<Args>(args)...); }
template<typename... Args>
ASMJIT_INLINE_NODEBUG Gp newGp64(Args&&... args) { return _newRegInternal<Gp>(TypeId::kUInt64, std::forward<Args>(args)...); }
template<typename... Args>
ASMJIT_INLINE_NODEBUG Gp newGpw(Args&&... args) { return _newRegInternal<Gp>(TypeId::kUInt32, std::forward<Args>(args)...); }
template<typename... Args>
ASMJIT_INLINE_NODEBUG Gp newGpx(Args&&... args) { return _newRegInternal<Gp>(TypeId::kUInt64, std::forward<Args>(args)...); }
template<typename... Args>
ASMJIT_INLINE_NODEBUG Gp newGpz(Args&&... args) { return _newRegInternal<Gp>(TypeId::kUIntPtr, std::forward<Args>(args)...); }
template<typename... Args>
ASMJIT_INLINE_NODEBUG Vec newVecS(Args&&... args) { return _newRegInternal<Vec>(TypeId::kFloat32, std::forward<Args>(args)...); }
template<typename... Args>
ASMJIT_INLINE_NODEBUG Vec newVecD(Args&&... args) { return _newRegInternal<Vec>(TypeId::kFloat64, std::forward<Args>(args)...); }
template<typename... Args>
ASMJIT_INLINE_NODEBUG Vec newVecQ(Args&&... args) { return _newRegInternal<Vec>(TypeId::kUInt8x16, std::forward<Args>(args)...); }
//! \}
//! \name Stack
//! \{
//! Creates a new memory chunk allocated on the current function's stack.
ASMJIT_INLINE_NODEBUG Mem newStack(uint32_t size, uint32_t alignment, const char* name = nullptr) {
Mem m(Globals::NoInit);
_newStack(&m, size, alignment, name);
return m;
}
//! \}
//! \name Constants
//! \{
//! Put data to a constant-pool and get a memory reference to it.
ASMJIT_INLINE_NODEBUG Mem newConst(ConstPoolScope scope, const void* data, size_t size) {
Mem m(Globals::NoInit);
_newConst(&m, scope, data, size);
return m;
}
//! Put a BYTE `val` to a constant-pool (8 bits).
ASMJIT_INLINE_NODEBUG Mem newByteConst(ConstPoolScope scope, uint8_t val) noexcept { return newConst(scope, &val, 1); }
//! Put a HWORD `val` to a constant-pool (16 bits).
ASMJIT_INLINE_NODEBUG Mem newHWordConst(ConstPoolScope scope, uint16_t val) noexcept { return newConst(scope, &val, 2); }
//! Put a WORD `val` to a constant-pool (32 bits).
ASMJIT_INLINE_NODEBUG Mem newWordConst(ConstPoolScope scope, uint32_t val) noexcept { return newConst(scope, &val, 4); }
//! Put a DWORD `val` to a constant-pool (64 bits).
ASMJIT_INLINE_NODEBUG Mem newDWordConst(ConstPoolScope scope, uint64_t val) noexcept { return newConst(scope, &val, 8); }
//! Put a HWORD `val` to a constant-pool (16 bits).
ASMJIT_INLINE_NODEBUG Mem newInt16Const(ConstPoolScope scope, int16_t val) noexcept { return newConst(scope, &val, 2); }
//! Put a HWORD `val` to a constant-pool (16 bits).
ASMJIT_INLINE_NODEBUG Mem newUInt16Const(ConstPoolScope scope, uint16_t val) noexcept { return newConst(scope, &val, 2); }
//! Put a WORD `val` to a constant-pool (32 bits).
ASMJIT_INLINE_NODEBUG Mem newInt32Const(ConstPoolScope scope, int32_t val) noexcept { return newConst(scope, &val, 4); }
//! Put a WORD `val` to a constant-pool (32 bits).
ASMJIT_INLINE_NODEBUG Mem newUInt32Const(ConstPoolScope scope, uint32_t val) noexcept { return newConst(scope, &val, 4); }
//! Put a DWORD `val` to a constant-pool (64 bits).
ASMJIT_INLINE_NODEBUG Mem newInt64Const(ConstPoolScope scope, int64_t val) noexcept { return newConst(scope, &val, 8); }
//! Put a DWORD `val` to a constant-pool (64 bits).
ASMJIT_INLINE_NODEBUG Mem newUInt64Const(ConstPoolScope scope, uint64_t val) noexcept { return newConst(scope, &val, 8); }
//! Put a SP-FP `val` to a constant-pool.
ASMJIT_INLINE_NODEBUG Mem newFloatConst(ConstPoolScope scope, float val) noexcept { return newConst(scope, &val, 4); }
//! Put a DP-FP `val` to a constant-pool.
ASMJIT_INLINE_NODEBUG Mem newDoubleConst(ConstPoolScope scope, double val) noexcept { return newConst(scope, &val, 8); }
//! \}
//! \name Instruction Options
//! \{
//! Force the compiler to not follow the conditional or unconditional jump.
ASMJIT_INLINE_NODEBUG Compiler& unfollow() noexcept { _instOptions |= InstOptions::kUnfollow; return *this; }
//! \}
//! \name Compiler specific
//! \{
//! Special pseudo-instruction that can be used to load a memory address into `o0` GP register.
//!
//! \note At the moment this instruction is only useful for loading a stack-allocated address into a GP register
//! for further use. It makes very little sense to use it for anything else. The semantics of this instruction
//! are the same as those of the X86 `LEA` (load effective address) instruction.
ASMJIT_INLINE_NODEBUG Error loadAddressOf(const Gp& o0, const Mem& o1) { return _emitter()->_emitI(Inst::kIdAdr, o0, o1); }
//! \}
//! \name Function Call & Ret Intrinsics
//! \{
//! Invoke a function call without `target` type enforcement.
ASMJIT_INLINE_NODEBUG Error invoke_(InvokeNode** out, const Operand_& target, const FuncSignature& signature) {
return addInvokeNode(out, Inst::kIdBlr, target, signature);
}
//! Invoke a function call of the given `target` and `signature` and store the added node to `out`.
//!
//! Creates a new \ref InvokeNode, initializes all the necessary members to match the given function `signature`,
//! adds the node to the compiler, and stores its pointer to `out`. The operation is atomic: if anything fails,
//! nullptr is stored in `out` and an error code is returned.
ASMJIT_INLINE_NODEBUG Error invoke(InvokeNode** out, const Gp& target, const FuncSignature& signature) { return invoke_(out, target, signature); }
//! \overload
ASMJIT_INLINE_NODEBUG Error invoke(InvokeNode** out, const Mem& target, const FuncSignature& signature) { return invoke_(out, target, signature); }
//! \overload
ASMJIT_INLINE_NODEBUG Error invoke(InvokeNode** out, const Label& target, const FuncSignature& signature) { return invoke_(out, target, signature); }
//! \overload
ASMJIT_INLINE_NODEBUG Error invoke(InvokeNode** out, const Imm& target, const FuncSignature& signature) { return invoke_(out, target, signature); }
//! \overload
ASMJIT_INLINE_NODEBUG Error invoke(InvokeNode** out, uint64_t target, const FuncSignature& signature) { return invoke_(out, Imm(int64_t(target)), signature); }
//! Return.
ASMJIT_INLINE_NODEBUG Error ret() { return addRet(Operand(), Operand()); }
//! \overload
ASMJIT_INLINE_NODEBUG Error ret(const Reg& o0) { return addRet(o0, Operand()); }
//! \overload
ASMJIT_INLINE_NODEBUG Error ret(const Reg& o0, const Reg& o1) { return addRet(o0, o1); }
//! \}
//! \name Jump Tables Support
//! \{
using EmitterExplicitT<Compiler>::br;
//! Adds a jump to the given `target` with the provided jump `annotation`.
ASMJIT_INLINE_NODEBUG Error br(const Reg& target, JumpAnnotation* annotation) { return emitAnnotatedJump(Inst::kIdBr, target, annotation); }
//! \}
//! \name Events
//! \{
ASMJIT_API Error onAttach(CodeHolder& code) noexcept override;
ASMJIT_API Error onDetach(CodeHolder& code) noexcept override;
ASMJIT_API Error onReinit(CodeHolder& code) noexcept override;
//! \}
//! \name Finalize
//! \{
ASMJIT_API Error finalize() override;
//! \}
};
//! \}
ASMJIT_END_SUB_NAMESPACE
#endif // !ASMJIT_NO_COMPILER
#endif // ASMJIT_ARM_A64COMPILER_H_INCLUDED
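A hedged sketch of the virtual-register workflow this class enables (FuncNode and FuncSignature come from the core compiler API; rt is assumed to be an existing JitRuntime):

CodeHolder code;
code.init(rt.environment(), rt.cpuFeatures());

a64::Compiler cc(&code);
FuncNode* func = cc.addFunc(FuncSignature::build<int, int, int>());

a64::Gp a = cc.newInt32("a");   // virtual registers; the RA pass assigns physical ones
a64::Gp b = cc.newInt32("b");
func->setArg(0, a);
func->setArg(1, b);

cc.add(a, a, b);                // a += b
cc.ret(a);
cc.endFunc();
cc.finalize();                  // runs register allocation and serializes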


@@ -1,481 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#if !defined(ASMJIT_NO_AARCH64)
#include "../core/formatter.h"
#include "../core/funcargscontext_p.h"
#include "../core/string.h"
#include "../core/support.h"
#include "../core/type.h"
#include "../arm/a64emithelper_p.h"
#include "../arm/a64formatter_p.h"
#include "../arm/a64instapi_p.h"
#include "../arm/a64operand.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
// a64::EmitHelper - Emit Operations
// =================================
ASMJIT_FAVOR_SIZE Error EmitHelper::emitRegMove(
const Operand_& dst_,
const Operand_& src_, TypeId typeId, const char* comment) {
Emitter* emitter = _emitter->as<Emitter>();
// Invalid or abstract TypeIds are not allowed.
ASMJIT_ASSERT(TypeUtils::isValid(typeId) && !TypeUtils::isAbstract(typeId));
emitter->setInlineComment(comment);
if (dst_.isReg() && src_.isMem()) {
Reg dst(dst_.as<Reg>());
Mem src(src_.as<Mem>());
switch (typeId) {
case TypeId::kInt8:
case TypeId::kUInt8:
return emitter->ldrb(dst.as<Gp>(), src);
case TypeId::kInt16:
case TypeId::kUInt16:
return emitter->ldrh(dst.as<Gp>(), src);
case TypeId::kInt32:
case TypeId::kUInt32:
return emitter->ldr(dst.as<Gp>().w(), src);
case TypeId::kInt64:
case TypeId::kUInt64:
return emitter->ldr(dst.as<Gp>().x(), src);
default: {
if (TypeUtils::isFloat32(typeId) || TypeUtils::isVec32(typeId)) {
return emitter->ldr(dst.as<Vec>().s(), src);
}
if (TypeUtils::isFloat64(typeId) || TypeUtils::isVec64(typeId)) {
return emitter->ldr(dst.as<Vec>().d(), src);
}
if (TypeUtils::isVec128(typeId)) {
return emitter->ldr(dst.as<Vec>().q(), src);
}
break;
}
}
}
if (dst_.isMem() && src_.isReg()) {
Mem dst(dst_.as<Mem>());
Reg src(src_.as<Reg>());
switch (typeId) {
case TypeId::kInt8:
case TypeId::kUInt8:
return emitter->strb(src.as<Gp>(), dst);
case TypeId::kInt16:
case TypeId::kUInt16:
return emitter->strh(src.as<Gp>(), dst);
case TypeId::kInt32:
case TypeId::kUInt32:
return emitter->str(src.as<Gp>().w(), dst);
case TypeId::kInt64:
case TypeId::kUInt64:
return emitter->str(src.as<Gp>().x(), dst);
default: {
if (TypeUtils::isFloat32(typeId) || TypeUtils::isVec32(typeId)) {
return emitter->str(src.as<Vec>().s(), dst);
}
if (TypeUtils::isFloat64(typeId) || TypeUtils::isVec64(typeId)) {
return emitter->str(src.as<Vec>().d(), dst);
}
if (TypeUtils::isVec128(typeId)) {
return emitter->str(src.as<Vec>().q(), dst);
}
break;
}
}
}
if (dst_.isReg() && src_.isReg()) {
Reg dst(dst_.as<Reg>());
Reg src(src_.as<Reg>());
switch (typeId) {
case TypeId::kInt8:
case TypeId::kUInt8:
case TypeId::kInt16:
case TypeId::kUInt16:
case TypeId::kInt32:
case TypeId::kUInt32:
case TypeId::kInt64:
case TypeId::kUInt64:
return emitter->mov(dst.as<Gp>().x(), src.as<Gp>().x());
default: {
if (TypeUtils::isFloat32(typeId) || TypeUtils::isVec32(typeId)) {
return emitter->fmov(dst.as<Vec>().s(), src.as<Vec>().s());
}
if (TypeUtils::isFloat64(typeId) || TypeUtils::isVec64(typeId)) {
return emitter->mov(dst.as<Vec>().b8(), src.as<Vec>().b8());
}
if (TypeUtils::isVec128(typeId)) {
return emitter->mov(dst.as<Vec>().b16(), src.as<Vec>().b16());
}
break;
}
}
}
emitter->setInlineComment(nullptr);
return DebugUtils::errored(kErrorInvalidState);
}
Error EmitHelper::emitRegSwap(
const Reg& a,
const Reg& b, const char* comment) {
DebugUtils::unused(a, b, comment);
return DebugUtils::errored(kErrorInvalidState);
}
// TODO: [ARM] EmitArgMove is unfinished.
Error EmitHelper::emitArgMove(
const Reg& dst_, TypeId dstTypeId,
const Operand_& src_, TypeId srcTypeId, const char* comment) {
// Deduce optional `dstTypeId`, which may be `TypeId::kVoid` in some cases.
if (dstTypeId == TypeId::kVoid) {
dstTypeId = RegUtils::typeIdOf(dst_.regType());
}
// Invalid or abstract TypeIds are not allowed.
ASMJIT_ASSERT(TypeUtils::isValid(dstTypeId) && !TypeUtils::isAbstract(dstTypeId));
ASMJIT_ASSERT(TypeUtils::isValid(srcTypeId) && !TypeUtils::isAbstract(srcTypeId));
Reg dst(dst_.as<Reg>());
Operand src(src_);
uint32_t dstSize = TypeUtils::sizeOf(dstTypeId);
uint32_t srcSize = TypeUtils::sizeOf(srcTypeId);
if (TypeUtils::isInt(dstTypeId)) {
if (TypeUtils::isInt(srcTypeId)) {
uint32_t x = uint32_t(dstSize == 8);
dst.setSignature(OperandSignature{x ? RegTraits<RegType::kGp64>::kSignature : RegTraits<RegType::kGp32>::kSignature});
_emitter->setInlineComment(comment);
if (src.isReg()) {
src.setSignature(dst.signature());
return _emitter->emit(Inst::kIdMov, dst, src);
}
else if (src.isMem()) {
InstId instId = Inst::kIdNone;
switch (srcTypeId) {
case TypeId::kInt8: instId = Inst::kIdLdrsb; break;
case TypeId::kUInt8: instId = Inst::kIdLdrb; break;
case TypeId::kInt16: instId = Inst::kIdLdrsh; break;
case TypeId::kUInt16: instId = Inst::kIdLdrh; break;
case TypeId::kInt32: instId = x ? Inst::kIdLdrsw : Inst::kIdLdr; break;
case TypeId::kUInt32: instId = Inst::kIdLdr; break;
case TypeId::kInt64: instId = Inst::kIdLdr; break;
case TypeId::kUInt64: instId = Inst::kIdLdr; break;
default:
return DebugUtils::errored(kErrorInvalidState);
}
return _emitter->emit(instId, dst, src);
}
}
}
if (TypeUtils::isFloat(dstTypeId) || TypeUtils::isVec(dstTypeId)) {
if (TypeUtils::isFloat(srcTypeId) || TypeUtils::isVec(srcTypeId)) {
switch (srcSize) {
case 2: dst.as<Vec>().setSignature(RegTraits<RegType::kVec16>::kSignature); break;
case 4: dst.as<Vec>().setSignature(RegTraits<RegType::kVec32>::kSignature); break;
case 8: dst.as<Vec>().setSignature(RegTraits<RegType::kVec64>::kSignature); break;
case 16: dst.as<Vec>().setSignature(RegTraits<RegType::kVec128>::kSignature); break;
default:
return DebugUtils::errored(kErrorInvalidState);
}
_emitter->setInlineComment(comment);
if (src.isReg()) {
InstId instId = srcSize <= 4 ? Inst::kIdFmov_v : Inst::kIdMov_v;
src.setSignature(dst.signature());
return _emitter->emit(instId, dst, src);
}
else if (src.isMem()) {
return _emitter->emit(Inst::kIdLdr_v, dst, src);
}
}
}
return DebugUtils::errored(kErrorInvalidState);
}
// a64::EmitHelper - Emit Prolog & Epilog
// ======================================
struct LoadStoreInstructions {
InstId singleInstId;
InstId pairInstId;
};
struct PrologEpilogInfo {
struct RegPair {
uint8_t ids[2];
uint16_t offset;
};
struct GroupData {
RegPair pairs[16];
uint32_t pairCount;
};
Support::Array<GroupData, 2> groups;
uint32_t sizeTotal;
Error init(const FuncFrame& frame) noexcept {
uint32_t offset = 0;
for (RegGroup group : Support::EnumValues<RegGroup, RegGroup::kGp, RegGroup::kVec>{}) {
GroupData& data = groups[group];
uint32_t n = 0;
uint32_t pairCount = 0;
RegPair* pairs = data.pairs;
uint32_t slotSize = frame.saveRestoreRegSize(group);
uint32_t savedRegs = frame.savedRegs(group);
if (group == RegGroup::kGp && frame.hasPreservedFP()) {
// Must be at the beginning of the push/pop sequence.
ASMJIT_ASSERT(pairCount == 0);
pairs[0].offset = uint16_t(offset);
pairs[0].ids[0] = Gp::kIdFp;
pairs[0].ids[1] = Gp::kIdLr;
offset += slotSize * 2;
pairCount++;
savedRegs &= ~Support::bitMask(Gp::kIdFp, Gp::kIdLr);
}
Support::BitWordIterator<uint32_t> it(savedRegs);
while (it.hasNext()) {
pairs[pairCount].ids[n] = uint8_t(it.next());
if (++n == 2) {
pairs[pairCount].offset = uint16_t(offset);
offset += slotSize * 2;
n = 0;
pairCount++;
}
}
if (n == 1) {
pairs[pairCount].ids[1] = uint8_t(Reg::kIdBad);
pairs[pairCount].offset = uint16_t(offset);
offset += slotSize * 2;
pairCount++;
}
data.pairCount = pairCount;
}
sizeTotal = offset;
return kErrorOk;
}
};
ASMJIT_FAVOR_SIZE Error EmitHelper::emitProlog(const FuncFrame& frame) {
Emitter* emitter = _emitter->as<Emitter>();
PrologEpilogInfo pei;
ASMJIT_PROPAGATE(pei.init(frame));
static const Support::Array<Reg, 2> groupRegs = {{ x0, d0 }};
static const Support::Array<LoadStoreInstructions, 2> groupInsts = {{
{ Inst::kIdStr , Inst::kIdStp },
{ Inst::kIdStr_v, Inst::kIdStp_v }
}};
// Emit: 'bti {jc}' (indirect branch protection).
if (frame.hasIndirectBranchProtection()) {
ASMJIT_PROPAGATE(emitter->bti(Predicate::BTI::kJC));
}
uint32_t adjustInitialOffset = pei.sizeTotal;
for (RegGroup group : Support::EnumValues<RegGroup, RegGroup::kGp, RegGroup::kVec>{}) {
const PrologEpilogInfo::GroupData& data = pei.groups[group];
uint32_t pairCount = data.pairCount;
Reg regs[2] = { groupRegs[group], groupRegs[group] };
Mem mem = ptr(sp);
const LoadStoreInstructions& insts = groupInsts[group];
for (uint32_t i = 0; i < pairCount; i++) {
const PrologEpilogInfo::RegPair& pair = data.pairs[i];
regs[0].setId(pair.ids[0]);
regs[1].setId(pair.ids[1]);
mem.setOffsetLo32(pair.offset);
if (pair.offset == 0 && adjustInitialOffset) {
mem.setOffset(-int(adjustInitialOffset));
mem.makePreIndex();
}
if (pair.ids[1] == Reg::kIdBad) {
ASMJIT_PROPAGATE(emitter->emit(insts.singleInstId, regs[0], mem));
}
else {
ASMJIT_PROPAGATE(emitter->emit(insts.pairInstId, regs[0], regs[1], mem));
}
mem.resetOffsetMode();
if (i == 0 && frame.hasPreservedFP()) {
ASMJIT_PROPAGATE(emitter->mov(x29, sp));
}
}
}
if (frame.hasStackAdjustment()) {
uint32_t adj = frame.stackAdjustment();
if (adj <= 0xFFFu) {
ASMJIT_PROPAGATE(emitter->sub(sp, sp, adj));
}
else if (adj <= 0xFFFFFFu) {
// TODO: [ARM] Prolog - we must touch the pages otherwise it's undefined.
ASMJIT_PROPAGATE(emitter->sub(sp, sp, adj & 0x000FFFu));
ASMJIT_PROPAGATE(emitter->sub(sp, sp, adj & 0xFFF000u));
}
else {
return DebugUtils::errored(kErrorInvalidState);
}
}
return kErrorOk;
}
// TODO: [ARM] Emit epilog.
ASMJIT_FAVOR_SIZE Error EmitHelper::emitEpilog(const FuncFrame& frame) {
Emitter* emitter = _emitter->as<Emitter>();
PrologEpilogInfo pei;
ASMJIT_PROPAGATE(pei.init(frame));
static const Support::Array<Reg, 2> groupRegs = {{ x0, d0 }};
static const Support::Array<LoadStoreInstructions, 2> groupInsts = {{
{ Inst::kIdLdr , Inst::kIdLdp },
{ Inst::kIdLdr_v, Inst::kIdLdp_v }
}};
uint32_t adjustInitialOffset = pei.sizeTotal;
if (frame.hasStackAdjustment()) {
uint32_t adj = frame.stackAdjustment();
if (adj <= 0xFFFu) {
ASMJIT_PROPAGATE(emitter->add(sp, sp, adj));
}
else if (adj <= 0xFFFFFFu) {
ASMJIT_PROPAGATE(emitter->add(sp, sp, adj & 0x000FFFu));
ASMJIT_PROPAGATE(emitter->add(sp, sp, adj & 0xFFF000u));
}
else {
return DebugUtils::errored(kErrorInvalidState);
}
}
for (int g = 1; g >= 0; g--) {
RegGroup group = RegGroup(g);
const PrologEpilogInfo::GroupData& data = pei.groups[group];
uint32_t pairCount = data.pairCount;
Reg regs[2] = { groupRegs[group], groupRegs[group] };
Mem mem = ptr(sp);
const LoadStoreInstructions& insts = groupInsts[group];
for (int i = int(pairCount) - 1; i >= 0; i--) {
const PrologEpilogInfo::RegPair& pair = data.pairs[i];
regs[0].setId(pair.ids[0]);
regs[1].setId(pair.ids[1]);
mem.setOffsetLo32(pair.offset);
if (pair.offset == 0 && adjustInitialOffset) {
mem.setOffset(int(adjustInitialOffset));
mem.makePostIndex();
}
if (pair.ids[1] == Reg::kIdBad) {
ASMJIT_PROPAGATE(emitter->emit(insts.singleInstId, regs[0], mem));
}
else {
ASMJIT_PROPAGATE(emitter->emit(insts.pairInstId, regs[0], regs[1], mem));
}
mem.resetOffsetMode();
}
}
ASMJIT_PROPAGATE(emitter->ret(x30));
return kErrorOk;
}
static Error ASMJIT_CDECL Emitter_emitProlog(BaseEmitter* emitter, const FuncFrame& frame) {
EmitHelper emitHelper(emitter);
return emitHelper.emitProlog(frame);
}
static Error ASMJIT_CDECL Emitter_emitEpilog(BaseEmitter* emitter, const FuncFrame& frame) {
EmitHelper emitHelper(emitter);
return emitHelper.emitEpilog(frame);
}
static Error ASMJIT_CDECL Emitter_emitArgsAssignment(BaseEmitter* emitter, const FuncFrame& frame, const FuncArgsAssignment& args) {
EmitHelper emitHelper(emitter);
return emitHelper.emitArgsAssignment(frame, args);
}
void initEmitterFuncs(BaseEmitter* emitter) {
emitter->_funcs.emitProlog = Emitter_emitProlog;
emitter->_funcs.emitEpilog = Emitter_emitEpilog;
emitter->_funcs.emitArgsAssignment = Emitter_emitArgsAssignment;
#ifndef ASMJIT_NO_LOGGING
emitter->_funcs.formatInstruction = FormatterInternal::formatInstruction;
#endif
#ifndef ASMJIT_NO_VALIDATION
emitter->_funcs.validate = InstInternal::validate;
#endif
}
ASMJIT_END_SUB_NAMESPACE
#endif // !ASMJIT_NO_AARCH64
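For orientation, a sketch of the stream emitProlog() produces for a frame that preserves FP/LR, saves one extra GP pair, and adjusts the stack (offsets illustrative):

// stp x29, x30, [sp, #-32]!   // pair at offset 0: pre-index allocates the save area
// mov x29, sp                 // hasPreservedFP(): establish the frame pointer
// stp x19, x20, [sp, #16]     // remaining saved pair at its recorded offset
// sub sp, sp, #local          // stackAdjustment() <= 0xFFF encodes in one instruction
// emitEpilog() mirrors this: add sp back, ldp the pairs in reverse order with a
// post-index restore of the initial pair, then ret x30.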


@@ -1,55 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_ARM_A64EMITHELPER_P_H_INCLUDED
#define ASMJIT_ARM_A64EMITHELPER_P_H_INCLUDED
#include "../core/api-config.h"
#include "../core/emithelper_p.h"
#include "../core/func.h"
#include "../arm/a64emitter.h"
#include "../arm/a64operand.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
//! \cond INTERNAL
//! \addtogroup asmjit_a64
//! \{
class EmitHelper : public BaseEmitHelper {
public:
ASMJIT_INLINE_NODEBUG explicit EmitHelper(BaseEmitter* emitter = nullptr) noexcept
: BaseEmitHelper(emitter) {}
ASMJIT_INLINE_NODEBUG virtual ~EmitHelper() noexcept = default;
Error emitRegMove(
const Operand_& dst_,
const Operand_& src_, TypeId typeId, const char* comment = nullptr) override;
Error emitRegSwap(
const Reg& a,
const Reg& b, const char* comment = nullptr) override;
Error emitArgMove(
const Reg& dst_, TypeId dstTypeId,
const Operand_& src_, TypeId srcTypeId, const char* comment = nullptr) override;
Error emitProlog(const FuncFrame& frame);
Error emitEpilog(const FuncFrame& frame);
};
void initEmitterFuncs(BaseEmitter* emitter);
[[maybe_unused]]
static inline void updateEmitterFuncs(BaseEmitter* emitter) noexcept { DebugUtils::unused(emitter); }
//! \}
//! \endcond
ASMJIT_END_SUB_NAMESPACE
#endif // ASMJIT_ARM_A64EMITHELPER_P_H_INCLUDED

File diff suppressed because it is too large.


@@ -1,66 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#if !defined(ASMJIT_NO_AARCH64) && !defined(ASMJIT_NO_LOGGING)
#include "../core/misc_p.h"
#include "../core/support.h"
#include "../arm/a64formatter_p.h"
#include "../arm/a64instapi_p.h"
#include "../arm/a64instdb_p.h"
#include "../arm/a64operand.h"
#ifndef ASMJIT_NO_COMPILER
#include "../core/compiler.h"
#endif
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
// a64::FormatterInternal - Format Instruction
// ===========================================
ASMJIT_FAVOR_SIZE Error FormatterInternal::formatInstruction(
String& sb,
FormatFlags formatFlags,
const BaseEmitter* emitter,
Arch arch,
const BaseInst& inst, const Operand_* operands, size_t opCount) noexcept {
// Format instruction options and instruction mnemonic.
InstId instId = inst.realId();
if (instId != Inst::kIdNone && instId < Inst::_kIdCount) {
InstStringifyOptions stringifyOptions =
Support::test(formatFlags, FormatFlags::kShowAliases)
? InstStringifyOptions::kAliases
: InstStringifyOptions::kNone;
ASMJIT_PROPAGATE(InstInternal::instIdToString(instId, stringifyOptions, sb));
}
else {
ASMJIT_PROPAGATE(sb.appendFormat("[InstId=#%u]", unsigned(instId)));
}
CondCode cc = inst.armCondCode();
if (cc != CondCode::kAL) {
ASMJIT_PROPAGATE(sb.append('.'));
ASMJIT_PROPAGATE(formatCondCode(sb, cc));
}
for (uint32_t i = 0; i < opCount; i++) {
const Operand_& op = operands[i];
if (op.isNone()) {
break;
}
ASMJIT_PROPAGATE(sb.append(i == 0 ? " " : ", "));
ASMJIT_PROPAGATE(formatOperand(sb, formatFlags, emitter, arch, op));
}
return kErrorOk;
}
ASMJIT_END_SUB_NAMESPACE
#endif // !ASMJIT_NO_AARCH64 && !ASMJIT_NO_LOGGING
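A worked example of the path above: for a conditional branch whose armCondCode() is CondCode::kEQ, the mnemonic is appended first, then '.' plus the formatted condition, then comma-separated operands, yielding text such as "b.eq L0" (label name illustrative).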


@@ -1,42 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_ARM_A64FORMATTER_P_H_INCLUDED
#define ASMJIT_ARM_A64FORMATTER_P_H_INCLUDED
#include "../core/api-config.h"
#ifndef ASMJIT_NO_LOGGING
#include "../core/formatter.h"
#include "../core/string.h"
#include "../arm/armformatter_p.h"
#include "../arm/a64globals.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
//! \cond INTERNAL
//! \addtogroup asmjit_a64
//! \{
namespace FormatterInternal {
using namespace arm::FormatterInternal;
Error ASMJIT_CDECL formatInstruction(
String& sb,
FormatFlags flags,
const BaseEmitter* emitter,
Arch arch,
const BaseInst& inst, const Operand_* operands, size_t opCount) noexcept;
} // {FormatterInternal}
//! \}
//! \endcond
ASMJIT_END_SUB_NAMESPACE
#endif // !ASMJIT_NO_LOGGING
#endif // ASMJIT_ARM_A64FORMATTER_P_H_INCLUDED

View File

@@ -1,214 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#if !defined(ASMJIT_NO_AARCH64)
#include "../arm/a64func_p.h"
#include "../arm/a64operand.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
namespace FuncInternal {
static inline bool shouldTreatAsCDecl(CallConvId ccId) noexcept {
return ccId == CallConvId::kCDecl ||
ccId == CallConvId::kStdCall ||
ccId == CallConvId::kFastCall ||
ccId == CallConvId::kVectorCall ||
ccId == CallConvId::kThisCall ||
ccId == CallConvId::kRegParm1 ||
ccId == CallConvId::kRegParm2 ||
ccId == CallConvId::kRegParm3;
}
static RegType regTypeFromFpOrVecTypeId(TypeId typeId) noexcept {
if (typeId == TypeId::kFloat32) {
return RegType::kVec32;
}
else if (typeId == TypeId::kFloat64) {
return RegType::kVec64;
}
else if (TypeUtils::isVec32(typeId)) {
return RegType::kVec32;
}
else if (TypeUtils::isVec64(typeId)) {
return RegType::kVec64;
}
else if (TypeUtils::isVec128(typeId)) {
return RegType::kVec128;
}
else {
return RegType::kNone;
}
}
ASMJIT_FAVOR_SIZE Error initCallConv(CallConv& cc, CallConvId ccId, const Environment& environment) noexcept {
cc.setArch(environment.arch());
cc.setStrategy(environment.isDarwin() ? CallConvStrategy::kAArch64Apple : CallConvStrategy::kDefault);
cc.setSaveRestoreRegSize(RegGroup::kGp, 8);
cc.setSaveRestoreRegSize(RegGroup::kVec, 8);
cc.setSaveRestoreAlignment(RegGroup::kGp, 16);
cc.setSaveRestoreAlignment(RegGroup::kVec, 16);
cc.setSaveRestoreAlignment(RegGroup::kMask, 1);
cc.setSaveRestoreAlignment(RegGroup::kExtraVirt3, 1);
cc.setPassedOrder(RegGroup::kGp, 0, 1, 2, 3, 4, 5, 6, 7);
cc.setPassedOrder(RegGroup::kVec, 0, 1, 2, 3, 4, 5, 6, 7);
cc.setNaturalStackAlignment(16);
if (shouldTreatAsCDecl(ccId)) {
// ARM doesn't have as many calling conventions as the X86 world, so treat most of them as __cdecl.
cc.setId(CallConvId::kCDecl);
cc.setPreservedRegs(RegGroup::kGp, Support::bitMask(Gp::kIdOs, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30));
cc.setPreservedRegs(RegGroup::kVec, Support::bitMask(8, 9, 10, 11, 12, 13, 14, 15));
}
else {
cc.setId(ccId);
cc.setSaveRestoreRegSize(RegGroup::kVec, 16);
cc.setPreservedRegs(RegGroup::kGp, Support::bitMask(4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30));
cc.setPreservedRegs(RegGroup::kVec, Support::bitMask(4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31));
}
return kErrorOk;
}
ASMJIT_FAVOR_SIZE Error initFuncDetail(FuncDetail& func, const FuncSignature& signature) noexcept {
DebugUtils::unused(signature);
const CallConv& cc = func.callConv();
uint32_t stackOffset = 0;
uint32_t i;
uint32_t argCount = func.argCount();
// Minimum stack size of a single argument passed via stack. The standard AArch64 calling convention
// specifies 8 bytes, so each function argument would occupy at least 8 bytes even if it needs less.
// However, Apple has decided not to follow this rule and a function argument can occupy less; for
// example, two consecutive 32-bit arguments occupy 8 bytes total, instead of 16 as specified
// by ARM.
uint32_t minStackArgSize = cc.strategy() == CallConvStrategy::kAArch64Apple ? 4u : 8u;
if (func.hasRet()) {
for (uint32_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++) {
TypeId typeId = func._rets[valueIndex].typeId();
// Terminate at the first void type (end of the pack).
if (typeId == TypeId::kVoid)
break;
switch (typeId) {
case TypeId::kInt8:
case TypeId::kInt16:
case TypeId::kInt32: {
func._rets[valueIndex].initReg(RegType::kGp32, valueIndex, TypeId::kInt32);
break;
}
case TypeId::kUInt8:
case TypeId::kUInt16:
case TypeId::kUInt32: {
func._rets[valueIndex].initReg(RegType::kGp32, valueIndex, TypeId::kUInt32);
break;
}
case TypeId::kInt64:
case TypeId::kUInt64: {
func._rets[valueIndex].initReg(RegType::kGp64, valueIndex, typeId);
break;
}
default: {
RegType regType = regTypeFromFpOrVecTypeId(typeId);
if (regType == RegType::kNone) {
return DebugUtils::errored(kErrorInvalidRegType);
}
func._rets[valueIndex].initReg(regType, valueIndex, typeId);
break;
}
}
}
}
switch (cc.strategy()) {
case CallConvStrategy::kDefault:
case CallConvStrategy::kAArch64Apple: {
uint32_t gpzPos = 0;
uint32_t vecPos = 0;
for (i = 0; i < argCount; i++) {
FuncValue& arg = func._args[i][0];
TypeId typeId = arg.typeId();
if (TypeUtils::isInt(typeId)) {
uint32_t regId = Reg::kIdBad;
if (gpzPos < CallConv::kMaxRegArgsPerGroup) {
regId = cc._passedOrder[RegGroup::kGp].id[gpzPos];
}
if (regId != Reg::kIdBad) {
RegType regType = typeId <= TypeId::kUInt32 ? RegType::kGp32 : RegType::kGp64;
arg.assignRegData(regType, regId);
func.addUsedRegs(RegGroup::kGp, Support::bitMask(regId));
gpzPos++;
}
else {
uint32_t size = Support::max<uint32_t>(TypeUtils::sizeOf(typeId), minStackArgSize);
if (size >= 8) {
stackOffset = Support::alignUp(stackOffset, 8);
}
arg.assignStackOffset(int32_t(stackOffset));
stackOffset += size;
}
continue;
}
if (TypeUtils::isFloat(typeId) || TypeUtils::isVec(typeId)) {
uint32_t regId = Reg::kIdBad;
if (vecPos < CallConv::kMaxRegArgsPerGroup) {
regId = cc._passedOrder[RegGroup::kVec].id[vecPos];
}
if (regId != Reg::kIdBad) {
RegType regType = regTypeFromFpOrVecTypeId(typeId);
if (regType == RegType::kNone) {
return DebugUtils::errored(kErrorInvalidRegType);
}
arg.initTypeId(typeId);
arg.assignRegData(regType, regId);
func.addUsedRegs(RegGroup::kVec, Support::bitMask(regId));
vecPos++;
}
else {
uint32_t size = Support::max<uint32_t>(TypeUtils::sizeOf(typeId), minStackArgSize);
if (size >= 8) {
stackOffset = Support::alignUp(stackOffset, 8);
}
arg.assignStackOffset(int32_t(stackOffset));
stackOffset += size;
}
continue;
}
}
break;
}
default:
return DebugUtils::errored(kErrorInvalidState);
}
func._argStackSize = Support::alignUp(stackOffset, 8u);
return kErrorOk;
}
} // {FuncInternal}
ASMJIT_END_SUB_NAMESPACE
#endif // !ASMJIT_NO_AARCH64
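To make the minStackArgSize logic above concrete: if two consecutive int32 arguments no longer fit in registers and spill to the stack, the default AAPCS64 strategy sizes each slot to max(4, 8) = 8 bytes and places them at offsets 0 and 8, whereas the Apple strategy sizes them to max(4, 4) = 4 bytes and packs them at offsets 0 and 4.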


@@ -1,33 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_ARM_A64FUNC_P_H_INCLUDED
#define ASMJIT_ARM_A64FUNC_P_H_INCLUDED
#include "../core/func.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
//! \cond INTERNAL
//! \addtogroup asmjit_a64
//! \{
//! AArch64-specific function API (calling conventions and other utilities).
namespace FuncInternal {
//! Initialize `CallConv` structure (AArch64 specific).
Error initCallConv(CallConv& cc, CallConvId ccId, const Environment& environment) noexcept;
//! Initialize `FuncDetail` (AArch64 specific).
Error initFuncDetail(FuncDetail& func, const FuncSignature& signature) noexcept;
} // {FuncInternal}
//! \}
//! \endcond
ASMJIT_END_SUB_NAMESPACE
#endif // ASMJIT_ARM_A64FUNC_P_H_INCLUDED

File diff suppressed because it is too large.


@@ -1,252 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#if !defined(ASMJIT_NO_AARCH64)
#include "../core/cpuinfo.h"
#include "../core/misc_p.h"
#include "../core/support_p.h"
#include "../arm/a64instapi_p.h"
#include "../arm/a64instdb_p.h"
#include "../arm/a64operand.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
namespace InstInternal {
// a64::InstInternal - Text
// ========================
#ifndef ASMJIT_NO_TEXT
Error instIdToString(InstId instId, InstStringifyOptions options, String& output) noexcept {
uint32_t realId = instId & uint32_t(InstIdParts::kRealId);
if (ASMJIT_UNLIKELY(!Inst::isDefinedId(realId))) {
return DebugUtils::errored(kErrorInvalidInstruction);
}
return InstNameUtils::decode(InstDB::_instNameIndexTable[realId], options, InstDB::_instNameStringTable, output);
}
InstId stringToInstId(const char* s, size_t len) noexcept {
if (ASMJIT_UNLIKELY(!s)) {
return BaseInst::kIdNone;
}
if (len == SIZE_MAX) {
len = strlen(s);
}
if (len == 0u || len > InstDB::instNameIndex.maxNameLength) {
return BaseInst::kIdNone;
}
return InstNameUtils::findInstruction(s, len, InstDB::_instNameIndexTable, InstDB::_instNameStringTable, InstDB::instNameIndex);
}
#endif // !ASMJIT_NO_TEXT
// a64::InstInternal - Validate
// ============================
#ifndef ASMJIT_NO_VALIDATION
ASMJIT_FAVOR_SIZE Error validate(const BaseInst& inst, const Operand_* operands, size_t opCount, ValidationFlags validationFlags) noexcept {
// TODO:
DebugUtils::unused(inst, operands, opCount, validationFlags);
return kErrorOk;
}
#endif // !ASMJIT_NO_VALIDATION
// a64::InstInternal - QueryRWInfo
// ===============================
#ifndef ASMJIT_NO_INTROSPECTION
struct InstRWInfoData {
uint8_t rwx[Globals::kMaxOpCount];
};
static const InstRWInfoData instRWInfoData[] = {
#define R uint8_t(OpRWFlags::kRead)
#define W uint8_t(OpRWFlags::kWrite)
#define X uint8_t(OpRWFlags::kRW)
{{ R, R, R, R, R, R }}, // kRWI_R
{{ R, W, R, R, R, R }}, // kRWI_RW
{{ R, X, R, R, R, R }}, // kRWI_RX
{{ R, R, W, R, R, R }}, // kRWI_RRW
{{ R, W, X, R, R, R }}, // kRWI_RWX
{{ W, R, R, R, R, R }}, // kRWI_W
{{ W, R, W, R, R, R }}, // kRWI_WRW
{{ W, R, X, R, R, R }}, // kRWI_WRX
{{ W, R, R, W, R, R }}, // kRWI_WRRW
{{ W, R, R, X, R, R }}, // kRWI_WRRX
{{ W, W, R, R, R, R }}, // kRWI_WW
{{ X, R, R, R, R, R }}, // kRWI_X
{{ X, R, X, R, R, R }}, // kRWI_XRX
{{ X, X, R, R, X, R }}, // kRWI_XXRRX
{{ W, R, R, R, R, R }}, // kRWI_LDn
{{ R, W, R, R, R, R }}, // kRWI_STn
{{ R, R, R, R, R, R }} // kRWI_TODO
#undef R
#undef W
#undef X
};
static const uint8_t elementTypeSize[8] = { 0, 1, 2, 4, 8, 4, 4, 0 };
Error queryRWInfo(const BaseInst& inst, const Operand_* operands, size_t opCount, InstRWInfo* out) noexcept {
// Get the instruction data.
uint32_t realId = inst.id() & uint32_t(InstIdParts::kRealId);
if (ASMJIT_UNLIKELY(!Inst::isDefinedId(realId))) {
return DebugUtils::errored(kErrorInvalidInstruction);
}
out->_instFlags = InstRWFlags::kNone;
out->_opCount = uint8_t(opCount);
out->_rmFeature = 0;
out->_extraReg.reset();
out->_readFlags = CpuRWFlags::kNone; // TODO: [ARM] Read PSTATE.
out->_writeFlags = CpuRWFlags::kNone; // TODO: [ARM] Write PSTATE.
const InstDB::InstInfo& instInfo = InstDB::_instInfoTable[realId];
const InstRWInfoData& rwInfo = instRWInfoData[instInfo.rwInfoIndex()];
if (instInfo.hasFlag(InstDB::kInstFlagConsecutive) && opCount > 2) {
for (uint32_t i = 0; i < opCount; i++) {
OpRWInfo& op = out->_operands[i];
const Operand_& srcOp = operands[i];
if (!srcOp.isRegOrMem()) {
op.reset();
continue;
}
OpRWFlags rwFlags = i < opCount - 1 ? (OpRWFlags)rwInfo.rwx[0] : (OpRWFlags)rwInfo.rwx[1];
op._opFlags = rwFlags & ~(OpRWFlags::kZExt);
op._physId = Reg::kIdBad;
op._rmSize = 0;
op._resetReserved();
uint64_t rByteMask = op.isRead() ? 0xFFFFFFFFFFFFFFFFu : 0x0000000000000000u;
uint64_t wByteMask = op.isWrite() ? 0xFFFFFFFFFFFFFFFFu : 0x0000000000000000u;
op._readByteMask = rByteMask;
op._writeByteMask = wByteMask;
op._extendByteMask = 0;
op._consecutiveLeadCount = 0;
if (srcOp.isReg()) {
if (i == 0) {
op._consecutiveLeadCount = uint8_t(opCount - 1);
}
else {
op.addOpFlags(OpRWFlags::kConsecutive);
}
}
else {
const Mem& memOp = srcOp.as<Mem>();
if (memOp.hasBase()) {
op.addOpFlags(OpRWFlags::kMemBaseRead);
if ((memOp.hasIndex() || memOp.hasOffset()) && memOp.isPreOrPost()) {
op.addOpFlags(OpRWFlags::kMemBaseWrite);
}
}
if (memOp.hasIndex()) {
op.addOpFlags(OpRWFlags::kMemIndexRead);
}
}
}
}
else {
for (uint32_t i = 0; i < opCount; i++) {
OpRWInfo& op = out->_operands[i];
const Operand_& srcOp = operands[i];
if (!srcOp.isRegOrMem()) {
op.reset();
continue;
}
OpRWFlags rwFlags = (OpRWFlags)rwInfo.rwx[i];
op._opFlags = rwFlags & ~(OpRWFlags::kZExt);
op._physId = Reg::kIdBad;
op._rmSize = 0;
op._resetReserved();
uint64_t rByteMask = op.isRead() ? 0xFFFFFFFFFFFFFFFFu : 0x0000000000000000u;
uint64_t wByteMask = op.isWrite() ? 0xFFFFFFFFFFFFFFFFu : 0x0000000000000000u;
op._readByteMask = rByteMask;
op._writeByteMask = wByteMask;
op._extendByteMask = 0;
op._consecutiveLeadCount = 0;
if (srcOp.isReg()) {
if (srcOp.as<Vec>().hasElementIndex()) {
// Only part of the vector is accessed if element index [] is used.
VecElementType elementType = srcOp.as<Vec>().elementType();
uint32_t elementIndex = srcOp.as<Vec>().elementIndex();
uint32_t elementSize = elementTypeSize[size_t(elementType)];
uint64_t accessMask = uint64_t(Support::lsbMask<uint32_t>(elementSize)) << (elementIndex * elementSize);
op._readByteMask &= accessMask;
op._writeByteMask &= accessMask;
}
// TODO: [ARM] RW info is not finished.
}
else {
const Mem& memOp = srcOp.as<Mem>();
if (memOp.hasBase()) {
op.addOpFlags(OpRWFlags::kMemBaseRead);
if ((memOp.hasIndex() || memOp.hasOffset()) && memOp.isPreOrPost()) {
op.addOpFlags(OpRWFlags::kMemBaseWrite);
}
}
if (memOp.hasIndex()) {
op.addOpFlags(OpRWFlags::kMemIndexRead);
}
}
}
}
return kErrorOk;
}
#endif // !ASMJIT_NO_INTROSPECTION
// a64::InstInternal - QueryFeatures
// =================================
#ifndef ASMJIT_NO_INTROSPECTION
Error queryFeatures(const BaseInst& inst, const Operand_* operands, size_t opCount, CpuFeatures* out) noexcept {
// TODO: [ARM] QueryFeatures not implemented yet.
DebugUtils::unused(inst, operands, opCount, out);
return kErrorOk;
}
#endif // !ASMJIT_NO_INTROSPECTION
} // {InstInternal}
// a64::InstInternal - Unit
// ========================
#if defined(ASMJIT_TEST)
UNIT(arm_inst_api_text) {
// TODO:
}
#endif
ASMJIT_END_SUB_NAMESPACE
#endif // !ASMJIT_NO_AARCH64
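A hedged sketch of how queryRWInfo() is reached through the public introspection API (InstAPI is the core dispatcher; the instruction and operands are illustrative):

InstRWInfo rwInfo;
BaseInst inst(a64::Inst::kIdAdd);
Operand_ ops[] = { a64::x0, a64::x1, a64::x2 };

Error err = InstAPI::queryRWInfo(Arch::kAArch64, inst, ops, 3, &rwInfo);
// On success, rwInfo.operand(0).isWrite() and rwInfo.operand(1).isRead() reflect
// the kRWI_* row selected from instRWInfoData above.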


@@ -1,41 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_ARM_A64INSTAPI_P_H_INCLUDED
#define ASMJIT_ARM_A64INSTAPI_P_H_INCLUDED
#include "../core/inst.h"
#include "../core/operand.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
//! \cond INTERNAL
//! \addtogroup asmjit_a64
//! \{
namespace InstInternal {
#ifndef ASMJIT_NO_TEXT
Error ASMJIT_CDECL instIdToString(InstId instId, InstStringifyOptions options, String& output) noexcept;
InstId ASMJIT_CDECL stringToInstId(const char* s, size_t len) noexcept;
#endif // !ASMJIT_NO_TEXT
#ifndef ASMJIT_NO_VALIDATION
Error ASMJIT_CDECL validate(const BaseInst& inst, const Operand_* operands, size_t opCount, ValidationFlags validationFlags) noexcept;
#endif // !ASMJIT_NO_VALIDATION
#ifndef ASMJIT_NO_INTROSPECTION
Error ASMJIT_CDECL queryRWInfo(const BaseInst& inst, const Operand_* operands, size_t opCount, InstRWInfo* out) noexcept;
Error ASMJIT_CDECL queryFeatures(const BaseInst& inst, const Operand_* operands, size_t opCount, CpuFeatures* out) noexcept;
#endif // !ASMJIT_NO_INTROSPECTION
} // {InstInternal}
//! \}
//! \endcond
ASMJIT_END_SUB_NAMESPACE
#endif // ASMJIT_ARM_A64INSTAPI_P_H_INCLUDED

File diff suppressed because it is too large.


@@ -1,77 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_ARM_A64INSTDB_H_INCLUDED
#define ASMJIT_ARM_A64INSTDB_H_INCLUDED
#include "../arm/a64globals.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
//! \addtogroup asmjit_a64
//! \{
//! Instruction database (AArch64).
namespace InstDB {
//! Instruction flags.
enum InstFlags : uint32_t {
//! The instruction provides conditional execution.
kInstFlagCond = 0x00000001u,
//! SIMD instruction that processes elements in pairs.
kInstFlagPair = 0x00000002u,
//! SIMD instruction that does widening (Long).
kInstFlagLong = 0x00000004u,
//! SIMD instruction that does narrowing (Narrow).
kInstFlagNarrow = 0x00000008u,
//! SIMD element access of half-words can only be used with v0..15.
kInstFlagVH0_15 = 0x00000010u,
//! Instruction uses consecutive registers if the number of operands is greater than 2.
kInstFlagConsecutive = 0x00000080u
};
//! Instruction information (AArch64).
struct InstInfo {
//! Instruction encoding type.
uint32_t _encoding : 8;
//! Index to data specific to each encoding type.
uint32_t _encodingDataIndex : 8;
uint32_t _reserved : 16;
uint16_t _rwInfoIndex;
uint16_t _flags;
//! \name Accessors
//! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t rwInfoIndex() const noexcept { return _rwInfoIndex; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t flags() const noexcept { return _flags; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasFlag(uint32_t flag) const { return (_flags & flag) != 0; }
//! \}
};
ASMJIT_VARAPI const InstInfo _instInfoTable[];
[[nodiscard]]
static inline const InstInfo& infoById(InstId instId) noexcept {
instId &= uint32_t(InstIdParts::kRealId);
ASMJIT_ASSERT(Inst::isDefinedId(instId));
return _instInfoTable[instId];
}
} // {InstDB}
//! \}
ASMJIT_END_SUB_NAMESPACE
#endif // ASMJIT_ARM_A64INSTDB_H_INCLUDED
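A small sketch of the accessors above (whether LD1 actually sets kInstFlagConsecutive is an assumption here; _instInfoTable holds the authoritative bits):

const InstDB::InstInfo& info = InstDB::infoById(Inst::kIdLd1);
if (info.hasFlag(InstDB::kInstFlagConsecutive)) {
  // multi-register forms require consecutive vector registers
}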


@@ -1,885 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_ARM_A64INSTDB_H_P_INCLUDED
#define ASMJIT_ARM_A64INSTDB_H_P_INCLUDED
#include "../core/codeholder.h"
#include "../core/instdb_p.h"
#include "../arm/a64instdb.h"
#include "../arm/a64operand.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
//! \cond INTERNAL
//! \addtogroup asmjit_a64
//! \{
namespace InstDB {
// a64::InstDB - Constants Used by Instructions
// ============================================
// GP register types supported by base instructions.
static constexpr uint32_t kW = 0x1;
static constexpr uint32_t kX = 0x2;
static constexpr uint32_t kWX = 0x3;
// GP high register IDs supported by the instruction.
static constexpr uint32_t kZR = Gp::kIdZr;
static constexpr uint32_t kSP = Gp::kIdSp;
// a64::InstDB - RWInfo
// ====================
enum RWInfoType : uint32_t {
kRWI_R,
kRWI_RW,
kRWI_RX,
kRWI_RRW,
kRWI_RWX,
kRWI_W,
kRWI_WRW,
kRWI_WRX,
kRWI_WRRW,
kRWI_WRRX,
kRWI_WW,
kRWI_X,
kRWI_XRX,
kRWI_XXRRX,
kRWI_LDn,
kRWI_STn,
kRWI_SpecialStart = kRWI_LDn
};
// a64::InstDB - ElementType
// =========================
enum InstElementType : uint8_t {
kET_None = uint8_t(VecElementType::kNone),
kET_B = uint8_t(VecElementType::kB),
kET_H = uint8_t(VecElementType::kH),
kET_S = uint8_t(VecElementType::kS),
kET_D = uint8_t(VecElementType::kD),
kET_2H = uint8_t(VecElementType::kH2),
kET_4B = uint8_t(VecElementType::kB4)
};
// a64::InstDB - GpType
// ====================
enum GpType : uint8_t {
kGp_W,
kGp_X,
kGp_X_SP
};
// a64::InstDB - OPSig
// ===================
enum kOpSignature : uint32_t {
kOp_GpW = RegTraits<RegType::kGp32>::kSignature,
kOp_GpX = RegTraits<RegType::kGp64>::kSignature,
kOp_B = RegTraits<RegType::kVec8>::kSignature,
kOp_H = RegTraits<RegType::kVec16>::kSignature,
kOp_S = RegTraits<RegType::kVec32>::kSignature,
kOp_D = RegTraits<RegType::kVec64>::kSignature,
kOp_Q = RegTraits<RegType::kVec128>::kSignature,
kOp_V8B = kOp_D | Vec::kSignatureElementB,
kOp_V4H = kOp_D | Vec::kSignatureElementH,
kOp_V2S = kOp_D | Vec::kSignatureElementS,
kOp_V16B = kOp_Q | Vec::kSignatureElementB,
kOp_V8H = kOp_Q | Vec::kSignatureElementH,
kOp_V4S = kOp_Q | Vec::kSignatureElementS,
kOp_V2D = kOp_Q | Vec::kSignatureElementD
};
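// For example, kOp_V4S is the 128-bit (Q) register signature with the S element type
// OR'ed in, so it describes operands written as "Vn.4s" in assembly syntax.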
// a64::InstDB - HFConv
// ====================
enum kHFConv : uint32_t {
//! FP16 version of the instruction is not available.
kHF_N,
//! Doesn't do any change to the opcode.
kHF_0,
kHF_A,
kHF_B,
kHF_C,
kHF_D,
kHF_Count
};
// a64::InstDB - VOType
// ====================
//! Vector operand type combinations used by FP&SIMD instructions.
enum VOType : uint32_t {
kVO_V_B,
kVO_V_BH,
kVO_V_BH_4S,
kVO_V_BHS,
kVO_V_BHS_D2,
kVO_V_HS,
kVO_V_S,
kVO_V_B8H4,
kVO_V_B8H4S2,
kVO_V_B8D1,
kVO_V_H4S2,
kVO_V_B16,
kVO_V_B16H8,
kVO_V_B16H8S4,
kVO_V_B16D2,
kVO_V_H8S4,
kVO_V_S4,
kVO_V_D2,
kVO_SV_BHS,
kVO_SV_B8H4S2,
kVO_SV_HS,
kVO_V_Any,
kVO_SV_Any,
kVO_Count
};
// a64::InstDB - EncodingId
// ========================
// ${EncodingId:Begin}
// ------------------- Automatically generated, do not edit -------------------
enum EncodingId : uint32_t {
kEncodingNone = 0,
kEncodingBaseAddSub,
kEncodingBaseAdr,
kEncodingBaseAtDcIcTlbi,
kEncodingBaseAtomicCasp,
kEncodingBaseAtomicOp,
kEncodingBaseAtomicSt,
kEncodingBaseBfc,
kEncodingBaseBfi,
kEncodingBaseBfm,
kEncodingBaseBfx,
kEncodingBaseBranchCmp,
kEncodingBaseBranchReg,
kEncodingBaseBranchRel,
kEncodingBaseBranchTst,
kEncodingBaseCCmp,
kEncodingBaseCInc,
kEncodingBaseCSel,
kEncodingBaseCSet,
kEncodingBaseCmpCmn,
kEncodingBaseExtend,
kEncodingBaseExtract,
kEncodingBaseLdSt,
kEncodingBaseLdpStp,
kEncodingBaseLdxp,
kEncodingBaseLogical,
kEncodingBaseMinMax,
kEncodingBaseMov,
kEncodingBaseMovKNZ,
kEncodingBaseMrs,
kEncodingBaseMsr,
kEncodingBaseMvnNeg,
kEncodingBaseOp,
kEncodingBaseOpImm,
kEncodingBaseOpX16,
kEncodingBasePrfm,
kEncodingBaseR,
kEncodingBaseRM_NoImm,
kEncodingBaseRM_SImm10,
kEncodingBaseRM_SImm9,
kEncodingBaseRR,
kEncodingBaseRRII,
kEncodingBaseRRR,
kEncodingBaseRRRR,
kEncodingBaseRev,
kEncodingBaseShift,
kEncodingBaseStx,
kEncodingBaseStxp,
kEncodingBaseSys,
kEncodingBaseTst,
kEncodingFSimdPair,
kEncodingFSimdSV,
kEncodingFSimdVV,
kEncodingFSimdVVV,
kEncodingFSimdVVVV,
kEncodingFSimdVVVe,
kEncodingISimdPair,
kEncodingISimdSV,
kEncodingISimdVV,
kEncodingISimdVVV,
kEncodingISimdVVVI,
kEncodingISimdVVVV,
kEncodingISimdVVVVx,
kEncodingISimdVVVe,
kEncodingISimdVVVx,
kEncodingISimdVVx,
kEncodingISimdWWV,
kEncodingSimdBicOrr,
kEncodingSimdCmp,
kEncodingSimdDot,
kEncodingSimdDup,
kEncodingSimdFcadd,
kEncodingSimdFccmpFccmpe,
kEncodingSimdFcm,
kEncodingSimdFcmla,
kEncodingSimdFcmpFcmpe,
kEncodingSimdFcsel,
kEncodingSimdFcvt,
kEncodingSimdFcvtLN,
kEncodingSimdFcvtSV,
kEncodingSimdFmlal,
kEncodingSimdFmov,
kEncodingSimdIns,
kEncodingSimdLdNStN,
kEncodingSimdLdSt,
kEncodingSimdLdpStp,
kEncodingSimdLdurStur,
kEncodingSimdMov,
kEncodingSimdMoviMvni,
kEncodingSimdShift,
kEncodingSimdShiftES,
kEncodingSimdSm3tt,
kEncodingSimdSmovUmov,
kEncodingSimdSxtlUxtl,
kEncodingSimdTblTbx
};
// ----------------------------------------------------------------------------
// ${EncodingId:End}
// a64::InstDB::EncodingData
// =========================
namespace EncodingData {
#define M_OPCODE(field, bits) \
uint32_t _##field : bits; \
ASMJIT_INLINE_CONSTEXPR uint32_t field() const noexcept { return uint32_t(_##field) << (32 - bits); }
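// For example, M_OPCODE(opcode, 22) expands to a 22-bit field `_opcode` plus an accessor
// `opcode()` that shifts the stored bits into the top of the 32-bit word (`_opcode << 10`),
// which is where the fixed opcode bits live in AArch64 encodings.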
struct BaseOp {
uint32_t opcode;
};
struct BaseOpX16 {
uint32_t opcode;
};
struct BaseOpImm {
uint32_t opcode;
uint16_t immBits;
uint16_t immOffset;
};
struct BaseR {
uint32_t opcode;
uint32_t rType : 8;
uint32_t rHiId : 8;
uint32_t rShift : 8;
};
struct BaseRR {
uint32_t opcode;
uint32_t aType : 2;
uint32_t aHiId : 6;
uint32_t aShift : 5;
uint32_t bType : 2;
uint32_t bHiId : 6;
uint32_t bShift : 5;
uint32_t uniform : 1;
};
struct BaseRRR {
M_OPCODE(opcode, 22)
uint32_t aType : 2;
uint32_t aHiId : 6;
uint32_t bType : 2;
uint32_t bHiId : 6;
uint32_t cType : 2;
uint32_t cHiId : 6;
uint32_t uniform : 1;
};
struct BaseRRRR {
M_OPCODE(opcode, 22)
uint32_t aType : 2;
uint32_t aHiId : 6;
uint32_t bType : 2;
uint32_t bHiId : 6;
uint32_t cType : 2;
uint32_t cHiId : 6;
uint32_t dType : 2;
uint32_t dHiId : 6;
uint32_t uniform : 1;
};
struct BaseRRII {
M_OPCODE(opcode, 22)
uint32_t aType : 2;
uint32_t aHiId : 6;
uint32_t bType : 2;
uint32_t bHiId : 6;
uint32_t aImmSize : 6;
uint32_t aImmDiscardLsb : 5;
uint32_t aImmOffset : 5;
uint32_t bImmSize : 6;
uint32_t bImmDiscardLsb : 5;
uint32_t bImmOffset : 5;
};
struct BaseAtDcIcTlbi {
uint32_t immVerifyMask : 14;
uint32_t immVerifyData : 14;
uint32_t mandatoryReg : 1;
};
struct BaseAdcSbc {
uint32_t opcode;
};
struct BaseMinMax {
uint32_t regOp;
uint32_t immOp;
};
struct BaseAddSub {
uint32_t shiftedOp : 10; // sf|.......|Sh|.|Rm| Imm:6 |Rn|Rd|
uint32_t extendedOp : 10; // sf|.......|..|.|Rm|Opt|Imm3|Rn|Rd|
uint32_t immediateOp: 10; // sf|.......|Sh| Imm:12 |Rn|Rd|
};
struct BaseAdr {
M_OPCODE(opcode, 22)
OffsetType offsetType : 8;
};
struct BaseBfm {
uint32_t opcode; // sf|........|N|ImmR:6|ImmS:6|Rn|Rd|
};
struct BaseCmpCmn {
uint32_t shiftedOp : 10; // sf|.......|Sh|.|Rm| Imm:6 |Rn|11111|
uint32_t extendedOp : 10; // sf|.......|..|.|Rm|Opt|Imm3|Rn|11111|
uint32_t immediateOp: 10; // sf|.......|Sh| Imm:12 |Rn|11111|
};
struct BaseExtend {
M_OPCODE(opcode, 22) // sf|........|N|......|......|Rn|Rd|
uint32_t rType : 2;
uint32_t u : 1;
};
struct BaseLogical {
uint32_t shiftedOp : 10; // sf|.......|Sh|.|Rm| Imm:6 |Rn|Rd|
uint32_t immediateOp: 10; // sf|........|N|ImmR:6|ImmS:6|Rn|Rd|
uint32_t negateImm : 1; // True if this is an operation that must negate IMM.
};
struct BaseMvnNeg {
uint32_t opcode;
};
struct BaseShift {
M_OPCODE(registerOp, 22)
M_OPCODE(immediateOp, 22)
uint32_t ror : 2;
};
struct BaseTst {
uint32_t shiftedOp : 10; // sf|.......|Sh|.|Rm| Imm:6 |Rn|11111|
uint32_t immediateOp: 10; // sf|........|N|ImmR:6|ImmS:6|Rn|11111|
};
struct BaseRM_NoImm {
M_OPCODE(opcode, 22)
uint32_t rType : 2;
uint32_t rHiId : 6;
uint32_t xOffset : 5;
};
struct BaseRM_SImm9 {
M_OPCODE(offsetOp, 22)
M_OPCODE(prePostOp, 22)
uint32_t rType : 2;
uint32_t rHiId : 6;
uint32_t xOffset : 5;
uint32_t immShift : 4;
};
struct BaseRM_SImm10 {
M_OPCODE(opcode, 22)
uint32_t rType : 2;
uint32_t rHiId : 6;
uint32_t xOffset : 5;
uint32_t immShift : 4;
};
struct BasePrfm {
uint32_t registerOp : 11;
uint32_t sOffsetOp : 10;
uint32_t uOffsetOp : 11;
uint32_t literalOp;
};
struct BaseLdSt {
uint32_t uOffsetOp : 10;
uint32_t prePostOp : 11;
uint32_t registerOp : 11;
uint32_t literalOp : 8;
uint32_t rType : 2;
uint32_t xOffset : 5;
uint32_t uOffsetShift : 3;
uint32_t uAltInstId : 14;
};
struct BaseLdpStp {
uint32_t offsetOp : 10;
uint32_t prePostOp : 10;
uint32_t rType : 2;
uint32_t xOffset : 5;
uint32_t offsetShift : 3;
};
struct BaseStx {
M_OPCODE(opcode, 22)
uint32_t rType : 2;
uint32_t xOffset : 5;
};
struct BaseLdxp {
M_OPCODE(opcode, 22)
uint32_t rType : 2;
uint32_t xOffset : 5;
};
struct BaseStxp {
M_OPCODE(opcode, 22)
uint32_t rType : 2;
uint32_t xOffset : 5;
};
struct BaseAtomicOp {
M_OPCODE(opcode, 22)
uint32_t rType : 2;
uint32_t xOffset : 5;
uint32_t zr : 1;
};
struct BaseAtomicSt {
M_OPCODE(opcode, 22)
uint32_t rType : 2;
uint32_t xOffset : 5;
};
struct BaseAtomicCasp {
M_OPCODE(opcode, 22)
uint32_t rType : 2;
uint32_t xOffset : 5;
};
using BaseBranchReg = BaseOp;
using BaseBranchRel = BaseOp;
using BaseBranchCmp = BaseOp;
using BaseBranchTst = BaseOp;
using BaseExtract = BaseOp;
using BaseBfc = BaseOp;
using BaseBfi = BaseOp;
using BaseBfx = BaseOp;
using BaseCCmp = BaseOp;
using BaseCInc = BaseOp;
using BaseCSet = BaseOp;
using BaseCSel = BaseOp;
using BaseMovKNZ = BaseOp;
using BaseMull = BaseOp;
struct FSimdGeneric {
uint32_t _scalarOp : 28;
uint32_t _scalarHf : 4;
uint32_t _vectorOp : 28;
uint32_t _vectorHf : 4;
constexpr uint32_t scalarOp() const noexcept { return uint32_t(_scalarOp) << 10; }
constexpr uint32_t vectorOp() const noexcept { return uint32_t(_vectorOp) << 10; }
constexpr uint32_t scalarHf() const noexcept { return uint32_t(_scalarHf); }
constexpr uint32_t vectorHf() const noexcept { return uint32_t(_vectorHf); }
};
using FSimdVV = FSimdGeneric;
using FSimdVVV = FSimdGeneric;
using FSimdVVVV = FSimdGeneric;
struct FSimdSV {
uint32_t opcode;
};
struct FSimdVVVe {
uint32_t _scalarOp : 28;
uint32_t _scalarHf : 4;
uint32_t _vectorOp;
uint32_t _elementOp;
constexpr uint32_t scalarOp() const noexcept { return uint32_t(_scalarOp) << 10; }
constexpr uint32_t scalarHf() const noexcept { return uint32_t(_scalarHf); }
constexpr uint32_t vectorOp() const noexcept { return uint32_t(_vectorOp) << 10; }
constexpr uint32_t vectorHf() const noexcept { return kHF_C; }
constexpr uint32_t elementScalarOp() const noexcept { return (uint32_t(_elementOp) << 10) | (0x5u << 28); }
constexpr uint32_t elementVectorOp() const noexcept { return (uint32_t(_elementOp) << 10); }
};
struct SimdFcadd {
uint32_t _opcode;
constexpr uint32_t opcode() const noexcept { return _opcode << 10; }
};
struct SimdFcmla {
uint32_t _regularOp;
uint32_t _elementOp;
constexpr uint32_t regularOp() const noexcept { return uint32_t(_regularOp) << 10; }
constexpr uint32_t elementOp() const noexcept { return (uint32_t(_elementOp) << 10); }
};
struct SimdFccmpFccmpe {
uint32_t _opcode;
constexpr uint32_t opcode() const noexcept { return _opcode; }
};
struct SimdFcm {
uint32_t _registerOp : 28;
uint32_t _registerHf : 4;
uint32_t _zeroOp : 28;
constexpr bool hasRegisterOp() const noexcept { return _registerOp != 0; }
constexpr bool hasZeroOp() const noexcept { return _zeroOp != 0; }
constexpr uint32_t registerScalarOp() const noexcept { return (uint32_t(_registerOp) << 10) | (0x5u << 28); }
constexpr uint32_t registerVectorOp() const noexcept { return uint32_t(_registerOp) << 10; }
constexpr uint32_t registerScalarHf() const noexcept { return uint32_t(_registerHf); }
constexpr uint32_t registerVectorHf() const noexcept { return uint32_t(_registerHf); }
constexpr uint32_t zeroScalarOp() const noexcept { return (uint32_t(_zeroOp) << 10) | (0x5u << 28); }
constexpr uint32_t zeroVectorOp() const noexcept { return (uint32_t(_zeroOp) << 10); }
};
struct SimdFcmpFcmpe {
uint32_t _opcode;
constexpr uint32_t opcode() const noexcept { return _opcode; }
};
struct SimdFcvtLN {
uint32_t _opcode : 22;
uint32_t _isCvtxn : 1;
uint32_t _hasScalar : 1;
constexpr uint32_t scalarOp() const noexcept { return (uint32_t(_opcode) << 10) | (0x5u << 28); }
constexpr uint32_t vectorOp() const noexcept { return (uint32_t(_opcode) << 10); }
constexpr uint32_t isCvtxn() const noexcept { return _isCvtxn; }
constexpr uint32_t hasScalar() const noexcept { return _hasScalar; }
};
struct SimdFcvtSV {
uint32_t _vectorIntOp;
uint32_t _vectorFpOp;
uint32_t _generalOp : 31;
uint32_t _isFloatToInt : 1;
constexpr uint32_t scalarIntOp() const noexcept { return (uint32_t(_vectorIntOp) << 10) | (0x5u << 28); }
constexpr uint32_t vectorIntOp() const noexcept { return uint32_t(_vectorIntOp) << 10; }
constexpr uint32_t scalarFpOp() const noexcept { return (uint32_t(_vectorFpOp) << 10) | (0x5u << 28); }
constexpr uint32_t vectorFpOp() const noexcept { return uint32_t(_vectorFpOp) << 10; }
constexpr uint32_t generalOp() const noexcept { return (uint32_t(_generalOp) << 10); }
constexpr uint32_t isFloatToInt() const noexcept { return _isFloatToInt; }
constexpr uint32_t isFixedPoint() const noexcept { return _vectorFpOp != 0; }
};
struct SimdFmlal {
uint32_t _vectorOp;
uint32_t _elementOp;
uint8_t _optionalQ;
uint8_t tA;
uint8_t tB;
uint8_t tElement;
constexpr uint32_t vectorOp() const noexcept { return uint32_t(_vectorOp) << 10; }
constexpr uint32_t elementOp() const noexcept { return uint32_t(_elementOp) << 10; }
constexpr uint32_t optionalQ() const noexcept { return _optionalQ; }
};
struct FSimdPair {
uint32_t _scalarOp;
uint32_t _vectorOp;
constexpr uint32_t scalarOp() const noexcept { return uint32_t(_scalarOp) << 10; }
constexpr uint32_t vectorOp() const noexcept { return uint32_t(_vectorOp) << 10; }
};
struct ISimdVV {
M_OPCODE(opcode, 22)
uint32_t vecOpType : 6;
};
struct ISimdVVx {
M_OPCODE(opcode, 22)
uint32_t op0Signature;
uint32_t op1Signature;
};
struct ISimdSV {
M_OPCODE(opcode, 22)
uint32_t vecOpType : 6;
};
struct ISimdVVV {
M_OPCODE(opcode, 22)
uint32_t vecOpType : 6;
};
struct ISimdVVVx {
M_OPCODE(opcode, 22)
uint32_t op0Signature;
uint32_t op1Signature;
uint32_t op2Signature;
};
struct ISimdWWV {
M_OPCODE(opcode, 22)
uint32_t vecOpType : 6;
};
struct ISimdVVVe {
uint32_t regularOp : 26; // 22 bits used.
uint32_t regularVecType : 6;
uint32_t elementOp : 26; // 22 bits used.
uint32_t elementVecType : 6;
};
struct ISimdVVVI {
M_OPCODE(opcode, 22)
uint32_t vecOpType : 6;
uint32_t immSize : 4;
uint32_t immShift : 4;
uint32_t imm64HasOneBitLess : 1;
};
struct ISimdVVVV {
uint32_t opcode : 22;
uint32_t vecOpType : 6;
};
struct ISimdVVVVx {
uint32_t opcode;
uint32_t op0Signature;
uint32_t op1Signature;
uint32_t op2Signature;
uint32_t op3Signature;
};
struct SimdBicOrr {
uint32_t registerOp; // 22 bits used.
uint32_t immediateOp; // 22 bits used.
};
struct SimdCmp {
uint32_t regOp;
uint32_t zeroOp : 22;
uint32_t vecOpType : 6;
};
struct SimdDot {
uint32_t vectorOp; // 22 bits used.
uint32_t elementOp; // 22 bits used.
uint8_t tA; // Element-type of the first operand.
uint8_t tB; // Element-type of the second and third operands.
uint8_t tElement; // Element-type of the element index[] operand.
};
struct SimdMoviMvni {
uint32_t opcode : 31;
uint32_t inverted : 1;
};
struct SimdLdSt {
uint32_t uOffsetOp : 10;
uint32_t prePostOp : 11;
uint32_t registerOp : 11;
uint32_t literalOp : 8;
uint32_t uAltInstId : 16;
};
struct SimdLdNStN {
uint32_t singleOp;
uint32_t multipleOp : 22;
uint32_t n : 3;
uint32_t replicate : 1;
};
struct SimdLdpStp {
uint32_t offsetOp : 10;
uint32_t prePostOp : 10;
};
struct SimdLdurStur {
uint32_t opcode;
};
struct ISimdPair {
uint32_t opcode2; // 22 bits used.
uint32_t opcode3 : 26; // 22 bits used.
uint32_t opType3 : 6;
};
struct SimdShift {
uint32_t registerOp; // 22 bits used.
uint32_t immediateOp : 22; // 22 bits used.
uint32_t invertedImm : 1;
uint32_t vecOpType : 6;
};
struct SimdShiftES {
uint32_t opcode : 22;
uint32_t vecOpType : 6;
};
struct SimdSm3tt {
uint32_t opcode;
};
struct SimdSmovUmov {
uint32_t opcode : 22;
uint32_t vecOpType : 6;
uint32_t isSigned : 1;
};
struct SimdSxtlUxtl {
uint32_t opcode : 22;
uint32_t vecOpType : 6;
};
struct SimdTblTbx {
uint32_t opcode;
};
#undef M_OPCODE
// ${EncodingDataForward:Begin}
// ------------------- Automatically generated, do not edit -------------------
extern const BaseAddSub baseAddSub[4];
extern const BaseAdr baseAdr[2];
extern const BaseAtDcIcTlbi baseAtDcIcTlbi[4];
extern const BaseAtomicCasp baseAtomicCasp[4];
extern const BaseAtomicOp baseAtomicOp[123];
extern const BaseAtomicSt baseAtomicSt[48];
extern const BaseBfc baseBfc[1];
extern const BaseBfi baseBfi[3];
extern const BaseBfm baseBfm[3];
extern const BaseBfx baseBfx[3];
extern const BaseBranchCmp baseBranchCmp[2];
extern const BaseBranchReg baseBranchReg[3];
extern const BaseBranchRel baseBranchRel[3];
extern const BaseBranchTst baseBranchTst[2];
extern const BaseCCmp baseCCmp[2];
extern const BaseCInc baseCInc[3];
extern const BaseCSel baseCSel[4];
extern const BaseCSet baseCSet[2];
extern const BaseCmpCmn baseCmpCmn[2];
extern const BaseExtend baseExtend[5];
extern const BaseExtract baseExtract[1];
extern const BaseLdSt baseLdSt[9];
extern const BaseLdpStp baseLdpStp[6];
extern const BaseLdxp baseLdxp[2];
extern const BaseLogical baseLogical[8];
extern const BaseMinMax baseMinMax[4];
extern const BaseMovKNZ baseMovKNZ[3];
extern const BaseMvnNeg baseMvnNeg[3];
extern const BaseOp baseOp[24];
extern const BaseOpImm baseOpImm[15];
extern const BaseOpX16 baseOpX16[1];
extern const BasePrfm basePrfm[1];
extern const BaseR baseR[10];
extern const BaseRM_NoImm baseRM_NoImm[21];
extern const BaseRM_SImm10 baseRM_SImm10[2];
extern const BaseRM_SImm9 baseRM_SImm9[23];
extern const BaseRR baseRR[18];
extern const BaseRRII baseRRII[2];
extern const BaseRRR baseRRR[26];
extern const BaseRRRR baseRRRR[6];
extern const BaseShift baseShift[8];
extern const BaseStx baseStx[3];
extern const BaseStxp baseStxp[2];
extern const BaseTst baseTst[1];
extern const FSimdPair fSimdPair[5];
extern const FSimdSV fSimdSV[4];
extern const FSimdVV fSimdVV[17];
extern const FSimdVVV fSimdVVV[13];
extern const FSimdVVVV fSimdVVVV[4];
extern const FSimdVVVe fSimdVVVe[4];
extern const ISimdPair iSimdPair[1];
extern const ISimdSV iSimdSV[7];
extern const ISimdVV iSimdVV[29];
extern const ISimdVVV iSimdVVV[65];
extern const ISimdVVVI iSimdVVVI[2];
extern const ISimdVVVV iSimdVVVV[2];
extern const ISimdVVVVx iSimdVVVVx[1];
extern const ISimdVVVe iSimdVVVe[25];
extern const ISimdVVVx iSimdVVVx[17];
extern const ISimdVVx iSimdVVx[13];
extern const ISimdWWV iSimdWWV[8];
extern const SimdBicOrr simdBicOrr[2];
extern const SimdCmp simdCmp[7];
extern const SimdDot simdDot[5];
extern const SimdFcadd simdFcadd[1];
extern const SimdFccmpFccmpe simdFccmpFccmpe[2];
extern const SimdFcm simdFcm[5];
extern const SimdFcmla simdFcmla[1];
extern const SimdFcmpFcmpe simdFcmpFcmpe[2];
extern const SimdFcvtLN simdFcvtLN[6];
extern const SimdFcvtSV simdFcvtSV[12];
extern const SimdFmlal simdFmlal[6];
extern const SimdLdNStN simdLdNStN[12];
extern const SimdLdSt simdLdSt[2];
extern const SimdLdpStp simdLdpStp[4];
extern const SimdLdurStur simdLdurStur[2];
extern const SimdMoviMvni simdMoviMvni[2];
extern const SimdShift simdShift[40];
extern const SimdShiftES simdShiftES[2];
extern const SimdSm3tt simdSm3tt[4];
extern const SimdSmovUmov simdSmovUmov[2];
extern const SimdSxtlUxtl simdSxtlUxtl[4];
extern const SimdTblTbx simdTblTbx[2];
// ----------------------------------------------------------------------------
// ${EncodingDataForward:End}
} // {EncodingData}
// a64::InstDB - Tables
// ====================
#ifndef ASMJIT_NO_TEXT
extern const InstNameIndex instNameIndex;
extern const char _instNameStringTable[];
extern const uint32_t _instNameIndexTable[];
#endif // !ASMJIT_NO_TEXT
} // {InstDB}
//! \}
//! \endcond
ASMJIT_END_SUB_NAMESPACE
#endif // ASMJIT_ARM_A64INSTDB_H_P_INCLUDED

View File

@@ -1,85 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#if !defined(ASMJIT_NO_AARCH64)
#include "../core/misc_p.h"
#include "../arm/a64operand.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
// a64::Operand - Tests
// ====================
#if defined(ASMJIT_TEST)
UNIT(a64_operand) {
INFO("Checking if a64::reg(...) matches built-in IDs");
EXPECT_EQ(w(5), w5);
EXPECT_EQ(x(5), x5);
INFO("Checking Gp register properties");
EXPECT_TRUE(Gp().isReg());
EXPECT_TRUE(w0.isReg());
EXPECT_TRUE(x0.isReg());
EXPECT_EQ(w0.id(), 0u);
EXPECT_EQ(x0.id(), 0u);
EXPECT_EQ(wzr.id(), Gp::kIdZr);
EXPECT_EQ(xzr.id(), Gp::kIdZr);
EXPECT_EQ(wsp.id(), Gp::kIdSp);
EXPECT_EQ(sp.id(), Gp::kIdSp);
EXPECT_EQ(w0.size(), 4u);
EXPECT_EQ(x0.size(), 8u);
EXPECT_EQ(w0.regType(), RegType::kGp32);
EXPECT_EQ(x0.regType(), RegType::kGp64);
EXPECT_EQ(w0.regGroup(), RegGroup::kGp);
EXPECT_EQ(x0.regGroup(), RegGroup::kGp);
INFO("Checking Vec register properties");
EXPECT_EQ(v0.regType(), RegType::kVec128);
EXPECT_EQ(d0.regType(), RegType::kVec64);
EXPECT_EQ(s0.regType(), RegType::kVec32);
EXPECT_EQ(h0.regType(), RegType::kVec16);
EXPECT_EQ(b0.regType(), RegType::kVec8);
EXPECT_EQ(v0.regGroup(), RegGroup::kVec);
EXPECT_EQ(d0.regGroup(), RegGroup::kVec);
EXPECT_EQ(s0.regGroup(), RegGroup::kVec);
EXPECT_EQ(h0.regGroup(), RegGroup::kVec);
EXPECT_EQ(b0.regGroup(), RegGroup::kVec);
INFO("Checking Vec register element[] access");
Vec vd_1 = v15.d(1);
EXPECT_EQ(vd_1.regType(), RegType::kVec128);
EXPECT_EQ(vd_1.regGroup(), RegGroup::kVec);
EXPECT_EQ(vd_1.id(), 15u);
EXPECT_TRUE(vd_1.isVecD2());
EXPECT_EQ(vd_1.elementType(), VecElementType::kD);
EXPECT_TRUE(vd_1.hasElementIndex());
EXPECT_EQ(vd_1.elementIndex(), 1u);
Vec vs_3 = v15.s(3);
EXPECT_EQ(vs_3.regType(), RegType::kVec128);
EXPECT_EQ(vs_3.regGroup(), RegGroup::kVec);
EXPECT_EQ(vs_3.id(), 15u);
EXPECT_TRUE(vs_3.isVecS4());
EXPECT_EQ(vs_3.elementType(), VecElementType::kS);
EXPECT_TRUE(vs_3.hasElementIndex());
EXPECT_EQ(vs_3.elementIndex(), 3u);
Vec vb_4 = v15.b4(3);
EXPECT_EQ(vb_4.regType(), RegType::kVec128);
EXPECT_EQ(vb_4.regGroup(), RegGroup::kVec);
EXPECT_EQ(vb_4.id(), 15u);
EXPECT_TRUE(vb_4.isVecB4x4());
EXPECT_EQ(vb_4.elementType(), VecElementType::kB4);
EXPECT_TRUE(vb_4.hasElementIndex());
EXPECT_EQ(vb_4.elementIndex(), 3u);
}
#endif
ASMJIT_END_SUB_NAMESPACE
#endif // !ASMJIT_NO_AARCH64

File diff suppressed because it is too large

View File

@@ -1,914 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#if !defined(ASMJIT_NO_AARCH64) && !defined(ASMJIT_NO_COMPILER)
#include "../core/cpuinfo.h"
#include "../core/formatter_p.h"
#include "../core/support.h"
#include "../core/type.h"
#include "../arm/a64assembler.h"
#include "../arm/a64compiler.h"
#include "../arm/a64emithelper_p.h"
#include "../arm/a64instapi_p.h"
#include "../arm/a64instdb_p.h"
#include "../arm/a64rapass_p.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
// a64::ARMRAPass - Helpers
// ========================
// TODO: [ARM] These should be shared with all backends.
[[maybe_unused]]
static inline uint64_t raImmMaskFromSize(uint32_t size) noexcept {
ASMJIT_ASSERT(size > 0 && size < 256);
static const uint64_t masks[] = {
0x00000000000000FFu, // 1
0x000000000000FFFFu, // 2
0x00000000FFFFFFFFu, // 4
0xFFFFFFFFFFFFFFFFu, // 8
0x0000000000000000u, // 16
0x0000000000000000u, // 32
0x0000000000000000u, // 64
0x0000000000000000u, // 128
0x0000000000000000u // 256
};
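// Support::ctz() maps sizes 1, 2, 4, 8, ... to indexes 0, 1, 2, 3, ..., so the table is
// effectively indexed by log2(size); sizes above 8 intentionally map to zero masks.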
return masks[Support::ctz(size)];
}
static const RegMask raConsecutiveLeadCountToRegMaskFilter[5] = {
0xFFFFFFFFu, // [0] No consecutive.
0x00000000u, // [1] Invalid, never used.
0x7FFFFFFFu, // [2] 2 consecutive registers.
0x3FFFFFFFu, // [3] 3 consecutive registers.
0x1FFFFFFFu // [4] 4 consecutive registers.
};
[[nodiscard]]
static inline RATiedFlags raUseOutFlagsFromRWFlags(OpRWFlags rwFlags) noexcept {
static constexpr RATiedFlags map[] = {
RATiedFlags::kNone,
RATiedFlags::kRead | RATiedFlags::kUse, // kRead
RATiedFlags::kWrite | RATiedFlags::kOut, // kWrite
RATiedFlags::kRW | RATiedFlags::kUse, // kRW
};
return map[uint32_t(rwFlags & OpRWFlags::kRW)];
}
[[nodiscard]]
static inline RATiedFlags raRegRwFlags(OpRWFlags flags) noexcept {
return raUseOutFlagsFromRWFlags(flags);
}
[[nodiscard]]
static inline RATiedFlags raMemBaseRwFlags(OpRWFlags flags) noexcept {
constexpr uint32_t shift = Support::ConstCTZ<uint32_t(OpRWFlags::kMemBaseRW)>::value;
return raUseOutFlagsFromRWFlags(OpRWFlags(uint32_t(flags) >> shift) & OpRWFlags::kRW);
}
[[nodiscard]]
static inline RATiedFlags raMemIndexRwFlags(OpRWFlags flags) noexcept {
constexpr uint32_t shift = Support::ConstCTZ<uint32_t(OpRWFlags::kMemIndexRW)>::value;
return raUseOutFlagsFromRWFlags(OpRWFlags(uint32_t(flags) >> shift) & OpRWFlags::kRW);
}
// a64::RACFGBuilder
// =================
class RACFGBuilder : public RACFGBuilderT<RACFGBuilder> {
public:
Arch _arch;
inline RACFGBuilder(ARMRAPass* pass) noexcept
: RACFGBuilderT<RACFGBuilder>(pass),
_arch(pass->cc()->arch()) {}
[[nodiscard]]
inline Compiler* cc() const noexcept { return static_cast<Compiler*>(_cc); }
[[nodiscard]]
Error onInst(InstNode* inst, InstControlFlow& controlType, RAInstBuilder& ib) noexcept;
[[nodiscard]]
Error onBeforeInvoke(InvokeNode* invokeNode) noexcept;
[[nodiscard]]
Error onInvoke(InvokeNode* invokeNode, RAInstBuilder& ib) noexcept;
[[nodiscard]]
Error moveImmToRegArg(InvokeNode* invokeNode, const FuncValue& arg, const Imm& imm_, Reg* out) noexcept;
[[nodiscard]]
Error moveImmToStackArg(InvokeNode* invokeNode, const FuncValue& arg, const Imm& imm_) noexcept;
[[nodiscard]]
Error moveRegToStackArg(InvokeNode* invokeNode, const FuncValue& arg, const Reg& reg) noexcept;
[[nodiscard]]
Error onBeforeRet(FuncRetNode* funcRet) noexcept;
[[nodiscard]]
Error onRet(FuncRetNode* funcRet, RAInstBuilder& ib) noexcept;
};
// a64::RACFGBuilder - OnInst
// ==========================
// TODO: [ARM] This is just a workaround...
static InstControlFlow getControlFlowType(InstId instId) noexcept {
switch (BaseInst::extractRealId(instId)) {
case Inst::kIdB:
case Inst::kIdBr:
if (BaseInst::extractARMCondCode(instId) == CondCode::kAL) {
return InstControlFlow::kJump;
}
else {
return InstControlFlow::kBranch;
}
case Inst::kIdBl:
case Inst::kIdBlr:
return InstControlFlow::kCall;
case Inst::kIdCbz:
case Inst::kIdCbnz:
case Inst::kIdTbz:
case Inst::kIdTbnz:
return InstControlFlow::kBranch;
case Inst::kIdRet:
return InstControlFlow::kReturn;
default:
return InstControlFlow::kRegular;
}
}
Error RACFGBuilder::onInst(InstNode* inst, InstControlFlow& controlType, RAInstBuilder& ib) noexcept {
InstRWInfo rwInfo;
if (Inst::isDefinedId(inst->realId())) {
InstId instId = inst->id();
uint32_t opCount = inst->opCount();
const Operand* opArray = inst->operands();
ASMJIT_PROPAGATE(InstInternal::queryRWInfo(inst->baseInst(), opArray, opCount, &rwInfo));
const InstDB::InstInfo& instInfo = InstDB::infoById(instId);
uint32_t singleRegOps = 0;
ib.addInstRWFlags(rwInfo.instFlags());
if (opCount) {
uint32_t consecutiveOffset = 0xFFFFFFFFu;
uint32_t consecutiveParent = Globals::kInvalidId;
for (uint32_t i = 0; i < opCount; i++) {
const Operand& op = opArray[i];
const OpRWInfo& opRwInfo = rwInfo.operand(i);
if (op.isReg()) {
// Register Operand
// ----------------
const Reg& reg = op.as<Reg>();
RATiedFlags flags = raRegRwFlags(opRwInfo.opFlags());
uint32_t vIndex = Operand::virtIdToIndex(reg.id());
if (vIndex < Operand::kVirtIdCount) {
RAWorkReg* workReg;
ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(vIndex, &workReg));
// Use RW instead of Write if the whole register is not overwritten. This is important for
// liveness analysis, as we cannot kill a register that is still going to be used.
if ((flags & RATiedFlags::kRW) == RATiedFlags::kWrite) {
if (workReg->regByteMask() & ~(opRwInfo.writeByteMask() | opRwInfo.extendByteMask())) {
// Not write-only operation.
flags = (flags & ~RATiedFlags::kOut) | (RATiedFlags::kRead | RATiedFlags::kUse);
}
}
RegGroup group = workReg->group();
RegMask useRegs = _pass->_availableRegs[group];
RegMask outRegs = useRegs;
uint32_t useId = Reg::kIdBad;
uint32_t outId = Reg::kIdBad;
uint32_t useRewriteMask = 0;
uint32_t outRewriteMask = 0;
if (opRwInfo.consecutiveLeadCount()) {
// There must be a single consecutive register lead; otherwise the RW data is invalid.
if (consecutiveOffset != 0xFFFFFFFFu) {
return DebugUtils::errored(kErrorInvalidState);
}
// A consecutive lead register cannot be used as a consecutive +1/+2/+3 register; the registers must be distinct.
if (RATiedReg::consecutiveDataFromFlags(flags) != 0) {
return DebugUtils::errored(kErrorNotConsecutiveRegs);
}
flags |= RATiedFlags::kLeadConsecutive | RATiedReg::consecutiveDataToFlags(opRwInfo.consecutiveLeadCount() - 1);
consecutiveOffset = 0;
RegMask filter = raConsecutiveLeadCountToRegMaskFilter[opRwInfo.consecutiveLeadCount()];
if (Support::test(flags, RATiedFlags::kUse)) {
flags |= RATiedFlags::kUseConsecutive;
useRegs &= filter;
}
else {
flags |= RATiedFlags::kOutConsecutive;
outRegs &= filter;
}
}
if (Support::test(flags, RATiedFlags::kUse)) {
useRewriteMask = Support::bitMask(inst->_getRewriteIndex(&reg._baseId));
if (opRwInfo.hasOpFlag(OpRWFlags::kRegPhysId)) {
useId = opRwInfo.physId();
flags |= RATiedFlags::kUseFixed;
}
else if (opRwInfo.hasOpFlag(OpRWFlags::kConsecutive)) {
if (consecutiveOffset == 0xFFFFFFFFu) {
return DebugUtils::errored(kErrorInvalidState);
}
flags |= RATiedFlags::kUseConsecutive | RATiedReg::consecutiveDataToFlags(++consecutiveOffset);
}
}
else {
outRewriteMask = Support::bitMask(inst->_getRewriteIndex(&reg._baseId));
if (opRwInfo.hasOpFlag(OpRWFlags::kRegPhysId)) {
outId = opRwInfo.physId();
flags |= RATiedFlags::kOutFixed;
}
else if (opRwInfo.hasOpFlag(OpRWFlags::kConsecutive)) {
if (consecutiveOffset == 0xFFFFFFFFu) {
return DebugUtils::errored(kErrorInvalidState);
}
flags |= RATiedFlags::kOutConsecutive | RATiedReg::consecutiveDataToFlags(++consecutiveOffset);
}
}
// Special cases regarding element access.
if (reg.as<Vec>().hasElementIndex()) {
// Only registers v0..v15 can be used when the operand uses an element
// accessor that accesses half-words (h[0..7] elements).
if (instInfo.hasFlag(InstDB::kInstFlagVH0_15) && reg.as<Vec>().elementType() == VecElementType::kH) {
if (Support::test(flags, RATiedFlags::kUse)) {
useRegs &= 0x0000FFFFu; // Restrict the allocatable set to v0..v15.
}
else {
outRegs &= 0x0000FFFFu; // Restrict the allocatable set to v0..v15.
}
}
}
ASMJIT_PROPAGATE(ib.add(workReg, flags, useRegs, useId, useRewriteMask, outRegs, outId, outRewriteMask, opRwInfo.rmSize(), consecutiveParent));
if (singleRegOps == i) {
singleRegOps++;
}
if (Support::test(flags, RATiedFlags::kLeadConsecutive | RATiedFlags::kUseConsecutive | RATiedFlags::kOutConsecutive)) {
consecutiveParent = workReg->workId();
}
}
}
else if (op.isMem()) {
// Memory Operand
// --------------
const Mem& mem = op.as<Mem>();
if (mem.isRegHome()) {
RAWorkReg* workReg;
ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(Operand::virtIdToIndex(mem.baseId()), &workReg));
if (ASMJIT_UNLIKELY(!_pass->getOrCreateStackSlot(workReg))) {
return DebugUtils::errored(kErrorOutOfMemory);
}
}
else if (mem.hasBaseReg()) {
uint32_t vIndex = Operand::virtIdToIndex(mem.baseId());
if (vIndex < Operand::kVirtIdCount) {
RAWorkReg* workReg;
ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(vIndex, &workReg));
RATiedFlags flags = raMemBaseRwFlags(opRwInfo.opFlags());
RegGroup group = workReg->group();
RegMask allocable = _pass->_availableRegs[group];
// Base registers never have a fixed id on ARM.
const uint32_t useId = Reg::kIdBad;
const uint32_t outId = Reg::kIdBad;
uint32_t useRewriteMask = 0;
uint32_t outRewriteMask = 0;
if (Support::test(flags, RATiedFlags::kUse)) {
useRewriteMask = Support::bitMask(inst->_getRewriteIndex(&mem._baseId));
}
else {
outRewriteMask = Support::bitMask(inst->_getRewriteIndex(&mem._baseId));
}
ASMJIT_PROPAGATE(ib.add(workReg, flags, allocable, useId, useRewriteMask, allocable, outId, outRewriteMask));
}
}
if (mem.hasIndexReg()) {
uint32_t vIndex = Operand::virtIdToIndex(mem.indexId());
if (vIndex < Operand::kVirtIdCount) {
RAWorkReg* workReg;
ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(vIndex, &workReg));
RATiedFlags flags = raMemIndexRwFlags(opRwInfo.opFlags());
RegGroup group = workReg->group();
RegMask allocable = _pass->_availableRegs[group];
// Index registers never have a fixed id on ARM.
const uint32_t useId = Reg::kIdBad;
const uint32_t outId = Reg::kIdBad;
uint32_t useRewriteMask = 0;
uint32_t outRewriteMask = 0;
if (Support::test(flags, RATiedFlags::kUse)) {
useRewriteMask = Support::bitMask(inst->_getRewriteIndex(&mem._data[Operand::kDataMemIndexId]));
}
else {
outRewriteMask = Support::bitMask(inst->_getRewriteIndex(&mem._data[Operand::kDataMemIndexId]));
}
ASMJIT_PROPAGATE(ib.add(workReg, RATiedFlags::kUse | RATiedFlags::kRead, allocable, useId, useRewriteMask, allocable, outId, outRewriteMask));
}
}
}
}
}
controlType = getControlFlowType(instId);
}
return kErrorOk;
}
// a64::RACFGBuilder - OnInvoke
// ============================
Error RACFGBuilder::onBeforeInvoke(InvokeNode* invokeNode) noexcept {
const FuncDetail& fd = invokeNode->detail();
uint32_t argCount = invokeNode->argCount();
cc()->_setCursor(invokeNode->prev());
for (uint32_t argIndex = 0; argIndex < argCount; argIndex++) {
const FuncValuePack& argPack = fd.argPack(argIndex);
for (uint32_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++) {
if (!argPack[valueIndex])
break;
const FuncValue& arg = argPack[valueIndex];
const Operand& op = invokeNode->arg(argIndex, valueIndex);
if (op.isNone())
continue;
if (op.isReg()) {
const Reg& reg = op.as<Reg>();
RAWorkReg* workReg;
ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(Operand::virtIdToIndex(reg.id()), &workReg));
if (arg.isReg()) {
RegGroup regGroup = workReg->group();
RegGroup argGroup = RegUtils::groupOf(arg.regType());
if (regGroup != argGroup) {
// TODO: [ARM] Conversion is not supported.
return DebugUtils::errored(kErrorInvalidAssignment);
}
}
else {
ASMJIT_PROPAGATE(moveRegToStackArg(invokeNode, arg, reg));
}
}
else if (op.isImm()) {
if (arg.isReg()) {
Reg reg;
ASMJIT_PROPAGATE(moveImmToRegArg(invokeNode, arg, op.as<Imm>(), &reg));
invokeNode->_args[argIndex][valueIndex] = reg;
}
else {
ASMJIT_PROPAGATE(moveImmToStackArg(invokeNode, arg, op.as<Imm>()));
}
}
}
}
cc()->_setCursor(invokeNode);
if (fd.hasRet()) {
for (uint32_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++) {
const FuncValue& ret = fd.ret(valueIndex);
if (!ret) {
break;
}
const Operand& op = invokeNode->ret(valueIndex);
if (op.isReg()) {
const Reg& reg = op.as<Reg>();
RAWorkReg* workReg;
ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(Operand::virtIdToIndex(reg.id()), &workReg));
if (ret.isReg()) {
RegGroup regGroup = workReg->group();
RegGroup retGroup = RegUtils::groupOf(ret.regType());
if (regGroup != retGroup) {
// TODO: [ARM] Conversion is not supported.
return DebugUtils::errored(kErrorInvalidAssignment);
}
}
}
}
}
// This block has function call(s).
_curBlock->addFlags(RABlockFlags::kHasFuncCalls);
_pass->func()->frame().addAttributes(FuncAttributes::kHasFuncCalls);
_pass->func()->frame().updateCallStackSize(fd.argStackSize());
return kErrorOk;
}
Error RACFGBuilder::onInvoke(InvokeNode* invokeNode, RAInstBuilder& ib) noexcept {
uint32_t argCount = invokeNode->argCount();
const FuncDetail& fd = invokeNode->detail();
for (uint32_t argIndex = 0; argIndex < argCount; argIndex++) {
const FuncValuePack& argPack = fd.argPack(argIndex);
for (uint32_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++) {
if (!argPack[valueIndex]) {
continue;
}
const FuncValue& arg = argPack[valueIndex];
const Operand& op = invokeNode->arg(argIndex, valueIndex);
if (op.isNone()) {
continue;
}
if (op.isReg()) {
const Reg& reg = op.as<Reg>();
RAWorkReg* workReg;
ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(Operand::virtIdToIndex(reg.id()), &workReg));
if (arg.isIndirect()) {
RegGroup regGroup = workReg->group();
if (regGroup != RegGroup::kGp) {
return DebugUtils::errored(kErrorInvalidState);
}
ASMJIT_PROPAGATE(ib.addCallArg(workReg, arg.regId()));
}
else if (arg.isReg()) {
RegGroup regGroup = workReg->group();
RegGroup argGroup = RegUtils::groupOf(arg.regType());
if (regGroup == argGroup) {
ASMJIT_PROPAGATE(ib.addCallArg(workReg, arg.regId()));
}
}
}
}
}
for (uint32_t retIndex = 0; retIndex < Globals::kMaxValuePack; retIndex++) {
const FuncValue& ret = fd.ret(retIndex);
if (!ret) {
break;
}
const Operand& op = invokeNode->ret(retIndex);
if (op.isReg()) {
const Reg& reg = op.as<Reg>();
RAWorkReg* workReg;
ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(Operand::virtIdToIndex(reg.id()), &workReg));
if (ret.isReg()) {
RegGroup regGroup = workReg->group();
RegGroup retGroup = RegUtils::groupOf(ret.regType());
if (regGroup == retGroup) {
ASMJIT_PROPAGATE(ib.addCallRet(workReg, ret.regId()));
}
}
else {
return DebugUtils::errored(kErrorInvalidAssignment);
}
}
}
// Setup clobbered registers.
ib._clobbered[0] = Support::lsbMask<RegMask>(_pass->_physRegCount[RegGroup(0)]) & ~fd.preservedRegs(RegGroup(0));
ib._clobbered[1] = Support::lsbMask<RegMask>(_pass->_physRegCount[RegGroup(1)]) & ~fd.preservedRegs(RegGroup(1));
ib._clobbered[2] = Support::lsbMask<RegMask>(_pass->_physRegCount[RegGroup(2)]) & ~fd.preservedRegs(RegGroup(2));
ib._clobbered[3] = Support::lsbMask<RegMask>(_pass->_physRegCount[RegGroup(3)]) & ~fd.preservedRegs(RegGroup(3));
return kErrorOk;
}
// a64::RACFGBuilder - MoveImmToRegArg
// ===================================
Error RACFGBuilder::moveImmToRegArg(InvokeNode* invokeNode, const FuncValue& arg, const Imm& imm_, Reg* out) noexcept {
DebugUtils::unused(invokeNode);
ASMJIT_ASSERT(arg.isReg());
Imm imm(imm_);
TypeId typeId = TypeId::kVoid;
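// Integer arguments are passed in full 64-bit GP registers, so narrow types are
// normalized to 64 bits below, sign- or zero-extending the immediate as required.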
switch (arg.typeId()) {
case TypeId::kInt8 : typeId = TypeId::kUInt64; imm.signExtend8Bits(); break;
case TypeId::kUInt8 : typeId = TypeId::kUInt64; imm.zeroExtend8Bits(); break;
case TypeId::kInt16 : typeId = TypeId::kUInt64; imm.signExtend16Bits(); break;
case TypeId::kUInt16: typeId = TypeId::kUInt64; imm.zeroExtend16Bits(); break;
case TypeId::kInt32 : typeId = TypeId::kUInt64; imm.signExtend32Bits(); break;
case TypeId::kUInt32: typeId = TypeId::kUInt64; imm.zeroExtend32Bits(); break;
case TypeId::kInt64 : typeId = TypeId::kUInt64; break;
case TypeId::kUInt64: typeId = TypeId::kUInt64; break;
default:
return DebugUtils::errored(kErrorInvalidAssignment);
}
ASMJIT_PROPAGATE(cc()->_newReg(out, typeId, nullptr));
cc()->virtRegById(out->id())->setWeight(BaseRAPass::kCallArgWeight);
return cc()->mov(out->as<Gp>(), imm);
}
// a64::RACFGBuilder - MoveImmToStackArg
// =====================================
Error RACFGBuilder::moveImmToStackArg(InvokeNode* invokeNode, const FuncValue& arg, const Imm& imm_) noexcept {
Reg reg;
ASMJIT_PROPAGATE(moveImmToRegArg(invokeNode, arg, imm_, &reg));
ASMJIT_PROPAGATE(moveRegToStackArg(invokeNode, arg, reg));
return kErrorOk;
}
// a64::RACFGBuilder - MoveRegToStackArg
// =====================================
Error RACFGBuilder::moveRegToStackArg(InvokeNode* invokeNode, const FuncValue& arg, const Reg& reg) noexcept {
DebugUtils::unused(invokeNode);
Mem stackPtr = ptr(_pass->_sp.as<Gp>(), arg.stackOffset());
if (reg.isGp()) {
return cc()->str(reg.as<Gp>(), stackPtr);
}
if (reg.isVec()) {
return cc()->str(reg.as<Vec>(), stackPtr);
}
return DebugUtils::errored(kErrorInvalidState);
}
// a64::RACFGBuilder - OnRet
// =========================
Error RACFGBuilder::onBeforeRet(FuncRetNode* funcRet) noexcept {
DebugUtils::unused(funcRet);
return kErrorOk;
}
Error RACFGBuilder::onRet(FuncRetNode* funcRet, RAInstBuilder& ib) noexcept {
const FuncDetail& funcDetail = _pass->func()->detail();
const Operand* opArray = funcRet->operands();
uint32_t opCount = funcRet->opCount();
for (uint32_t i = 0; i < opCount; i++) {
const Operand& op = opArray[i];
if (op.isNone()) {
continue;
}
const FuncValue& ret = funcDetail.ret(i);
if (ASMJIT_UNLIKELY(!ret.isReg())) {
return DebugUtils::errored(kErrorInvalidAssignment);
}
if (op.isReg()) {
// Register return value.
const Reg& reg = op.as<Reg>();
uint32_t vIndex = Operand::virtIdToIndex(reg.id());
if (vIndex < Operand::kVirtIdCount) {
RAWorkReg* workReg;
ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(vIndex, &workReg));
RegGroup group = workReg->group();
RegMask allocable = _pass->_availableRegs[group];
ASMJIT_PROPAGATE(ib.add(workReg, RATiedFlags::kUse | RATiedFlags::kRead, allocable, ret.regId(), 0, 0, Reg::kIdBad, 0));
}
}
else {
return DebugUtils::errored(kErrorInvalidAssignment);
}
}
return kErrorOk;
}
// a64::ARMRAPass - Construction & Destruction
// ===========================================
ARMRAPass::ARMRAPass() noexcept
: BaseRAPass() { _iEmitHelper = &_emitHelper; }
ARMRAPass::~ARMRAPass() noexcept {}
// a64::ARMRAPass - OnInit / OnDone
// ================================
void ARMRAPass::onInit() noexcept {
Arch arch = cc()->arch();
_emitHelper._emitter = _cb;
_archTraits = &ArchTraits::byArch(arch);
_physRegCount.set(RegGroup::kGp, 32);
_physRegCount.set(RegGroup::kVec, 32);
_physRegCount.set(RegGroup::kMask, 0);
_physRegCount.set(RegGroup::kExtraVirt3, 0);
_buildPhysIndex();
_availableRegs[RegGroup::kGp] = Support::lsbMask<uint32_t>(_physRegCount.get(RegGroup::kGp));
_availableRegs[RegGroup::kVec] = Support::lsbMask<uint32_t>(_physRegCount.get(RegGroup::kVec));
_availableRegs[RegGroup::kMask] = Support::lsbMask<uint32_t>(_physRegCount.get(RegGroup::kMask));
_availableRegs[RegGroup::kExtraVirt3] = Support::lsbMask<uint32_t>(_physRegCount.get(RegGroup::kExtraVirt3));
_scratchRegIndexes[0] = uint8_t(27);
_scratchRegIndexes[1] = uint8_t(28);
const FuncFrame& frame = _func->frame();
// The architecture-specific setup implicitly makes all registers available. So
// make unavailable all registers that are special and cannot be used in general.
bool hasFP = frame.hasPreservedFP();
// Apple ABI requires that the frame-pointer register is not changed by leaf functions and properly updated
// by non-leaf functions. So, let's make this register unavailable as it's just not safe to update it.
if (hasFP || cc()->environment().isDarwin()) {
makeUnavailable(RegGroup::kGp, Gp::kIdFp);
}
makeUnavailable(RegGroup::kGp, Gp::kIdSp);
makeUnavailable(RegGroup::kGp, Gp::kIdOs); // OS-specific use, usually TLS.
makeUnavailable(frame._unavailableRegs);
_sp = sp;
_fp = x29;
}
void ARMRAPass::onDone() noexcept {}
// a64::ARMRAPass - BuildCFG
// =========================
Error ARMRAPass::buildCFG() noexcept {
return RACFGBuilder(this).run();
}
// a64::ARMRAPass - Rewrite
// ========================
ASMJIT_FAVOR_SPEED Error ARMRAPass::_rewrite(BaseNode* first, BaseNode* stop) noexcept {
uint32_t virtCount = cc()->_vRegArray.size();
BaseNode* node = first;
while (node != stop) {
BaseNode* next = node->next();
if (node->isInst()) {
InstNode* inst = node->as<InstNode>();
RAInst* raInst = node->passData<RAInst>();
Operand* operands = inst->operands();
uint32_t opCount = inst->opCount();
uint32_t i;
// Rewrite virtual registers into physical registers.
if (raInst) {
// If the instruction contains pass data (raInst), it was subject to
// register allocation and must be rewritten to use physical registers.
RATiedReg* tiedRegs = raInst->tiedRegs();
uint32_t tiedCount = raInst->tiedCount();
for (i = 0; i < tiedCount; i++) {
RATiedReg* tiedReg = &tiedRegs[i];
Support::BitWordIterator<uint32_t> useIt(tiedReg->useRewriteMask());
uint32_t useId = tiedReg->useId();
while (useIt.hasNext()) {
inst->_rewriteIdAtIndex(useIt.next(), useId);
}
Support::BitWordIterator<uint32_t> outIt(tiedReg->outRewriteMask());
uint32_t outId = tiedReg->outId();
while (outIt.hasNext()) {
inst->_rewriteIdAtIndex(outIt.next(), outId);
}
}
// This data is allocated by the Zone passed to `runOnFunction()`, which
// will be reset after the RA pass finishes. Reset this data too so a
// dangling pointer isn't kept around after the RA pass is complete.
node->resetPassData();
if (ASMJIT_UNLIKELY(node->type() != NodeType::kInst)) {
// FuncRet terminates the flow; it must either be removed, if the exit
// label immediately follows it (an optimization), or patched to an
// architecture-dependent jump instruction that jumps to the function's
// exit before the epilog.
if (node->type() == NodeType::kFuncRet) {
RABlock* block = raInst->block();
if (!isNextTo(node, _func->exitNode())) {
cc()->_setCursor(node->prev());
ASMJIT_PROPAGATE(emitJump(_func->exitNode()->label()));
}
BaseNode* prev = node->prev();
cc()->removeNode(node);
block->setLast(prev);
}
}
}
// Rewrite stack slot addresses.
for (i = 0; i < opCount; i++) {
Operand& op = operands[i];
if (op.isMem()) {
BaseMem& mem = op.as<BaseMem>();
if (mem.isRegHome()) {
uint32_t virtIndex = Operand::virtIdToIndex(mem.baseId());
if (ASMJIT_UNLIKELY(virtIndex >= virtCount)) {
return DebugUtils::errored(kErrorInvalidVirtId);
}
VirtReg* virtReg = cc()->virtRegByIndex(virtIndex);
RAWorkReg* workReg = virtReg->workReg();
ASMJIT_ASSERT(workReg != nullptr);
RAStackSlot* slot = workReg->stackSlot();
int32_t offset = slot->offset();
mem._setBase(_sp.regType(), slot->baseRegId());
mem.clearRegHome();
mem.addOffsetLo32(offset);
}
}
}
// Rewrite `loadAddressOf()` construct.
if (inst->realId() == Inst::kIdAdr && inst->opCount() == 2 && inst->op(1).isMem()) {
BaseMem mem = inst->op(1).as<BaseMem>();
int64_t offset = mem.offset();
if (!mem.hasBaseOrIndex()) {
inst->setId(Inst::kIdMov);
inst->setOp(1, Imm(offset));
}
else {
if (mem.hasIndex()) {
return DebugUtils::errored(kErrorInvalidAddressIndex);
}
Gp dst = Gp::make_r64(inst->op(0).as<Gp>().id());
Gp base = Gp::make_r64(mem.baseId());
InstId arithInstId = offset < 0 ? Inst::kIdSub : Inst::kIdAdd;
uint64_t absOffset = offset < 0 ? Support::neg(uint64_t(offset)) : uint64_t(offset);
inst->setId(arithInstId);
inst->setOpCount(3);
inst->setOp(1, base);
inst->setOp(2, Imm(absOffset));
// Use two operations if the offset cannot be encoded with ADD/SUB.
if (absOffset > 0xFFFu && (absOffset & ~uint64_t(0xFFF000u)) != 0) {
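// ADD/SUB encode a 12-bit immediate, optionally shifted left by 12. Offsets of up
// to 24 bits are therefore split into (abs & 0xFFF) + (abs & 0xFFF000) below;
// anything larger is materialized with MOV first.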
if (absOffset <= 0xFFFFFFu) {
cc()->_setCursor(inst->prev());
ASMJIT_PROPAGATE(cc()->emit(arithInstId, dst, base, Imm(absOffset & 0xFFFu)));
inst->setOp(1, dst);
inst->setOp(2, Imm(absOffset & 0xFFF000u));
}
else {
cc()->_setCursor(inst->prev());
ASMJIT_PROPAGATE(cc()->emit(Inst::kIdMov, inst->op(0), Imm(absOffset)));
inst->setOp(1, base);
inst->setOp(2, dst);
}
}
}
}
}
node = next;
}
return kErrorOk;
}
// a64::ARMRAPass - Prolog & Epilog
// ================================
Error ARMRAPass::updateStackFrame() noexcept {
if (_func->frame().hasFuncCalls()) {
_func->frame().addDirtyRegs(RegGroup::kGp, Support::bitMask(Gp::kIdLr));
}
return BaseRAPass::updateStackFrame();
}
// a64::ARMRAPass - OnEmit
// =======================
Error ARMRAPass::emitMove(uint32_t workId, uint32_t dstPhysId, uint32_t srcPhysId) noexcept {
RAWorkReg* wReg = workRegById(workId);
Reg dst(wReg->signature(), dstPhysId);
Reg src(wReg->signature(), srcPhysId);
const char* comment = nullptr;
#ifndef ASMJIT_NO_LOGGING
if (hasDiagnosticOption(DiagnosticOptions::kRAAnnotate)) {
_tmpString.clear();
Formatter::formatVirtRegNameWithPrefix(_tmpString, "<MOVE> ", 7u, wReg->virtReg());
comment = _tmpString.data();
}
#endif
return _emitHelper.emitRegMove(dst, src, wReg->typeId(), comment);
}
Error ARMRAPass::emitSwap(uint32_t aWorkId, uint32_t aPhysId, uint32_t bWorkId, uint32_t bPhysId) noexcept {
DebugUtils::unused(aWorkId, aPhysId, bWorkId, bPhysId);
return DebugUtils::errored(kErrorInvalidState);
}
Error ARMRAPass::emitLoad(uint32_t workId, uint32_t dstPhysId) noexcept {
RAWorkReg* wReg = workRegById(workId);
Reg dstReg(wReg->signature(), dstPhysId);
BaseMem srcMem(workRegAsMem(wReg));
const char* comment = nullptr;
#ifndef ASMJIT_NO_LOGGING
if (hasDiagnosticOption(DiagnosticOptions::kRAAnnotate)) {
_tmpString.clear();
Formatter::formatVirtRegNameWithPrefix(_tmpString, "<LOAD> ", 7u, wReg->virtReg());
comment = _tmpString.data();
}
#endif
return _emitHelper.emitRegMove(dstReg, srcMem, wReg->typeId(), comment);
}
Error ARMRAPass::emitSave(uint32_t workId, uint32_t srcPhysId) noexcept {
RAWorkReg* wReg = workRegById(workId);
BaseMem dstMem(workRegAsMem(wReg));
Reg srcReg(wReg->signature(), srcPhysId);
const char* comment = nullptr;
#ifndef ASMJIT_NO_LOGGING
if (hasDiagnosticOption(DiagnosticOptions::kRAAnnotate)) {
_tmpString.clear();
Formatter::formatVirtRegNameWithPrefix(_tmpString, "<SAVE> ", 7u, wReg->virtReg());
comment = _tmpString.data();
}
#endif
return _emitHelper.emitRegMove(dstMem, srcReg, wReg->typeId(), comment);
}
Error ARMRAPass::emitJump(const Label& label) noexcept {
return cc()->b(label);
}
Error ARMRAPass::emitPreCall(InvokeNode* invokeNode) noexcept {
DebugUtils::unused(invokeNode);
return kErrorOk;
}
ASMJIT_END_SUB_NAMESPACE
#endif // !ASMJIT_NO_AARCH64 && !ASMJIT_NO_COMPILER

View File

@@ -1,112 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_ARM_A64RAPASS_P_H_INCLUDED
#define ASMJIT_ARM_A64RAPASS_P_H_INCLUDED
#include "../core/api-config.h"
#ifndef ASMJIT_NO_COMPILER
#include "../core/compiler.h"
#include "../core/rabuilders_p.h"
#include "../core/rapass_p.h"
#include "../arm/a64assembler.h"
#include "../arm/a64compiler.h"
#include "../arm/a64emithelper_p.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
//! \cond INTERNAL
//! \addtogroup asmjit_a64
//! \{
//! ARM register allocation pass.
//!
//! Takes care of generating function prologs and epilogs, and also performs
//! register allocation.
class ARMRAPass : public BaseRAPass {
public:
ASMJIT_NONCOPYABLE(ARMRAPass)
using Base = BaseRAPass;
//! \name Members
//! \{
EmitHelper _emitHelper;
//! \}
//! \name Construction & Destruction
//! \{
ARMRAPass() noexcept;
~ARMRAPass() noexcept override;
//! \}
//! \name Accessors
//! \{
//! Returns the compiler casted to `arm::Compiler`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Compiler* cc() const noexcept { return static_cast<Compiler*>(_cb); }
//! Returns emit helper.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG EmitHelper* emitHelper() noexcept { return &_emitHelper; }
//! \}
//! \name Events
//! \{
void onInit() noexcept override;
void onDone() noexcept override;
//! \}
//! \name CFG
//! \{
Error buildCFG() noexcept override;
//! \}
//! \name Rewrite
//! \{
Error _rewrite(BaseNode* first, BaseNode* stop) noexcept override;
//! \}
//! \name Prolog & Epilog
//! \{
Error updateStackFrame() noexcept override;
//! \}
//! \name Emit Helpers
//! \{
Error emitMove(uint32_t workId, uint32_t dstPhysId, uint32_t srcPhysId) noexcept override;
Error emitSwap(uint32_t aWorkId, uint32_t aPhysId, uint32_t bWorkId, uint32_t bPhysId) noexcept override;
Error emitLoad(uint32_t workId, uint32_t dstPhysId) noexcept override;
Error emitSave(uint32_t workId, uint32_t srcPhysId) noexcept override;
Error emitJump(const Label& label) noexcept override;
Error emitPreCall(InvokeNode* invokeNode) noexcept override;
//! \}
};
//! \}
//! \endcond
ASMJIT_END_SUB_NAMESPACE
#endif // !ASMJIT_NO_COMPILER
#endif // ASMJIT_ARM_A64RAPASS_P_H_INCLUDED
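
This pass is never instantiated by user code; it runs when a Compiler-built function is finalized. A minimal sketch of the flow that exercises it, assuming the public AsmJit API; names such as `JitRuntime`, `FuncSignature::build`, and `newGpw` come from the public headers, not from this file:

#include <asmjit/a64.h>
using namespace asmjit;

int callRegisterAllocatedFunc() {
  JitRuntime rt;
  CodeHolder code;
  code.init(rt.environment(), rt.cpuFeatures());

  a64::Compiler cc(&code);
  cc.addFunc(FuncSignature::build<int>());
  a64::Gp r = cc.newGpw("r");     // Virtual register, assigned a physical id by the RA pass.
  cc.mov(r, 42);
  cc.ret(r);
  cc.endFunc();
  cc.finalize();                  // Runs register allocation (this pass) and serializes code.

  int (*fn)();
  if (rt.add(&fn, &code) != kErrorOk)
    return -1;
  return fn();                    // Returns 42.
}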

View File

@@ -1,620 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#ifndef ASMJIT_NO_LOGGING
#include "../core/formatter_p.h"
#include "../core/misc_p.h"
#include "../core/support.h"
#include "../arm/armformatter_p.h"
#include "../arm/a64operand.h"
#include "../arm/a64instapi_p.h"
#include "../arm/a64instdb_p.h"
#ifndef ASMJIT_NO_COMPILER
#include "../core/compiler.h"
#endif
ASMJIT_BEGIN_SUB_NAMESPACE(arm)
// arm::FormatterInternal - Format Feature
// =======================================
Error FormatterInternal::formatFeature(String& sb, uint32_t featureId) noexcept {
// @EnumStringBegin{"enum": "CpuFeatures::ARM", "output": "sFeature", "strip": "k"}@
static const char sFeatureString[] =
"None\0"
"ARMv6\0"
"ARMv7\0"
"ARMv8a\0"
"THUMB\0"
"THUMBv2\0"
"ABLE\0"
"ADERR\0"
"AES\0"
"AFP\0"
"AIE\0"
"AMU1\0"
"AMU1_1\0"
"ANERR\0"
"ASIMD\0"
"BF16\0"
"BRBE\0"
"BTI\0"
"BWE\0"
"CCIDX\0"
"CHK\0"
"CLRBHB\0"
"CMOW\0"
"CMPBR\0"
"CONSTPACFIELD\0"
"CPA\0"
"CPA2\0"
"CPUID\0"
"CRC32\0"
"CSSC\0"
"CSV2\0"
"CSV2_3\0"
"CSV3\0"
"D128\0"
"DGH\0"
"DIT\0"
"DOTPROD\0"
"DPB\0"
"DPB2\0"
"EBEP\0"
"EBF16\0"
"ECBHB\0"
"ECV\0"
"EDHSR\0"
"EDSP\0"
"F8E4M3\0"
"F8E5M2\0"
"F8F16MM\0"
"F8F32MM\0"
"FAMINMAX\0"
"FCMA\0"
"FGT\0"
"FGT2\0"
"FHM\0"
"FLAGM\0"
"FLAGM2\0"
"FMAC\0"
"FP\0"
"FP16\0"
"FP16CONV\0"
"FP8\0"
"FP8DOT2\0"
"FP8DOT4\0"
"FP8FMA\0"
"FPMR\0"
"FPRCVT\0"
"FRINTTS\0"
"GCS\0"
"HACDBS\0"
"HAFDBS\0"
"HAFT\0"
"HDBSS\0"
"HBC\0"
"HCX\0"
"HPDS\0"
"HPDS2\0"
"I8MM\0"
"IDIVA\0"
"IDIVT\0"
"ITE\0"
"JSCVT\0"
"LOR\0"
"LRCPC\0"
"LRCPC2\0"
"LRCPC3\0"
"LS64\0"
"LS64_ACCDATA\0"
"LS64_V\0"
"LS64WB\0"
"LSE\0"
"LSE128\0"
"LSE2\0"
"LSFE\0"
"LSUI\0"
"LUT\0"
"LVA\0"
"LVA3\0"
"MEC\0"
"MOPS\0"
"MPAM\0"
"MTE\0"
"MTE2\0"
"MTE3\0"
"MTE4\0"
"MTE_ASYM_FAULT\0"
"MTE_ASYNC\0"
"MTE_CANONICAL_TAGS\0"
"MTE_NO_ADDRESS_TAGS\0"
"MTE_PERM_S1\0"
"MTE_STORE_ONLY\0"
"MTE_TAGGED_FAR\0"
"MTPMU\0"
"NMI\0"
"NV\0"
"NV2\0"
"OCCMO\0"
"PAN\0"
"PAN2\0"
"PAN3\0"
"PAUTH\0"
"PFAR\0"
"PMU\0"
"PMULL\0"
"PRFMSLC\0"
"RAS\0"
"RAS1_1\0"
"RAS2\0"
"RASSA2\0"
"RDM\0"
"RME\0"
"RNG\0"
"RNG_TRAP\0"
"RPRES\0"
"RPRFM\0"
"S1PIE\0"
"S1POE\0"
"S2PIE\0"
"S2POE\0"
"SB\0"
"SCTLR2\0"
"SEBEP\0"
"SEL2\0"
"SHA1\0"
"SHA256\0"
"SHA3\0"
"SHA512\0"
"SM3\0"
"SM4\0"
"SME\0"
"SME2\0"
"SME2_1\0"
"SME2_2\0"
"SME_AES\0"
"SME_B16B16\0"
"SME_B16F32\0"
"SME_BI32I32\0"
"SME_F16F16\0"
"SME_F16F32\0"
"SME_F32F32\0"
"SME_F64F64\0"
"SME_F8F16\0"
"SME_F8F32\0"
"SME_FA64\0"
"SME_I16I32\0"
"SME_I16I64\0"
"SME_I8I32\0"
"SME_LUTv2\0"
"SME_MOP4\0"
"SME_TMOP\0"
"SPE\0"
"SPE1_1\0"
"SPE1_2\0"
"SPE1_3\0"
"SPE1_4\0"
"SPE_ALTCLK\0"
"SPE_CRR\0"
"SPE_EFT\0"
"SPE_FDS\0"
"SPE_FPF\0"
"SPE_SME\0"
"SPECRES\0"
"SPECRES2\0"
"SPMU\0"
"SSBS\0"
"SSBS2\0"
"SSVE_AES\0"
"SSVE_BITPERM\0"
"SSVE_FEXPA\0"
"SSVE_FP8DOT2\0"
"SSVE_FP8DOT4\0"
"SSVE_FP8FMA\0"
"SVE\0"
"SVE2\0"
"SVE2_1\0"
"SVE2_2\0"
"SVE_AES\0"
"SVE_AES2\0"
"SVE_B16B16\0"
"SVE_BF16\0"
"SVE_BFSCALE\0"
"SVE_BITPERM\0"
"SVE_EBF16\0"
"SVE_ELTPERM\0"
"SVE_F16MM\0"
"SVE_F32MM\0"
"SVE_F64MM\0"
"SVE_I8MM\0"
"SVE_PMULL128\0"
"SVE_SHA3\0"
"SVE_SM4\0"
"SYSINSTR128\0"
"SYSREG128\0"
"THE\0"
"TLBIOS\0"
"TLBIRANGE\0"
"TLBIW\0"
"TME\0"
"TRF\0"
"UAO\0"
"VFP_D32\0"
"VHE\0"
"VMID16\0"
"WFXT\0"
"XNX\0"
"XS\0"
"<Unknown>\0";
static const uint16_t sFeatureIndex[] = {
0, 5, 11, 17, 24, 30, 38, 43, 49, 53, 57, 61, 66, 73, 79, 85, 90, 95, 99,
103, 109, 113, 120, 125, 131, 145, 149, 154, 160, 166, 171, 176, 183, 188,
193, 197, 201, 209, 213, 218, 223, 229, 235, 239, 245, 250, 257, 264, 272,
280, 289, 294, 298, 303, 307, 313, 320, 325, 328, 333, 342, 346, 354, 362,
369, 374, 381, 389, 393, 400, 407, 412, 418, 422, 426, 431, 437, 442, 448,
454, 458, 464, 468, 474, 481, 488, 493, 506, 513, 520, 524, 531, 536, 541,
546, 550, 554, 559, 563, 568, 573, 577, 582, 587, 592, 607, 617, 636, 656,
668, 683, 698, 704, 708, 711, 715, 721, 725, 730, 735, 741, 746, 750, 756,
764, 768, 775, 780, 787, 791, 795, 799, 808, 814, 820, 826, 832, 838, 844,
847, 854, 860, 865, 870, 877, 882, 889, 893, 897, 901, 906, 913, 920, 928,
939, 950, 962, 973, 984, 995, 1006, 1016, 1026, 1035, 1046, 1057, 1067, 1077,
1086, 1095, 1099, 1106, 1113, 1120, 1127, 1138, 1146, 1154, 1162, 1170, 1178,
1186, 1195, 1200, 1205, 1211, 1220, 1233, 1244, 1257, 1270, 1282, 1286, 1291,
1298, 1305, 1313, 1322, 1333, 1342, 1354, 1366, 1376, 1388, 1398, 1408, 1418,
1427, 1440, 1449, 1457, 1469, 1479, 1483, 1490, 1500, 1506, 1510, 1514, 1518,
1526, 1530, 1537, 1542, 1546, 1549
};
// @EnumStringEnd@
return sb.append(sFeatureString + sFeatureIndex[Support::min<uint32_t>(featureId, uint32_t(CpuFeatures::ARM::kMaxValue) + 1)]);
}
// arm::FormatterInternal - Format Constants
// =========================================
ASMJIT_FAVOR_SIZE Error FormatterInternal::formatCondCode(String& sb, CondCode cc) noexcept {
static const char condCodeData[] =
"al\0" "na\0"
"eq\0" "ne\0"
"hs\0" "lo\0" "mi\0" "pl\0" "vs\0" "vc\0"
"hi\0" "ls\0" "ge\0" "lt\0" "gt\0" "le\0"
"<Unknown>";
return sb.append(condCodeData + Support::min<uint32_t>(uint32_t(cc), 16u) * 3);
}
ASMJIT_FAVOR_SIZE Error FormatterInternal::formatShiftOp(String& sb, ShiftOp shiftOp) noexcept {
const char* str = nullptr;
switch (shiftOp) {
case ShiftOp::kLSL: str = "lsl"; break;
case ShiftOp::kLSR: str = "lsr"; break;
case ShiftOp::kASR: str = "asr"; break;
case ShiftOp::kROR: str = "ror"; break;
case ShiftOp::kRRX: str = "rrx"; break;
case ShiftOp::kMSL: str = "msl"; break;
case ShiftOp::kUXTB: str = "uxtb"; break;
case ShiftOp::kUXTH: str = "uxth"; break;
case ShiftOp::kUXTW: str = "uxtw"; break;
case ShiftOp::kUXTX: str = "uxtx"; break;
case ShiftOp::kSXTB: str = "sxtb"; break;
case ShiftOp::kSXTH: str = "sxth"; break;
case ShiftOp::kSXTW: str = "sxtw"; break;
case ShiftOp::kSXTX: str = "sxtx"; break;
default: str = "<Unknown>"; break;
}
return sb.append(str);
}
// arm::FormatterInternal - Format Register
// ========================================
struct FormatElementData {
char letter;
uint8_t elementCount;
uint8_t onlyIndex;
uint8_t reserved;
};
static constexpr FormatElementData formatElementDataTable[9] = {
{ '?' , 0 , 0, 0 }, // None
{ 'b' , 16, 0, 0 }, // bX or b[index]
{ 'h' , 8 , 0, 0 }, // hX or h[index]
{ 's' , 4 , 0, 0 }, // sX or s[index]
{ 'd' , 2 , 0, 0 }, // dX or d[index]
{ 'b' , 4 , 1, 0 }, // ?? or b4[index]
{ 'h' , 2 , 1, 0 }, // ?? or h2[index]
{ '?' , 0 , 0, 0 }, // invalid (possibly stored in Operand)
{ '?' , 0 , 0, 0 } // invalid (never stored in Operand, bug...)
};
ASMJIT_FAVOR_SIZE Error FormatterInternal::formatRegister(
String& sb,
FormatFlags flags,
const BaseEmitter* emitter,
Arch arch,
RegType regType,
uint32_t rId,
uint32_t elementType,
uint32_t elementIndex) noexcept {
DebugUtils::unused(flags);
DebugUtils::unused(arch);
static const char bhsdq[] = "bhsdq";
bool virtRegFormatted = false;
#ifndef ASMJIT_NO_COMPILER
if (Operand::isVirtId(rId)) {
if (emitter && emitter->isCompiler()) {
const BaseCompiler* cc = static_cast<const BaseCompiler*>(emitter);
if (cc->isVirtIdValid(rId)) {
VirtReg* vReg = cc->virtRegById(rId);
ASMJIT_ASSERT(vReg != nullptr);
ASMJIT_PROPAGATE(Formatter::formatVirtRegName(sb, vReg));
virtRegFormatted = true;
}
}
}
#else
DebugUtils::unused(emitter, flags);
#endif
if (!virtRegFormatted) {
char letter = '\0';
switch (regType) {
case RegType::kVec8:
case RegType::kVec16:
case RegType::kVec32:
case RegType::kVec64:
case RegType::kVec128:
letter = bhsdq[uint32_t(regType) - uint32_t(RegType::kVec8)];
if (elementType) {
letter = 'v';
}
break;
case RegType::kGp32:
if (Environment::is64Bit(arch)) {
letter = 'w';
if (rId == a64::Gp::kIdZr) {
return sb.append("wzr", 3);
}
if (rId == a64::Gp::kIdSp) {
return sb.append("wsp", 3);
}
}
else {
letter = 'r';
}
break;
case RegType::kGp64:
if (Environment::is64Bit(arch)) {
if (rId == a64::Gp::kIdZr) {
return sb.append("xzr", 3);
}
if (rId == a64::Gp::kIdSp) {
return sb.append("sp", 2);
}
letter = 'x';
break;
}
// X registers are undefined in 32-bit mode.
[[fallthrough]];
default:
ASMJIT_PROPAGATE(sb.appendFormat("<Reg-%u>?%u", uint32_t(regType), rId));
break;
}
if (letter)
ASMJIT_PROPAGATE(sb.appendFormat("%c%u", letter, rId));
}
constexpr uint32_t kElementTypeCount = uint32_t(a64::VecElementType::kMaxValue) + 1;
if (elementType) {
elementType = Support::min(elementType, kElementTypeCount);
FormatElementData elementData = formatElementDataTable[elementType];
uint32_t elementCount = elementData.elementCount;
if (regType == RegType::kVec64) {
elementCount /= 2u;
}
ASMJIT_PROPAGATE(sb.append('.'));
if (elementCount) {
ASMJIT_PROPAGATE(sb.appendUInt(elementCount));
}
ASMJIT_PROPAGATE(sb.append(elementData.letter));
}
if (elementIndex != 0xFFFFFFFFu) {
ASMJIT_PROPAGATE(sb.appendFormat("[%u]", elementIndex));
}
return kErrorOk;
}
ASMJIT_FAVOR_SIZE Error FormatterInternal::formatRegisterList(
String& sb,
FormatFlags flags,
const BaseEmitter* emitter,
Arch arch,
RegType regType,
uint32_t rMask) noexcept {
bool first = true;
ASMJIT_PROPAGATE(sb.append('{'));
while (rMask != 0u) {
uint32_t start = Support::ctz(rMask);
uint32_t count = 0u;
uint32_t mask = 1u << start;
do {
rMask &= ~mask;
mask <<= 1u;
count++;
} while (rMask & mask);
if (!first) {
ASMJIT_PROPAGATE(sb.append(", "));
}
ASMJIT_PROPAGATE(formatRegister(sb, flags, emitter, arch, regType, start, 0, 0xFFFFFFFFu));
if (count >= 2u) {
ASMJIT_PROPAGATE(sb.append('-'));
ASMJIT_PROPAGATE(formatRegister(sb, flags, emitter, arch, regType, start + count - 1, 0, 0xFFFFFFFFu));
}
first = false;
}
ASMJIT_PROPAGATE(sb.append('}'));
return kErrorOk;
}
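// Usage sketch (illustrative; the flags and architecture chosen here are
// assumptions for the example, not the only valid inputs): each run of
// consecutive set bits in the mask is formatted as a "first-last" range:
//
//   String sb;
//   formatRegisterList(sb, FormatFlags::kNone, nullptr, Arch::kAArch64,
//                      RegType::kGp64, 0b101111u); // sb == "{x0-x3, x5}"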
// a64::FormatterInternal - Format Operand
// =======================================
ASMJIT_FAVOR_SIZE Error FormatterInternal::formatOperand(
String& sb,
FormatFlags flags,
const BaseEmitter* emitter,
Arch arch,
const Operand_& op) noexcept {
if (op.isReg()) {
const Reg& reg = op.as<Reg>();
uint32_t elementType = op._signature.getField<a64::Vec::kSignatureRegElementTypeMask>();
uint32_t elementIndex = op.as<a64::Vec>().elementIndex();
if (!op.as<a64::Vec>().hasElementIndex()) {
elementIndex = 0xFFFFFFFFu;
}
return formatRegister(sb, flags, emitter, arch, reg.regType(), reg.id(), elementType, elementIndex);
}
if (op.isMem()) {
const a64::Mem& m = op.as<a64::Mem>();
ASMJIT_PROPAGATE(sb.append('['));
if (m.hasBase()) {
if (m.hasBaseLabel()) {
ASMJIT_PROPAGATE(Formatter::formatLabel(sb, flags, emitter, m.baseId()));
}
else {
FormatFlags modifiedFlags = flags;
if (m.isRegHome()) {
ASMJIT_PROPAGATE(sb.append('&'));
modifiedFlags &= ~FormatFlags::kRegCasts;
}
ASMJIT_PROPAGATE(formatRegister(sb, modifiedFlags, emitter, arch, m.baseType(), m.baseId()));
}
}
else {
// ARM addressing really requires a base register.
if (m.hasIndex() || m.hasOffset()) {
ASMJIT_PROPAGATE(sb.append("<None>"));
}
}
// The post index makes it look like there was another operand, but it's
// still part of AsmJit's `arm::Mem` operand so it's consistent with
// other architectures.
if (m.isPostIndex())
ASMJIT_PROPAGATE(sb.append(']'));
if (m.hasIndex()) {
ASMJIT_PROPAGATE(sb.append(", "));
ASMJIT_PROPAGATE(formatRegister(sb, flags, emitter, arch, m.indexType(), m.indexId()));
}
if (m.hasOffset()) {
ASMJIT_PROPAGATE(sb.append(", "));
int64_t off = int64_t(m.offset());
uint32_t base = 10;
if (Support::test(flags, FormatFlags::kHexOffsets) && uint64_t(off) > 9) {
base = 16;
}
if (base == 10) {
ASMJIT_PROPAGATE(sb.appendInt(off, base));
}
else {
ASMJIT_PROPAGATE(sb.append("0x"));
ASMJIT_PROPAGATE(sb.appendUInt(uint64_t(off), base));
}
}
if (m.hasShift()) {
ASMJIT_PROPAGATE(sb.append(' '));
if (!m.isPreOrPost()) {
ASMJIT_PROPAGATE(formatShiftOp(sb, m.shiftOp()));
}
ASMJIT_PROPAGATE(sb.appendFormat(" %u", m.shift()));
}
if (!m.isPostIndex()) {
ASMJIT_PROPAGATE(sb.append(']'));
}
if (m.isPreIndex()) {
ASMJIT_PROPAGATE(sb.append('!'));
}
return kErrorOk;
}
if (op.isImm()) {
const Imm& i = op.as<Imm>();
int64_t val = i.value();
uint32_t predicate = i.predicate();
if (predicate) {
ASMJIT_PROPAGATE(formatShiftOp(sb, ShiftOp(predicate)));
ASMJIT_PROPAGATE(sb.append(' '));
}
if (Support::test(flags, FormatFlags::kHexImms) && uint64_t(val) > 9) {
ASMJIT_PROPAGATE(sb.append("0x"));
return sb.appendUInt(uint64_t(val), 16);
}
else {
return sb.appendInt(val, 10);
}
}
if (op.isLabel()) {
return Formatter::formatLabel(sb, flags, emitter, op.id());
}
if (op.isRegList()) {
const BaseRegList& regList = op.as<BaseRegList>();
return formatRegisterList(sb, flags, emitter, arch, regList.regType(), regList.list());
}
return sb.append("<None>");
}
ASMJIT_END_SUB_NAMESPACE
#endif // !ASMJIT_NO_LOGGING

View File

@@ -1,69 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_ARM_ARMFORMATTER_P_H_INCLUDED
#define ASMJIT_ARM_ARMFORMATTER_P_H_INCLUDED
#include "../core/api-config.h"
#ifndef ASMJIT_NO_LOGGING
#include "../core/formatter.h"
#include "../core/string.h"
#include "../arm/armglobals.h"
ASMJIT_BEGIN_SUB_NAMESPACE(arm)
//! \cond INTERNAL
//! \addtogroup asmjit_arm
//! \{
namespace FormatterInternal {
Error ASMJIT_CDECL formatFeature(
String& sb,
uint32_t featureId) noexcept;
Error ASMJIT_CDECL formatCondCode(
String& sb,
CondCode cc) noexcept;
Error ASMJIT_CDECL formatShiftOp(
String& sb,
ShiftOp shiftOp) noexcept;
Error ASMJIT_CDECL formatRegister(
String& sb,
FormatFlags flags,
const BaseEmitter* emitter,
Arch arch,
RegType regType,
uint32_t rId,
uint32_t elementType = 0,
uint32_t elementIndex = 0xFFFFFFFF) noexcept;
Error ASMJIT_CDECL formatRegisterList(
String& sb,
FormatFlags flags,
const BaseEmitter* emitter,
Arch arch,
RegType regType,
uint32_t rMask) noexcept;
Error ASMJIT_CDECL formatOperand(
String& sb,
FormatFlags flags,
const BaseEmitter* emitter,
Arch arch,
const Operand_& op) noexcept;
} // {FormatterInternal}
//! \}
//! \endcond
ASMJIT_END_SUB_NAMESPACE
#endif // !ASMJIT_NO_LOGGING
#endif // ASMJIT_ARM_ARMFORMATTER_P_H_INCLUDED

View File

@@ -1,17 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_ARM_ARMGLOBALS_H_INCLUDED
#define ASMJIT_ARM_ARMGLOBALS_H_INCLUDED
#include "../core/archcommons.h"
#include "../core/inst.h"
//! \namespace asmjit::arm
//! \ingroup asmjit_arm
//!
//! API shared between AArch32 & AArch64 backends.
#endif // ASMJIT_ARM_ARMGLOBALS_H_INCLUDED

View File

@@ -1,226 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_ARM_ARMUTILS_H_INCLUDED
#define ASMJIT_ARM_ARMUTILS_H_INCLUDED
#include "../core/support.h"
#include "../arm/armglobals.h"
ASMJIT_BEGIN_SUB_NAMESPACE(arm)
//! \addtogroup asmjit_arm
//! \{
//! Public utilities and helpers for targeting AArch32 and AArch64 architectures.
namespace Utils {
//! Encodes the 12-bit immediate part of an opcode that is used by the standard 32-bit ARM encoding.
[[maybe_unused]]
static inline bool encodeAArch32Imm(uint64_t imm, uint32_t* encodedImmOut) noexcept {
if (imm & 0xFFFFFFFF00000000u)
return false;
uint32_t v = uint32_t(imm);
uint32_t r = 0;
if (v <= 0xFFu) {
*encodedImmOut = v;
return true;
}
// Rotate if there are bits on both ends (LSB and MSB)
// (otherwise we would not be able to calculate the rotation with ctz).
if (v & 0xFF0000FFu) {
v = Support::ror(v, 16);
r = 16u;
}
uint32_t n = Support::ctz(v) & ~0x1u;
r = (r - n) & 0x1Eu;
v = Support::ror(v, n);
if (v > 0xFFu)
return false;
*encodedImmOut = v | (r << 7);
return true;
}
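// Usage sketch: 0x0000FF00 is encodable (0xFF rotated right by 24 bits, i.e.
// rotate field 12), while 0x00000101 cannot be expressed as a single rotated
// 8-bit constant:
//
//   uint32_t enc;
//   bool ok = encodeAArch32Imm(0x0000FF00u, &enc); // ok == true, enc == 0xCFF
//   ok = encodeAArch32Imm(0x00000101u, &enc);      // ok == false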
//! Decomposed fields of a logical immediate value.
struct LogicalImm {
uint32_t n;
uint32_t s;
uint32_t r;
};
//! Encodes the given `imm` value of the given `width` to a logical immediate value represented as N, S, and R fields
//! and writes these fields to `out`.
//!
//! Encoding Table:
//!
//! ```
//! +---+--------+--------+------+
//! | N | ImmS | ImmR | Size |
//! +---+--------+--------+------+
//! | 1 | ssssss | rrrrrr | 64 |
//! | 0 | 0sssss | .rrrrr | 32 |
//! | 0 | 10ssss | ..rrrr | 16 |
//! | 0 | 110sss | ...rrr | 8 |
//! | 0 | 1110ss | ....rr | 4 |
//! | 0 | 11110s | .....r | 2 |
//! +---+--------+--------+------+
//! ```
[[maybe_unused]]
static bool encodeLogicalImm(uint64_t imm, uint32_t width, LogicalImm* out) noexcept {
// Determine the element width, which must be 2, 4, 8, 16, 32, or 64 bits.
do {
width /= 2;
uint64_t mask = (uint64_t(1) << width) - 1u;
if ((imm & mask) != ((imm >> width) & mask)) {
width *= 2;
break;
}
} while (width > 2);
// Patterns of all zeros and all ones are not encodable.
uint64_t lsbMask = Support::lsbMask<uint64_t>(width);
imm &= lsbMask;
if (imm == 0 || imm == lsbMask)
return false;
// Inspect the pattern and get the most important bit indexes.
//
// oIndex <-+ +-> zIndex
// | |
// |..zeros..|oCount|zCount|..ones..|
// |000000000|111111|000000|11111111|
uint32_t zIndex = Support::ctz(~imm);
uint64_t zImm = imm ^ ((uint64_t(1) << zIndex) - 1);
uint32_t zCount = (zImm ? Support::ctz(zImm) : width) - zIndex;
uint32_t oIndex = zIndex + zCount;
uint64_t oImm = ~(zImm ^ Support::lsbMask<uint64_t>(oIndex));
uint32_t oCount = (oImm ? Support::ctz(oImm) : width) - (oIndex);
// Verify whether the bit-pattern is encodable.
uint64_t mustBeZero = oImm ^ ~Support::lsbMask<uint64_t>(oIndex + oCount);
if (mustBeZero != 0 || (zIndex > 0 && width - (oIndex + oCount) != 0))
return false;
out->n = width == 64;
out->s = (oCount + zIndex - 1) | (Support::neg(width * 2) & 0x3F);
out->r = width - oIndex;
return true;
}
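// Usage sketch: the alternating pattern 0x5555555555555555 repeats every 2
// bits (element 0b01), so it encodes as N=0, ImmS=0b111100, ImmR=0 (see the
// last row of the table above):
//
//   LogicalImm li;
//   bool ok = encodeLogicalImm(0x5555555555555555u, 64, &li);
//   // ok == true, li.n == 0, li.s == 0x3C, li.r == 0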
//! Returns true if the given `imm` value is encodable as a logical immediate. The `width` argument describes the
//! width of the operation, and must be either 32 or 64. This function can be used to test whether an immediate
//! value can be used with AND, ANDS, BIC, BICS, EON, EOR, ORN, and ORR instructions.
[[maybe_unused]]
static ASMJIT_INLINE_NODEBUG bool isLogicalImm(uint64_t imm, uint32_t width) noexcept {
LogicalImm dummy;
return encodeLogicalImm(imm, width, &dummy);
}
//! Returns true if the given `imm` value is encodable as an immediate with `add` and `sub` instructions on AArch64.
//! These two instructions can encode 12-bit immediate value optionally shifted left by 12 bits.
[[maybe_unused]]
static ASMJIT_INLINE_NODEBUG bool isAddSubImm(uint64_t imm) noexcept {
return imm <= 0xFFFu || (imm & ~uint64_t(0xFFFu << 12)) == 0;
}
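// For example, isAddSubImm(0xFFF) and isAddSubImm(0x123000) are true (the
// latter is 0x123 shifted left by 12), while isAddSubImm(0x1001) is false,
// as it has set bits in both the low and the shifted 12-bit range.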
//! Returns true if the given `imm` value is a byte mask. A byte mask has each byte of the value set to either
//! 0x00 or 0xFF. Some ARM instructions accept immediates that form a byte mask, and this function can be used
//! to verify that the immediate is encodable before using the value.
template<typename T>
static ASMJIT_INLINE_NODEBUG bool isByteMaskImm8(const T& imm) noexcept {
constexpr T kMask = T(0x0101010101010101 & Support::allOnes<T>());
return imm == (imm & kMask) * T(255);
}
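// How the check works: `imm & kMask` keeps only the lowest bit of each byte,
// and multiplying by 255 expands every kept bit into 0xFF, so the comparison
// holds exactly when each byte is 0x00 or 0xFF. For example:
//
//   isByteMaskImm8<uint32_t>(0x00FF00FFu); // true
//   isByteMaskImm8<uint32_t>(0x00FF00F0u); // false (0xF0 byte)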
// [.......A|B.......|.......C|D.......|.......E|F.......|.......G|H.......]
static ASMJIT_INLINE_NODEBUG uint32_t encodeImm64ByteMaskToImm8(uint64_t imm) noexcept {
return uint32_t(((imm >> (7 - 0)) & 0b00000011) | // [.......G|H.......]
((imm >> (23 - 2)) & 0b00001100) | // [.......E|F.......]
((imm >> (39 - 4)) & 0b00110000) | // [.......C|D.......]
((imm >> (55 - 6)) & 0b11000000)); // [.......A|B.......]
}
//! \cond
//! A generic implementation that checks whether a floating point value can be converted to ARM Imm8.
template<typename T, uint32_t kNumBBits, uint32_t kNumCDEFGHBits, uint32_t kNumZeroBits>
static ASMJIT_INLINE bool isFPImm8Generic(T val) noexcept {
constexpr uint32_t kAllBsMask = Support::lsbMask<uint32_t>(kNumBBits);
constexpr uint32_t kB0Pattern = Support::bitMask(kNumBBits - 1);
constexpr uint32_t kB1Pattern = kAllBsMask ^ kB0Pattern;
T immZ = val & Support::lsbMask<T>(kNumZeroBits);
uint32_t immB = uint32_t(val >> (kNumZeroBits + kNumCDEFGHBits)) & kAllBsMask;
// ImmZ must be all zeros and ImmB must either be B0 or B1 pattern.
return immZ == 0 && (immB == kB0Pattern || immB == kB1Pattern);
}
//! \endcond
//! Returns true if the given half precision floating point `val` can be encoded as ARM IMM8 value, which represents
//! a limited set of floating point immediate values, which can be used with FMOV instruction.
//!
//! The floating point must have bits distributed in the following way:
//!
//! ```
//! [aBbbcdef|gh000000]
//! ```
static ASMJIT_INLINE_NODEBUG bool isFP16Imm8(uint32_t val) noexcept { return isFPImm8Generic<uint32_t, 3, 6, 6>(val); }
//! Returns true if the given single precision floating point `val` can be encoded as ARM IMM8 value, which represents
//! a limited set of floating point immediate values, which can be used with FMOV instruction.
//!
//! The floating point must have bits distributed in the following way:
//!
//! ```
//! [aBbbbbbc|defgh000|00000000|00000000]
//! ```
static ASMJIT_INLINE_NODEBUG bool isFP32Imm8(uint32_t val) noexcept { return isFPImm8Generic<uint32_t, 6, 6, 19>(val); }
//! \overload
static ASMJIT_INLINE_NODEBUG bool isFP32Imm8(float val) noexcept { return isFP32Imm8(Support::bitCast<uint32_t>(val)); }
//! Returns true if the given double precision floating point `val` can be encoded as ARM IMM8 value, which represents
//! a limited set of floating point immediate values, which can be used with FMOV instruction.
//!
//! The floating point must have bits distributed in the following way:
//!
//! ```
//! [aBbbbbbb|bbcdefgh|00000000|00000000|00000000|00000000|00000000|00000000]
//! ```
static ASMJIT_INLINE_NODEBUG bool isFP64Imm8(uint64_t val) noexcept { return isFPImm8Generic<uint64_t, 9, 6, 48>(val); }
//! \overload
static ASMJIT_INLINE_NODEBUG bool isFP64Imm8(double val) noexcept { return isFP64Imm8(Support::bitCast<uint64_t>(val)); }
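// Usage sketch: the encodable set is +/-(16..31)/16 * 2^r with r in [-3, 4],
// so simple constants like 2.0 and 0.5 pass while 0.1 does not:
//
//   isFP64Imm8(2.0); // true  (16/16 * 2^1)
//   isFP64Imm8(0.1); // false (the mantissa needs more than 4 fraction bits)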
//! \cond
template<typename T, uint32_t kNumBBits, uint32_t kNumCDEFGHBits, uint32_t kNumZeroBits>
static ASMJIT_INLINE_NODEBUG uint32_t encodeFPToImm8Generic(T val) noexcept {
uint32_t bits = uint32_t(val >> kNumZeroBits);
return ((bits >> (kNumBBits + kNumCDEFGHBits - 7)) & 0x80u) | (bits & 0x7F);
}
//! \endcond
//! Encodes a double precision floating point value into IMM8 format.
//!
//! \note This function expects that `isFP64Imm8(val) == true` so it doesn't perform any checks of the value and just
//! rearranges some bits into Imm8 order.
static ASMJIT_INLINE_NODEBUG uint32_t encodeFP64ToImm8(uint64_t val) noexcept { return encodeFPToImm8Generic<uint64_t, 9, 6, 48>(val); }
//! \overload
static ASMJIT_INLINE_NODEBUG uint32_t encodeFP64ToImm8(double val) noexcept { return encodeFP64ToImm8(Support::bitCast<uint64_t>(val)); }
} // {Utils}
//! \}
ASMJIT_END_SUB_NAMESPACE
#endif // ASMJIT_ARM_ARMUTILS_H_INCLUDED

View File

@@ -1,17 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifdef _WIN32
#pragma push_macro("min")
#pragma push_macro("max")
#ifdef min
#undef min
#endif
#ifdef max
#undef max
#endif
#endif

View File

@@ -1,9 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifdef _WIN32
#pragma pop_macro("min")
#pragma pop_macro("max")
#endif

View File

@@ -1,35 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// SPDX-License-Identifier: Zlib
// Official GitHub Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2024 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_ASMJIT_H_INCLUDED
#define ASMJIT_ASMJIT_H_INCLUDED
#pragma message("asmjit/asmjit.h is deprecated! Please use asmjit/[core|x86|a64|host].h instead.")
#include "./core.h"
#ifndef ASMJIT_NO_X86
#include "./x86.h"
#endif
#endif // ASMJIT_ASMJIT_H_INCLUDED

File diff suppressed because it is too large

View File

@@ -1,74 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_API_BUILD_P_H_INCLUDED
#define ASMJIT_CORE_API_BUILD_P_H_INCLUDED
#define ASMJIT_EXPORTS
// Only turn-off these warnings when building asmjit itself.
#ifdef _MSC_VER
#ifndef _CRT_SECURE_NO_DEPRECATE
#define _CRT_SECURE_NO_DEPRECATE
#endif
#ifndef _CRT_SECURE_NO_WARNINGS
#define _CRT_SECURE_NO_WARNINGS
#endif
#endif
// Dependencies only required for asmjit build, but never exposed through public headers.
#ifdef _WIN32
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <windows.h>
#else
// Most production code is compiled with large file support, so do the same.
#if !defined(_WIN32) && !defined(_LARGEFILE64_SOURCE)
#define _LARGEFILE64_SOURCE 1
#endif
// These OSes use 64-bit API by default.
#if defined(__APPLE__ ) || \
defined(__HAIKU__ ) || \
defined(__bsdi__ ) || \
defined(__DragonFly__) || \
defined(__FreeBSD__ ) || \
defined(__NetBSD__ ) || \
defined(__OpenBSD__ )
#define ASMJIT_FILE64_API(NAME) NAME
#else
#define ASMJIT_FILE64_API(NAME) NAME##64
#endif
#endif
#include "./api-config.h"
#if !defined(ASMJIT_BUILD_DEBUG) && defined(__GNUC__) && !defined(__clang__)
#define ASMJIT_FAVOR_SIZE __attribute__((__optimize__("Os")))
#define ASMJIT_FAVOR_SPEED __attribute__((__optimize__("O3")))
#elif ASMJIT_CXX_HAS_ATTRIBUTE(__minsize__, 0)
#define ASMJIT_FAVOR_SIZE __attribute__((__minsize__))
#define ASMJIT_FAVOR_SPEED
#else
#define ASMJIT_FAVOR_SIZE
#define ASMJIT_FAVOR_SPEED
#endif
// Make sure '#ifdef'ed unit tests are properly highlighted in IDE.
#if !defined(ASMJIT_TEST) && defined(__INTELLISENSE__)
#define ASMJIT_TEST
#endif
// Include a unit testing package if this is a `asmjit_test_unit` build.
#if defined(ASMJIT_TEST)
#include "../../../test/broken.h"
#endif
#endif // ASMJIT_CORE_API_BUILD_P_H_INCLUDED

View File

@@ -1,697 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_API_CONFIG_H_INCLUDED
#define ASMJIT_CORE_API_CONFIG_H_INCLUDED
// AsmJit Library & ABI Version
// ============================
//! \addtogroup asmjit_core
//! \{
//! Makes a 32-bit integer that represents AsmJit version in `(major << 16) | (minor << 8) | patch` form.
#define ASMJIT_LIBRARY_MAKE_VERSION(major, minor, patch) ((major << 16) | (minor << 8) | (patch))
//! AsmJit library version, see \ref ASMJIT_LIBRARY_MAKE_VERSION for a version format reference.
#define ASMJIT_LIBRARY_VERSION ASMJIT_LIBRARY_MAKE_VERSION(1, 17, 0)
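// For example, version 1.17.0 expands to (1 << 16) | (17 << 8) | 0, i.e. 0x011100.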
//! \def ASMJIT_ABI_NAMESPACE
//!
//! AsmJit ABI namespace is an inline namespace within \ref asmjit namespace.
//!
//! It's used to make sure that when a user links against an incompatible version of AsmJit, the link fails. It
//! has some additional properties as well. When `ASMJIT_ABI_NAMESPACE` is defined by the user it overrides the
//! AsmJit default, which makes it possible to use multiple AsmJit libraries within a single project, fully
//! controlled by users. This is especially useful when some of these libraries come from a third party.
#if !defined(ASMJIT_ABI_NAMESPACE)
#define ASMJIT_ABI_NAMESPACE v1_17
#endif // !ASMJIT_ABI_NAMESPACE
//! \}
// Global Dependencies
// ===================
#include <stdarg.h>
#include <stddef.h>
#include <stdint.h> // We really want std types as globals, not under 'std' namespace.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <initializer_list>
#include <limits>
#include <type_traits>
#include <utility>
#if !defined(_WIN32) && !defined(__EMSCRIPTEN__)
#include <pthread.h>
#endif
// Build Options
// =============
// NOTE: Doxygen cannot document macros that are not defined, that's why we have to define them and then undefine
// them immediately, so it won't use the macros with its own preprocessor.
#ifdef _DOXYGEN
namespace asmjit {
//! \addtogroup asmjit_build
//! \{
//! AsmJit is embedded, implies \ref ASMJIT_STATIC.
#define ASMJIT_EMBED
//! Enables static-library build.
#define ASMJIT_STATIC
//! Defined when AsmJit's build configuration is 'Debug'.
//!
//! \note Can be defined explicitly to bypass auto-detection.
#define ASMJIT_BUILD_DEBUG
//! Defined when AsmJit's build configuration is 'Release'.
//!
//! \note Can be defined explicitly to bypass auto-detection.
#define ASMJIT_BUILD_RELEASE
//! Disables deprecated API at compile time (deprecated API won't be available).
#define ASMJIT_NO_DEPRECATED
//! Disables the use of an inline ABI namespace within asmjit namespace (the inline namespace is used as an ABI tag).
#define ASMJIT_NO_ABI_NAMESPACE
//! Disables X86/X64 backends.
#define ASMJIT_NO_X86
//! Disables AArch64 backend.
#define ASMJIT_NO_AARCH64
//! Disables the use of `shm_open` on all targets even when it's supported.
#define ASMJIT_NO_SHM_OPEN
//! Disables JIT memory management and \ref asmjit::JitRuntime.
#define ASMJIT_NO_JIT
//! Disables \ref asmjit::Logger and \ref asmjit::Formatter.
#define ASMJIT_NO_LOGGING
//! Disables everything that contains text.
#define ASMJIT_NO_TEXT
//! Disables instruction validation API.
#define ASMJIT_NO_VALIDATION
//! Disables instruction introspection API.
#define ASMJIT_NO_INTROSPECTION
//! Disables non-host backends entirely (useful for JIT compilers to minimize the library size).
#define ASMJIT_NO_FOREIGN
//! Disables \ref asmjit_builder functionality completely.
#define ASMJIT_NO_BUILDER
//! Disables \ref asmjit_compiler functionality completely.
#define ASMJIT_NO_COMPILER
// Avoid doxygen preprocessor using feature-selection definitions.
#undef ASMJIT_BUILD_EMBED
#undef ASMJIT_BUILD_STATIC
#undef ASMJIT_BUILD_DEBUG
#undef ASMJIT_BUILD_RELEASE
// (keep ASMJIT_NO_DEPRECATED defined, we don't document deprecated APIs).
#undef ASMJIT_NO_ABI_NAMESPACE
#undef ASMJIT_NO_X86
#undef ASMJIT_NO_AARCH64
#undef ASMJIT_NO_FOREIGN
#undef ASMJIT_NO_JIT
#undef ASMJIT_NO_LOGGING
#undef ASMJIT_NO_TEXT
#undef ASMJIT_NO_VALIDATION
#undef ASMJIT_NO_INTROSPECTION
#undef ASMJIT_NO_BUILDER
#undef ASMJIT_NO_COMPILER
#undef ASMJIT_NO_UJIT
//! \}
} // {asmjit}
#endif // _DOXYGEN
// ASMJIT_NO_BUILDER implies ASMJIT_NO_COMPILER.
#if defined(ASMJIT_NO_BUILDER) && !defined(ASMJIT_NO_COMPILER)
#define ASMJIT_NO_COMPILER
#endif
// ASMJIT_NO_COMPILER implies ASMJIT_NO_UJIT.
#if defined(ASMJIT_NO_COMPILER) && !defined(ASMJIT_NO_UJIT)
#define ASMJIT_NO_UJIT
#endif
// Prevent compile-time errors caused by misconfiguration.
#if defined(ASMJIT_NO_TEXT) && !defined(ASMJIT_NO_LOGGING)
#pragma message("'ASMJIT_NO_TEXT' can only be defined when 'ASMJIT_NO_LOGGING' is defined.")
#undef ASMJIT_NO_TEXT
#endif
#if defined(ASMJIT_NO_INTROSPECTION) && !defined(ASMJIT_NO_COMPILER)
#pragma message("'ASMJIT_NO_INTROSPECTION' can only be defined when 'ASMJIT_NO_COMPILER' is defined")
#undef ASMJIT_NO_INTROSPECTION
#endif
// Build Mode
// ==========
// Detect ASMJIT_BUILD_DEBUG and ASMJIT_BUILD_RELEASE if not defined.
#if !defined(ASMJIT_BUILD_DEBUG) && !defined(ASMJIT_BUILD_RELEASE)
#if !defined(NDEBUG)
#define ASMJIT_BUILD_DEBUG
#else
#define ASMJIT_BUILD_RELEASE
#endif
#endif
// Target Architecture Detection
// =============================
//! \addtogroup asmjit_core
//! \{
//! \def ASMJIT_ARCH_X86
//!
//! Defined to either 0, 32, or 64 depending on whether the target CPU is X86 (32) or X86_64 (64).
//! \def ASMJIT_ARCH_ARM
//!
//! Defined to either 0, 32, or 64 depending on whether the target CPU is ARM (32) or AArch64 (64).
//! \def ASMJIT_ARCH_MIPS
//!
//! Defined to either 0, 32, or 64 depending on whether the target CPU is MIPS (32) or MIPS64 (64).
//! \def ASMJIT_ARCH_RISCV
//!
//! Defined to either 0, 32, or 64 depending on whether the target CPU is RV32 (32) or RV64 (64).
//! \def ASMJIT_ARCH_BITS
//!
//! Defined to either 32 or 64 depending on the target.
//! \def ASMJIT_ARCH_LE
//!
//! Defined to 1 if the target architecture is little endian.
//! \def ASMJIT_ARCH_BE
//!
//! Defined to 1 if the target architecture is big endian.
//! \def ASMJIT_HAS_HOST_BACKEND
//!
//! Defined when AsmJit is built with the target architecture backend.
//!
//! For example if AsmJit is built for x86 or x86_64 architectures and `ASMJIT_NO_X86` is not defined,
//! it would define `ASMJIT_HAS_HOST_BACKEND` when `<asmjit/core.h>` or `<asmjit/host.h>` is included.
//! \}
//! \cond NONE
#if defined(_M_X64) || defined(__x86_64__)
#define ASMJIT_ARCH_X86 64
#elif defined(_M_IX86) || defined(__X86__) || defined(__i386__)
#define ASMJIT_ARCH_X86 32
#else
#define ASMJIT_ARCH_X86 0
#endif
#if defined(_M_ARM64) || defined(__arm64__) || defined(__aarch64__)
# define ASMJIT_ARCH_ARM 64
#elif defined(_M_ARM) || defined(_M_ARMT) || defined(__arm__) || defined(__thumb__) || defined(__thumb2__)
#define ASMJIT_ARCH_ARM 32
#else
#define ASMJIT_ARCH_ARM 0
#endif
#if defined(_MIPS_ARCH_MIPS64) || defined(__mips64)
#define ASMJIT_ARCH_MIPS 64
#elif defined(_MIPS_ARCH_MIPS32) || defined(_M_MRX000) || defined(__mips__)
#define ASMJIT_ARCH_MIPS 32
#else
#define ASMJIT_ARCH_MIPS 0
#endif
// NOTE: `__riscv` is the correct macro in this case as specified by "RISC-V Toolchain Conventions".
#if (defined(__riscv) || defined(__riscv__)) && defined(__riscv_xlen)
#define ASMJIT_ARCH_RISCV __riscv_xlen
#else
#define ASMJIT_ARCH_RISCV 0
#endif
#define ASMJIT_ARCH_BITS (ASMJIT_ARCH_X86 | ASMJIT_ARCH_ARM | ASMJIT_ARCH_MIPS | ASMJIT_ARCH_RISCV)
#if ASMJIT_ARCH_BITS == 0
#undef ASMJIT_ARCH_BITS
#if defined(__LP64__) || defined(_LP64)
#define ASMJIT_ARCH_BITS 64
#else
#define ASMJIT_ARCH_BITS 32
#endif
#endif
#if (defined(__ARMEB__)) || \
(defined(__MIPSEB__)) || \
(defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__))
#define ASMJIT_ARCH_LE 0
#define ASMJIT_ARCH_BE 1
#else
#define ASMJIT_ARCH_LE 1
#define ASMJIT_ARCH_BE 0
#endif
#if defined(ASMJIT_NO_FOREIGN)
#if !ASMJIT_ARCH_X86 && !defined(ASMJIT_NO_X86)
#define ASMJIT_NO_X86
#endif
#if ASMJIT_ARCH_ARM != 64 && !defined(ASMJIT_NO_AARCH64)
#define ASMJIT_NO_AARCH64
#endif
#endif
#if ASMJIT_ARCH_X86 != 0 && !defined(ASMJIT_NO_X86)
#define ASMJIT_HAS_HOST_BACKEND
#endif
#if ASMJIT_ARCH_ARM == 64 && !defined(ASMJIT_NO_AARCH64)
#define ASMJIT_HAS_HOST_BACKEND
#endif
#if !defined(ASMJIT_NO_UJIT)
#if !defined(ASMJIT_NO_X86) && ASMJIT_ARCH_X86 != 0
#define ASMJIT_UJIT_X86
#elif !defined(ASMJIT_NO_AARCH64) && ASMJIT_ARCH_ARM == 64
#define ASMJIT_UJIT_AARCH64
#else
#define ASMJIT_NO_UJIT
#endif
#endif
//! \endcond
// C++ Compiler and Features Detection
// ===================================
#if defined(__GNUC__) && defined(__has_attribute)
#define ASMJIT_CXX_HAS_ATTRIBUTE(NAME, CHECK) (__has_attribute(NAME))
#else
#define ASMJIT_CXX_HAS_ATTRIBUTE(NAME, CHECK) (!(!(CHECK)))
#endif // !ASMJIT_CXX_HAS_ATTRIBUTE
// API Decorators & C++ Extensions
// ===============================
//! \addtogroup asmjit_core
//! \{
//! \def ASMJIT_API
//!
//! A decorator that is used to decorate API that AsmJit exports when built as a shared library.
//! \def ASMJIT_VIRTAPI
//!
//! This is basically a workaround. When using MSVC and marking a class as DLL export, everything gets exported,
//! which is unwanted in most projects. MSVC automatically exports typeinfo and vtable if at least one symbol of
//! the class is exported. However, GCC has some strange behavior: even if one or more symbols are exported, it
//! doesn't export typeinfo unless the class itself is decorated with "visibility(default)" (i.e. ASMJIT_API).
//! \def ASMJIT_INLINE
//!
//! Decorator to force inlining of functions, uses either `__attribute__((__always_inline__))` or `__forceinline`,
//! depending on the C++ compiler.
//! \def ASMJIT_INLINE_NODEBUG
//!
//! Like \ref ASMJIT_INLINE, but additionally uses the `__nodebug__` or `__artificial__` attribute to make
//! debugging of some AsmJit functions easier, especially getters and one-line abstractions where you usually
//! don't want to step in.
//! \def ASMJIT_INLINE_CONSTEXPR
//!
//! Like \ref ASMJIT_INLINE_NODEBUG, but having an additional `constexpr` attribute.
//! \def ASMJIT_NOINLINE
//!
//! Decorator to avoid inlining of functions, uses either `__attribute__((__noinline__))` or `__declspec(noinline)`
//! depending on the C++ compiler.
//! \def ASMJIT_CDECL
//!
//! CDECL function attribute - either `__attribute__((__cdecl__))` or `__cdecl`.
//! \def ASMJIT_STDCALL
//!
//! STDCALL function attribute - either `__attribute__((__stdcall__))` or `__stdcall`.
//!
//! \note This expands to nothing on non-x86 targets as STDCALL is X86 specific.
//! \def ASMJIT_FASTCALL
//!
//! FASTCALL function attribute - either `__attribute__((__fastcall__))` or `__fastcall`.
//!
//! \note Expands to nothing on non-x86 targets as FASTCALL is X86 specific.
//! \def ASMJIT_REGPARM(N)
//!
//! Expands to `__attribute__((__regparm__(N)))` when compiled by GCC or clang, nothing otherwise.
//! \def ASMJIT_VECTORCALL
//!
//! VECTORCALL function attribute - either `__attribute__((__vectorcall__))` or `__vectorcall`.
//!
//! \note Expands to nothing on non-x86 targets as VECTORCALL is X86 specific.
//! \}
// API (Export / Import).
#if !defined(ASMJIT_STATIC)
#if defined(_WIN32) && (defined(_MSC_VER) || defined(__MINGW32__))
#ifdef ASMJIT_EXPORTS
#define ASMJIT_API __declspec(dllexport)
#else
#define ASMJIT_API __declspec(dllimport)
#endif
#elif defined(_WIN32) && defined(__GNUC__)
#ifdef ASMJIT_EXPORTS
#define ASMJIT_API __attribute__((__dllexport__))
#else
#define ASMJIT_API __attribute__((__dllimport__))
#endif
#elif defined(__GNUC__)
#define ASMJIT_API __attribute__((__visibility__("default")))
#endif
#endif
#if !defined(ASMJIT_API)
#define ASMJIT_API
#endif
#if !defined(ASMJIT_VARAPI)
#define ASMJIT_VARAPI extern ASMJIT_API
#endif
#if defined(__GNUC__) && !defined(_WIN32)
#define ASMJIT_VIRTAPI ASMJIT_API
#else
#define ASMJIT_VIRTAPI
#endif
// Function attributes.
#if !defined(ASMJIT_BUILD_DEBUG) && defined(__GNUC__)
#define ASMJIT_INLINE inline __attribute__((__always_inline__))
#elif !defined(ASMJIT_BUILD_DEBUG) && defined(_MSC_VER)
#define ASMJIT_INLINE __forceinline
#else
#define ASMJIT_INLINE inline
#endif
#if defined(__clang__)
#define ASMJIT_INLINE_NODEBUG inline __attribute__((__always_inline__, __nodebug__))
#elif defined(__GNUC__)
#define ASMJIT_INLINE_NODEBUG inline __attribute__((__always_inline__, __artificial__))
#else
#define ASMJIT_INLINE_NODEBUG inline
#endif
#define ASMJIT_INLINE_CONSTEXPR constexpr ASMJIT_INLINE_NODEBUG
#if defined(__GNUC__)
#define ASMJIT_NOINLINE __attribute__((__noinline__))
#elif defined(_MSC_VER)
#define ASMJIT_NOINLINE __declspec(noinline)
#else
#define ASMJIT_NOINLINE
#endif
// Calling conventions.
#if ASMJIT_ARCH_X86 == 32 && defined(__GNUC__)
#define ASMJIT_CDECL __attribute__((__cdecl__))
#define ASMJIT_STDCALL __attribute__((__stdcall__))
#define ASMJIT_FASTCALL __attribute__((__fastcall__))
#define ASMJIT_REGPARM(N) __attribute__((__regparm__(N)))
#elif ASMJIT_ARCH_X86 == 32 && defined(_MSC_VER)
#define ASMJIT_CDECL __cdecl
#define ASMJIT_STDCALL __stdcall
#define ASMJIT_FASTCALL __fastcall
#define ASMJIT_REGPARM(N)
#else
#define ASMJIT_CDECL
#define ASMJIT_STDCALL
#define ASMJIT_FASTCALL
#define ASMJIT_REGPARM(N)
#endif
#if ASMJIT_ARCH_X86 && defined(_WIN32) && defined(_MSC_VER)
#define ASMJIT_VECTORCALL __vectorcall
#elif ASMJIT_ARCH_X86 && defined(_WIN32)
#define ASMJIT_VECTORCALL __attribute__((__vectorcall__))
#else
#define ASMJIT_VECTORCALL
#endif
// Type alignment (not allowed by C++17 'alignas' keyword).
#if defined(__GNUC__)
#define ASMJIT_ALIGN_TYPE(N, ...) __attribute__((__aligned__(N))) __VA_ARGS__
#elif defined(_MSC_VER)
#define ASMJIT_ALIGN_TYPE(N, ...) __declspec(align(N)) __VA_ARGS__
#else
#define ASMJIT_ALIGN_TYPE(N, ...) __VA_ARGS__
#endif
//! \def ASMJIT_MAY_ALIAS
//!
//! Expands to `__attribute__((__may_alias__))` if supported.
#if defined(__GNUC__)
#define ASMJIT_MAY_ALIAS __attribute__((__may_alias__))
#else
#define ASMJIT_MAY_ALIAS
#endif
#if defined(__clang__) && !defined(_DOXYGEN)
// NOTE: Clang allows applying this attribute to function arguments, which is what we want. Once GCC decides
// to support this use, we will enable it for GCC as well. Until then it remains Clang only, which is what we
// need for static analysis.
#define ASMJIT_NONNULL(FUNCTION_ARGUMENT) FUNCTION_ARGUMENT __attribute__((__nonnull__))
#else
#define ASMJIT_NONNULL(FUNCTION_ARGUMENT) FUNCTION_ARGUMENT
#endif
//! \def ASMJIT_ASSUME(...)
//!
//! Macro that tells the C/C++ compiler that the expression `...` evaluates to true.
//!
//! This macro has two purposes:
//!
//! 1. Enable optimizations that would not be possible without the assumption.
//! 2. Hint static analysis tools that a certain condition is true to prevent false positives.
#if defined(__clang__)
#define ASMJIT_ASSUME(...) __builtin_assume(__VA_ARGS__)
#elif defined(__GNUC__)
#define ASMJIT_ASSUME(...) do { if (!(__VA_ARGS__)) __builtin_unreachable(); } while (0)
#elif defined(_MSC_VER)
#define ASMJIT_ASSUME(...) __assume(__VA_ARGS__)
#else
#define ASMJIT_ASSUME(...) (void)0
#endif
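// Usage sketch (illustrative helper, not an AsmJit function):
//
//   static inline uint32_t divRoundUp(uint32_t x, uint32_t y) noexcept {
//     ASMJIT_ASSUME(y != 0); // allows the compiler to drop the y == 0 path
//     return (x + y - 1u) / y;
//   }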
//! \def ASMJIT_LIKELY(...)
//!
//! Condition is likely to be taken (mostly error handling and edge cases).
//! \def ASMJIT_UNLIKELY(...)
//!
//! Condition is unlikely to be taken (mostly error handling and edge cases).
#if defined(__GNUC__)
#define ASMJIT_LIKELY(...) __builtin_expect(!!(__VA_ARGS__), 1)
#define ASMJIT_UNLIKELY(...) __builtin_expect(!!(__VA_ARGS__), 0)
#else
#define ASMJIT_LIKELY(...) (__VA_ARGS__)
#define ASMJIT_UNLIKELY(...) (__VA_ARGS__)
#endif
// Utilities.
#define ASMJIT_OFFSET_OF(STRUCT, MEMBER) ((int)(intptr_t)((const char*)&((const STRUCT*)0x100)->MEMBER) - 0x100)
#define ASMJIT_ARRAY_SIZE(X) uint32_t(sizeof(X) / sizeof(X[0]))
#if ASMJIT_CXX_HAS_ATTRIBUTE(no_sanitize, 0)
#define ASMJIT_ATTRIBUTE_NO_SANITIZE_UNDEF __attribute__((__no_sanitize__("undefined")))
#elif defined(__GNUC__)
#define ASMJIT_ATTRIBUTE_NO_SANITIZE_UNDEF __attribute__((__no_sanitize_undefined__))
#else
#define ASMJIT_ATTRIBUTE_NO_SANITIZE_UNDEF
#endif
// Diagnostic Macros
// =================
#if defined(_MSC_VER) && !defined(__clang__) && !defined(_DOXYGEN)
#define ASMJIT_BEGIN_DIAGNOSTIC_SCOPE \
__pragma(warning(push)) \
__pragma(warning(disable: 4127)) /* conditional expression is const */ \
__pragma(warning(disable: 4201)) /* nameless struct/union */
#define ASMJIT_END_DIAGNOSTIC_SCOPE \
__pragma(warning(pop))
#else
#define ASMJIT_BEGIN_DIAGNOSTIC_SCOPE
#define ASMJIT_END_DIAGNOSTIC_SCOPE
#endif
// Begin-Namespace & End-Namespace Macros
// ======================================
#if !defined(ASMJIT_NO_ABI_NAMESPACE) && !defined(_DOXYGEN)
#define ASMJIT_BEGIN_NAMESPACE \
ASMJIT_BEGIN_DIAGNOSTIC_SCOPE \
namespace asmjit { \
inline namespace ASMJIT_ABI_NAMESPACE {
#define ASMJIT_END_NAMESPACE \
}} \
ASMJIT_END_DIAGNOSTIC_SCOPE
#else
#define ASMJIT_BEGIN_NAMESPACE \
ASMJIT_BEGIN_DIAGNOSTIC_SCOPE \
namespace asmjit {
#define ASMJIT_END_NAMESPACE \
} \
ASMJIT_END_DIAGNOSTIC_SCOPE
#endif
#define ASMJIT_BEGIN_SUB_NAMESPACE(NAMESPACE) ASMJIT_BEGIN_NAMESPACE namespace NAMESPACE {
#define ASMJIT_END_SUB_NAMESPACE } ASMJIT_END_NAMESPACE
// C++ Utilities
// =============
#define ASMJIT_NONCOPYABLE(Type) \
Type(const Type& other) = delete; \
Type& operator=(const Type& other) = delete;
#define ASMJIT_NONCONSTRUCTIBLE(Type) \
Type() = delete; \
Type(const Type& other) = delete; \
Type& operator=(const Type& other) = delete;
//! \def ASMJIT_DEFINE_ENUM_FLAGS(T)
//!
//! Defines bit operations for enumeration flags.
#ifdef _DOXYGEN
#define ASMJIT_DEFINE_ENUM_FLAGS(T)
#else
#define ASMJIT_DEFINE_ENUM_FLAGS(T) \
static ASMJIT_INLINE_CONSTEXPR T operator~(T a) noexcept { \
return T(~std::underlying_type_t<T>(a)); \
} \
\
static ASMJIT_INLINE_CONSTEXPR T operator|(T a, T b) noexcept { \
return T(std::underlying_type_t<T>(a) | std::underlying_type_t<T>(b)); \
} \
static ASMJIT_INLINE_CONSTEXPR T operator&(T a, T b) noexcept { \
return T(std::underlying_type_t<T>(a) & std::underlying_type_t<T>(b)); \
} \
static ASMJIT_INLINE_CONSTEXPR T operator^(T a, T b) noexcept { \
return T(std::underlying_type_t<T>(a) ^ std::underlying_type_t<T>(b)); \
} \
\
static ASMJIT_INLINE_CONSTEXPR T& operator|=(T& a, T b) noexcept { \
a = T(std::underlying_type_t<T>(a) | std::underlying_type_t<T>(b)); \
return a; \
} \
static ASMJIT_INLINE_CONSTEXPR T& operator&=(T& a, T b) noexcept { \
a = T(std::underlying_type_t<T>(a) & std::underlying_type_t<T>(b)); \
return a; \
} \
static ASMJIT_INLINE_CONSTEXPR T& operator^=(T& a, T b) noexcept { \
a = T(std::underlying_type_t<T>(a) ^ std::underlying_type_t<T>(b)); \
return a; \
}
#endif
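// Usage sketch (FileFlags is illustrative, not an AsmJit type):
//
//   enum class FileFlags : uint32_t { kNone = 0, kRead = 0x1, kWrite = 0x2 };
//   ASMJIT_DEFINE_ENUM_FLAGS(FileFlags)
//
//   FileFlags f = FileFlags::kRead | FileFlags::kWrite;
//   bool writable = (f & FileFlags::kWrite) == FileFlags::kWrite;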
//! \def ASMJIT_DEFINE_ENUM_COMPARE(T)
//!
//! Defines comparison operations for enumeration flags.
#if defined(_DOXYGEN) || (defined(_MSC_VER) && _MSC_VER <= 1900)
#define ASMJIT_DEFINE_ENUM_COMPARE(T)
#else
#define ASMJIT_DEFINE_ENUM_COMPARE(T) \
static ASMJIT_INLINE_CONSTEXPR bool operator<(T a, T b) noexcept { \
return (std::underlying_type_t<T>)(a) < (std::underlying_type_t<T>)(b); \
} \
static ASMJIT_INLINE_CONSTEXPR bool operator<=(T a, T b) noexcept { \
return (std::underlying_type_t<T>)(a) <= (std::underlying_type_t<T>)(b); \
} \
static ASMJIT_INLINE_CONSTEXPR bool operator>(T a, T b) noexcept { \
return (std::underlying_type_t<T>)(a) > (std::underlying_type_t<T>)(b); \
} \
static ASMJIT_INLINE_CONSTEXPR bool operator>=(T a, T b) noexcept { \
return (std::underlying_type_t<T>)(a) >= (std::underlying_type_t<T>)(b); \
}
#endif
//! Defines a strong type `C` that wraps a value of `T`.
#define ASMJIT_DEFINE_STRONG_TYPE(C, T) \
struct C { \
T v; \
\
ASMJIT_INLINE_NODEBUG C() = default; \
ASMJIT_INLINE_CONSTEXPR explicit C(T x) noexcept : v(x) {} \
ASMJIT_INLINE_CONSTEXPR C(const C& other) noexcept = default; \
\
ASMJIT_INLINE_CONSTEXPR T value() const noexcept { return v; } \
\
ASMJIT_INLINE_CONSTEXPR T* valuePtr() noexcept { return &v; } \
ASMJIT_INLINE_CONSTEXPR const T* valuePtr() const noexcept { return &v; } \
\
ASMJIT_INLINE_CONSTEXPR C& operator=(T x) noexcept { v = x; return *this; }; \
ASMJIT_INLINE_CONSTEXPR C& operator=(const C& x) noexcept { v = x.v; return *this; } \
\
ASMJIT_INLINE_CONSTEXPR C operator+(T x) const noexcept { return C(v + x); } \
ASMJIT_INLINE_CONSTEXPR C operator-(T x) const noexcept { return C(v - x); } \
ASMJIT_INLINE_CONSTEXPR C operator*(T x) const noexcept { return C(v * x); } \
ASMJIT_INLINE_CONSTEXPR C operator/(T x) const noexcept { return C(v / x); } \
\
ASMJIT_INLINE_CONSTEXPR C operator+(const C& x) const noexcept { return C(v + x.v); } \
ASMJIT_INLINE_CONSTEXPR C operator-(const C& x) const noexcept { return C(v - x.v); } \
ASMJIT_INLINE_CONSTEXPR C operator*(const C& x) const noexcept { return C(v * x.v); } \
ASMJIT_INLINE_CONSTEXPR C operator/(const C& x) const noexcept { return C(v / x.v); } \
\
ASMJIT_INLINE_CONSTEXPR C& operator+=(T x) noexcept { v += x; return *this; } \
ASMJIT_INLINE_CONSTEXPR C& operator-=(T x) noexcept { v -= x; return *this; } \
ASMJIT_INLINE_CONSTEXPR C& operator*=(T x) noexcept { v *= x; return *this; } \
ASMJIT_INLINE_CONSTEXPR C& operator/=(T x) noexcept { v /= x; return *this; } \
\
ASMJIT_INLINE_CONSTEXPR C& operator+=(const C& x) noexcept { v += x.v; return *this; } \
ASMJIT_INLINE_CONSTEXPR C& operator-=(const C& x) noexcept { v -= x.v; return *this; } \
ASMJIT_INLINE_CONSTEXPR C& operator*=(const C& x) noexcept { v *= x.v; return *this; } \
ASMJIT_INLINE_CONSTEXPR C& operator/=(const C& x) noexcept { v /= x.v; return *this; } \
\
ASMJIT_INLINE_CONSTEXPR bool operator==(T x) const noexcept { return v == x; } \
ASMJIT_INLINE_CONSTEXPR bool operator!=(T x) const noexcept { return v != x; } \
ASMJIT_INLINE_CONSTEXPR bool operator> (T x) const noexcept { return v > x; } \
ASMJIT_INLINE_CONSTEXPR bool operator>=(T x) const noexcept { return v >= x; } \
ASMJIT_INLINE_CONSTEXPR bool operator< (T x) const noexcept { return v < x; } \
ASMJIT_INLINE_CONSTEXPR bool operator<=(T x) const noexcept { return v <= x; } \
\
ASMJIT_INLINE_CONSTEXPR bool operator==(const C& x) const noexcept { return v == x.v; } \
ASMJIT_INLINE_CONSTEXPR bool operator!=(const C& x) const noexcept { return v != x.v; } \
ASMJIT_INLINE_CONSTEXPR bool operator> (const C& x) const noexcept { return v > x.v; } \
ASMJIT_INLINE_CONSTEXPR bool operator>=(const C& x) const noexcept { return v >= x.v; } \
ASMJIT_INLINE_CONSTEXPR bool operator< (const C& x) const noexcept { return v < x.v; } \
ASMJIT_INLINE_CONSTEXPR bool operator<=(const C& x) const noexcept { return v <= x.v; } \
};
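// Usage sketch (ByteOffset is illustrative, not an AsmJit type):
//
//   ASMJIT_DEFINE_STRONG_TYPE(ByteOffset, uint32_t)
//
//   ByteOffset off(8);
//   off += 4u; // off.value() == 12; distinct strong types never mix implicitly.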
#endif // ASMJIT_CORE_API_CONFIG_H_INCLUDED

View File

@@ -1,268 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_ARCHCOMMONS_H_INCLUDED
#define ASMJIT_CORE_ARCHCOMMONS_H_INCLUDED
// This file provides architecture-specific classes that are required in the core library. For example, the Imm
// operand can be created from arm::Shift in a constexpr way, so arm::Shift must be provided here. This header
// therefore provides everything architecture-specific that is used by the Core API.
#include "../core/globals.h"
ASMJIT_BEGIN_SUB_NAMESPACE(arm)
//! \addtogroup asmjit_arm
//! \{
//! Condition code (both AArch32 & AArch64).
//!
//! \note This enumeration doesn't match the condition code encoding used in AArch32/AArch64 opcodes. In general
//! the condition code is encoded as `(cc - 2) & 0xF`, so the `kAL` condition code is zero in this enumeration
//! while it is encoded as 0xE in opcodes. This makes it easier to use a condition code as an instruction modifier
//! that defaults to 'al'.
enum class CondCode : uint8_t {
kAL = 0x00u, //!< (no condition code) (always)
kNA = 0x01u, //!< (not available) (special)
kEQ = 0x02u, //!< Z==1 (any_sign ==)
kNE = 0x03u, //!< Z==0 (any_sign !=)
kCS = 0x04u, //!< C==1 (unsigned >=)
kHS = 0x04u, //!< C==1 (unsigned >=)
kLO = 0x05u, //!< C==0 (unsigned < )
kCC = 0x05u, //!< C==0 (unsigned < )
kMI = 0x06u, //!< N==1 (is negative)
kPL = 0x07u, //!< N==0 (is positive or zero)
kVS = 0x08u, //!< V==1 (is overflow)
kVC = 0x09u, //!< V==0 (no overflow)
kHI = 0x0Au, //!< C==1 & Z==0 (unsigned > )
kLS = 0x0Bu, //!< C==0 | Z==1 (unsigned <=)
kGE = 0x0Cu, //!< N==V (signed >=)
kLT = 0x0Du, //!< N!=V (signed < )
kGT = 0x0Eu, //!< Z==0 & N==V (signed > )
kLE = 0x0Fu, //!< Z==1 | N!=V (signed <=)
kZero = kEQ, //!< Zero flag (alias to equal).
kNotZero = kNE, //!< Not zero (alias to Not Equal).
kEqual = kEQ, //!< Equal `a == b`.
kNotEqual = kNE, //!< Not Equal `a != b`.
kCarry = kCS, //!< Carry flag.
kNotCarry = kCC, //!< Not carry.
kSign = kMI, //!< Sign flag.
kNotSign = kPL, //!< Not sign.
kNegative = kMI, //!< Negative.
kPositive = kPL, //!< Positive or zero.
kOverflow = kVS, //!< Signed overflow.
kNotOverflow = kVC, //!< Not signed overflow.
kSignedLT = kLT, //!< Signed `a < b`.
kSignedLE = kLE, //!< Signed `a <= b`.
kSignedGT = kGT, //!< Signed `a > b`.
kSignedGE = kGE, //!< Signed `a >= b`.
kUnsignedLT = kLO, //!< Unsigned `a < b`.
kUnsignedLE = kLS, //!< Unsigned `a <= b`.
kUnsignedGT = kHI, //!< Unsigned `a > b`.
kUnsignedGE = kHS, //!< Unsigned `a >= b`.
kBTZero = kZero, //!< Tested bit is zero.
kBTNotZero = kNotZero, //!< Tested bit is not zero.
kAlways = kAL, //!< No condition code (always).
kMaxValue = 0x0Fu //!< Maximum value of `CondCode`.
};
//! \cond
static constexpr CondCode _reverseCondTable[] = {
CondCode::kAL, // AL <- AL
CondCode::kNA, // NA <- NA
CondCode::kEQ, // EQ <- EQ
CondCode::kNE, // NE <- NE
CondCode::kLS, // LS <- CS
CondCode::kHI, // HI <- LO
CondCode::kMI, // MI <- MI
CondCode::kPL, // PL <- PL
CondCode::kVS, // VS <- VS
CondCode::kVC, // VC <- VC
CondCode::kLO, // LO <- HI
CondCode::kCS, // CS <- LS
CondCode::kLE, // LE <- GE
CondCode::kGT, // GT <- LT
CondCode::kLT, // LT <- GT
CondCode::kGE // GE <- LE
};
//! \endcond
//! Reverses a condition code (reverses the corresponding operands of a comparison).
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR CondCode reverseCond(CondCode cond) noexcept { return _reverseCondTable[uint8_t(cond)]; }
//! Negates a condition code.
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR CondCode negateCond(CondCode cond) noexcept { return CondCode(uint8_t(cond) ^ uint8_t(1)); }
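// For example, negateCond(CondCode::kEQ) == CondCode::kNE (the low bit flips
// the condition), while reverseCond(CondCode::kLT) == CondCode::kGT (swapping
// the operands of 'a < b' yields 'b > a').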
//! Memory offset mode.
//!
//! Describes either fixed, pre-index, or post-index offset modes.
enum class OffsetMode : uint32_t {
//! Fixed offset mode (either no index at all or a regular index without a write-back).
kFixed = 0u,
//! Pre-index "[BASE, #Offset {, <shift>}]!" with write-back.
kPreIndex = 1u,
//! Post-index "[BASE], #Offset {, <shift>}" with write-back.
kPostIndex = 2u
};
//! Shift operation predicate (ARM) describes either SHIFT or EXTEND operation.
//!
//! \note The constants are AsmJit specific. The first 5 values describe real constants on ARM32 and AArch64 hardware,
//! however, the additional constants that describe extend modes are specific to AsmJit and would be translated to the
//! AArch64 specific constants by the assembler.
enum class ShiftOp : uint32_t {
//! Shift left logical operation (default).
//!
//! Available to all ARM architectures.
kLSL = 0x00u,
//! Shift right logical operation.
//!
//! Available to all ARM architectures.
kLSR = 0x01u,
//! Shift right arithmetic operation.
//!
//! Available to all ARM architectures.
kASR = 0x02u,
//! Rotate right operation (AArch32 only).
kROR = 0x03u,
//! Rotate right with carry operation (encoded as `ShiftOp::kROR` with zero) (AArch32 only).
kRRX = 0x04u,
//! Shift left by filling low order bits with ones.
kMSL = 0x05u,
//! UXTB extend register operation (AArch64 only).
kUXTB = 0x06u,
//! UXTH extend register operation (AArch64 only).
kUXTH = 0x07u,
//! UXTW extend register operation (AArch64 only).
kUXTW = 0x08u,
//! UXTX extend register operation (AArch64 only).
kUXTX = 0x09u,
//! SXTB extend register operation (AArch64 only).
kSXTB = 0x0Au,
//! SXTH extend register operation (AArch64 only).
kSXTH = 0x0Bu,
//! SXTW extend register operation (AArch64 only).
kSXTW = 0x0Cu,
//! SXTX extend register operation (AArch64 only).
kSXTX = 0x0Du
// NOTE: 0xE and 0xF are used by memory operand to specify POST|PRE offset mode.
};
//! Represents ARM immediate shift operation type and value.
class Shift {
public:
//! Shift operation.
ShiftOp _op;
//! Shift Value.
uint32_t _value;
//! Default constructed Shift is not initialized.
ASMJIT_INLINE_NODEBUG Shift() noexcept = default;
//! Copy constructor (default)
ASMJIT_INLINE_CONSTEXPR Shift(const Shift& other) noexcept = default;
//! Constructs Shift from operation `op` and shift `value`.
ASMJIT_INLINE_CONSTEXPR Shift(ShiftOp op, uint32_t value) noexcept
: _op(op),
_value(value) {}
//! Returns the shift operation.
[[nodiscard]]
ASMJIT_INLINE_CONSTEXPR ShiftOp op() const noexcept { return _op; }
//! Sets shift operation to `op`.
ASMJIT_INLINE_NODEBUG void setOp(ShiftOp op) noexcept { _op = op; }
//! Returns the shift amount.
[[nodiscard]]
ASMJIT_INLINE_CONSTEXPR uint32_t value() const noexcept { return _value; }
//! Sets shift amount to `value`.
ASMJIT_INLINE_NODEBUG void setValue(uint32_t value) noexcept { _value = value; }
};
//! \}
ASMJIT_END_SUB_NAMESPACE
ASMJIT_BEGIN_SUB_NAMESPACE(a32)
using namespace arm;
//! Data type that can be encoded with an AArch32 instruction identifier.
//!
//! \note Data types are frequently used with AArch32 SIMD instructions. For example, the `VMAX` instruction can
//! use almost all data types in forms such as `VMAX.F32`, `VMAX.S16`, and `VMAX.U32`. The emitter automatically
//! adds the required data type at emit time.
enum class DataType : uint32_t {
//! No data type specified (default for all general purpose instructions).
kNone = 0,
//! 8-bit signed integer, specified as `.s8` in assembly.
kS8 = 1,
//! 16-bit signed integer, specified as `.s16` in assembly.
kS16 = 2,
//! 32-bit signed integer, specified as `.s32` in assembly.
kS32 = 3,
//! 64-bit signed integer, specified as `.s64` in assembly.
kS64 = 4,
//! 8-bit unsigned integer, specified as `.u8` in assembly.
kU8 = 5,
//! 16-bit unsigned integer, specified as `.u16` in assembly.
kU16 = 6,
//! 32-bit unsigned integer, specified as `.u32` in assembly.
kU32 = 7,
//! 64-bit unsigned integer, specified as `.u64` in assembly.
kU64 = 8,
//! 16-bit floating point (half precision), specified as `.f16` in assembly.
kF16 = 10,
//! 32-bit floating point (single precision), specified as `.f32` in assembly.
kF32 = 11,
//! 64-bit floating point (double precision), specified as `.f64` in assembly.
kF64 = 12,
//! 8-bit polynomial.
kP8 = 13,
//! 16-bit BF16 floating point.
kBF16 = 14,
//! 64-bit polynomial.
kP64 = 15,
//! Maximum value of `DataType`.
kMaxValue = 15
};
static ASMJIT_INLINE_NODEBUG uint32_t dataTypeSize(DataType dt) noexcept {
// Index 9 corresponds to an unused DataType value, hence its zero entry.
static constexpr uint8_t table[] = { 0, 1, 2, 4, 8, 1, 2, 4, 8, 0, 2, 4, 8, 1, 2, 8 };
return table[size_t(dt)];
}
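// For example, dataTypeSize(DataType::kF32) == 4 and dataTypeSize(DataType::kU64) == 8.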
ASMJIT_END_SUB_NAMESPACE
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
using namespace arm;
ASMJIT_END_SUB_NAMESPACE
#endif // ASMJIT_CORE_ARCHCOMMONS_H_INCLUDED

View File

@@ -1,167 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/archtraits.h"
#include "../core/environment.h"
#include "../core/misc_p.h"
#if !defined(ASMJIT_NO_X86)
#include "../x86/x86archtraits_p.h"
#endif
#if !defined(ASMJIT_NO_AARCH64)
#include "../arm/a64archtraits_p.h"
#endif
ASMJIT_BEGIN_NAMESPACE
static constexpr ArchTraits noArchTraits = {
// SP/FP/LR/PC.
0xFFu, 0xFFu, 0xFFu, 0xFFu,
// Reserved,
{ 0u, 0u, 0u },
// HW stack alignment.
0u,
// Min/Max stack offset.
0, 0,
// Supported register types.
0u,
// ISA features [Gp, Vec, Mask, Extra].
{{
InstHints::kNoHints,
InstHints::kNoHints,
InstHints::kNoHints,
InstHints::kNoHints
}},
// TypeIdToRegType.
#define V(index) RegType::kNone
{{ ASMJIT_LOOKUP_TABLE_32(V, 0) }},
#undef V
// Word names of 8-bit, 16-bit, 32-bit, and 64-bit quantities.
{
ArchTypeNameId::kByte,
ArchTypeNameId::kHalf,
ArchTypeNameId::kWord,
ArchTypeNameId::kQuad
}
};
ASMJIT_VARAPI const ArchTraits _archTraits[uint32_t(Arch::kMaxValue) + 1] = {
// No architecture.
noArchTraits,
// X86/X86 architectures.
#if !defined(ASMJIT_NO_X86)
x86::x86ArchTraits,
x86::x64ArchTraits,
#else
noArchTraits,
noArchTraits,
#endif
// RISCV32/RISCV64 architectures.
noArchTraits,
noArchTraits,
// ARM architecture
noArchTraits,
// AArch64 architecture.
#if !defined(ASMJIT_NO_AARCH64)
a64::a64ArchTraits,
#else
noArchTraits,
#endif
// ARM/Thumb architecture.
noArchTraits,
// Reserved.
noArchTraits,
// MIPS32/MIPS64
noArchTraits,
noArchTraits
};
ASMJIT_FAVOR_SIZE Error ArchUtils::typeIdToRegSignature(Arch arch, TypeId typeId, TypeId* typeIdOut, OperandSignature* regSignatureOut) noexcept {
const ArchTraits& archTraits = ArchTraits::byArch(arch);
// TODO: Remove this, should never be used like this.
// Passed RegType instead of TypeId?
if (uint32_t(typeId) <= uint32_t(RegType::kMaxValue)) {
typeId = RegUtils::typeIdOf(RegType(uint32_t(typeId)));
}
if (ASMJIT_UNLIKELY(!TypeUtils::isValid(typeId))) {
return DebugUtils::errored(kErrorInvalidTypeId);
}
// First normalize architecture dependent types.
if (TypeUtils::isAbstract(typeId)) {
bool is32Bit = Environment::is32Bit(arch);
if (typeId == TypeId::kIntPtr) {
typeId = is32Bit ? TypeId::kInt32 : TypeId::kInt64;
}
else {
typeId = is32Bit ? TypeId::kUInt32 : TypeId::kUInt64;
}
}
// Type size helps to construct all groups of registers.
// TypeId is invalid if the size is zero.
uint32_t size = TypeUtils::sizeOf(typeId);
if (ASMJIT_UNLIKELY(!size)) {
return DebugUtils::errored(kErrorInvalidTypeId);
}
if (ASMJIT_UNLIKELY(typeId == TypeId::kFloat80)) {
return DebugUtils::errored(kErrorInvalidUseOfF80);
}
RegType regType = RegType::kNone;
if (TypeUtils::isBetween(typeId, TypeId::_kBaseStart, TypeId::_kVec32Start)) {
regType = archTraits._typeIdToRegType[uint32_t(typeId) - uint32_t(TypeId::_kBaseStart)];
if (regType == RegType::kNone) {
if (typeId == TypeId::kInt64 || typeId == TypeId::kUInt64) {
return DebugUtils::errored(kErrorInvalidUseOfGpq);
}
else {
return DebugUtils::errored(kErrorInvalidTypeId);
}
}
}
else {
if (size <= 8 && archTraits.hasRegType(RegType::kVec64)) {
regType = RegType::kVec64;
}
else if (size <= 16 && archTraits.hasRegType(RegType::kVec128)) {
regType = RegType::kVec128;
}
else if (size == 32 && archTraits.hasRegType(RegType::kVec256)) {
regType = RegType::kVec256;
}
else if (archTraits.hasRegType(RegType::kVec512)) {
regType = RegType::kVec512;
}
else {
return DebugUtils::errored(kErrorInvalidTypeId);
}
}
*typeIdOut = typeId;
*regSignatureOut = RegUtils::signatureOf(regType);
return kErrorOk;
}
ASMJIT_END_NAMESPACE

View File

@@ -1,309 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_ARCHTRAITS_H_INCLUDED
#define ASMJIT_CORE_ARCHTRAITS_H_INCLUDED
#include "../core/operand.h"
#include "../core/support.h"
#include "../core/type.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_core
//! \{
//! Instruction set architecture (ISA).
enum class Arch : uint8_t {
//! Unknown or uninitialized ISA.
kUnknown = 0,
//! 32-bit X86 ISA.
kX86 = 1,
//! 64-bit X86 ISA also known as X64, X86_64, and AMD64.
kX64 = 2,
//! 32-bit RISC-V ISA.
kRISCV32 = 3,
//! 64-bit RISC-V ISA.
kRISCV64 = 4,
//! 32-bit ARM ISA (little endian).
kARM = 5,
//! 64-bit ARM ISA (little endian).
kAArch64 = 6,
//! 32-bit ARM ISA in Thumb mode (little endian).
kThumb = 7,
// 8 is not used at the moment, even numbers are 64-bit architectures.
//! 32-bit MIPS ISA (little endian).
kMIPS32_LE = 9,
//! 64-bit MIPS ISA (little endian).
kMIPS64_LE = 10,
//! 32-bit ARM ISA (big endian).
kARM_BE = 11,
//! 64-bit ARM ISA (big endian).
kAArch64_BE = 12,
//! 32-bit ARM ISA in Thumb mode (big endian).
kThumb_BE = 13,
// 14 is not used at the moment, even numbers are 64-bit architectures.
//! 32-bit MIPS ISA (big endian).
kMIPS32_BE = 15,
//! 64-bit MIPS ISA (big endian).
kMIPS64_BE = 16,
//! Maximum value of `Arch`.
kMaxValue = kMIPS64_BE,
//! Mask used by 32-bit ISAs (odd are 32-bit, even are 64-bit).
k32BitMask = 0x01,
//! First big-endian architecture.
kBigEndian = kARM_BE,
//! ISA detected at compile-time (ISA of the host).
kHost =
#if defined(_DOXYGEN)
DETECTED_AT_COMPILE_TIME
#else
ASMJIT_ARCH_X86 == 32 ? kX86 :
ASMJIT_ARCH_X86 == 64 ? kX64 :
ASMJIT_ARCH_RISCV == 32 ? kRISCV32 :
ASMJIT_ARCH_RISCV == 64 ? kRISCV64 :
ASMJIT_ARCH_ARM == 32 && ASMJIT_ARCH_LE ? kARM :
ASMJIT_ARCH_ARM == 32 && ASMJIT_ARCH_BE ? kARM_BE :
ASMJIT_ARCH_ARM == 64 && ASMJIT_ARCH_LE ? kAArch64 :
ASMJIT_ARCH_ARM == 64 && ASMJIT_ARCH_BE ? kAArch64_BE :
ASMJIT_ARCH_MIPS == 32 && ASMJIT_ARCH_LE ? kMIPS32_LE :
ASMJIT_ARCH_MIPS == 32 && ASMJIT_ARCH_BE ? kMIPS32_BE :
ASMJIT_ARCH_MIPS == 64 && ASMJIT_ARCH_LE ? kMIPS64_LE :
ASMJIT_ARCH_MIPS == 64 && ASMJIT_ARCH_BE ? kMIPS64_BE :
kUnknown
#endif
};
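// Illustrative sketch (not part of the original header): thanks to the
// odd/even numbering above, a word-size query needs no lookup table. Note
// that kUnknown (0) is even, so callers must treat it separately. The helper
// name is hypothetical.
static inline bool exampleArchIs32Bit(Arch arch) noexcept {
  return (uint32_t(arch) & uint32_t(Arch::k32BitMask)) != 0;
}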
//! Sub-architecture.
enum class SubArch : uint8_t {
//! Unknown or uninitialized architecture sub-type.
kUnknown = 0,
//! Maximum value of `SubArch`.
kMaxValue = kUnknown,
//! Sub-architecture detected at compile-time (sub-architecture of the host).
kHost =
#if defined(_DOXYGEN)
DETECTED_AT_COMPILE_TIME
#else
kUnknown
#endif
};
//! Identifier used to represent names of different data types across architectures.
enum class ArchTypeNameId : uint8_t {
//! Describes 'db' (X86|X86_64 convention, always 8-bit quantity).
kDB = 0,
//! Describes 'dw' (X86|X86_64 convention, always 16-bit word).
kDW,
//! Describes 'dd' (X86|X86_64 convention, always 32-bit word).
kDD,
//! Describes 'dq' (X86|X86_64 convention, always 64-bit word).
kDQ,
//! Describes 'byte' (always 8-bit quantity).
kByte,
//! Describes 'half' (most likely 16-bit word).
kHalf,
//! Describes 'word' (either 16-bit or 32-bit word).
kWord,
//! Describes 'hword' (most likely 16-bit word).
kHWord,
//! Describes 'dword' (either 32-bit or 64-bit word).
kDWord,
//! Describes 'qword' (64-bit word).
kQWord,
//! Describes 'xword' (64-bit word).
kXWord,
//! Describes 'short' (always 16-bit word).
kShort,
//! Describes 'long' (most likely 32-bit word).
kLong,
//! Describes 'quad' (64-bit word).
kQuad,
//! Maximum value of `ArchTypeNameId`.
kMaxValue = kQuad
};
//! Instruction feature hints for each register group provided by \ref ArchTraits.
//!
//! Instruction feature hints describe miscellaneous instructions provided by the architecture that the register
//! allocator can use to simplify certain tasks, such as register swaps or emitting register push/pop sequences.
//!
//! \remarks Instruction feature hints are only defined for register groups that can be used with \ref
//! asmjit_compiler infrastructure. Register groups that are not managed by Compiler are not provided by
//! \ref ArchTraits and cannot be queried.
enum class InstHints : uint8_t {
//! No feature hints.
kNoHints = 0,
//! Architecture supports a register swap by using a single instruction.
kRegSwap = 0x01u,
//! Architecture provides push/pop instructions.
kPushPop = 0x02u
};
ASMJIT_DEFINE_ENUM_FLAGS(InstHints)
//! Architecture traits used by Function API and Compiler's register allocator.
struct ArchTraits {
//! \name Members
//! \{
//! Stack pointer register id.
uint8_t _spRegId;
//! Frame pointer register id.
uint8_t _fpRegId;
//! Link register id.
uint8_t _linkRegId;
//! Instruction pointer (or program counter) register id, if accessible.
uint8_t _pcRegId;
// Reserved.
uint8_t _reserved[3];
//! Hardware stack alignment requirement.
uint8_t _hwStackAlignment;
//! Minimum addressable offset on stack guaranteed for all instructions.
uint32_t _minStackOffset;
//! Maximum addressable offset on stack depending on specific instruction.
uint32_t _maxStackOffset;
//! Bit-mask indexed by \ref RegType that describes which register types are supported by the ISA.
uint32_t _supportedRegTypes;
//! Flags for each virtual register group.
Support::Array<InstHints, Globals::kNumVirtGroups> _instHints;
//! Maps scalar TypeId values (from TypeId::_kBaseStart) to register types, see \ref TypeId.
Support::Array<RegType, 32> _typeIdToRegType;
//! Word name identifiers of 8-bit, 16-bit, 32-bit, and 64-bit quantities that appear in formatted text.
ArchTypeNameId _typeNameIdTable[4];
//! \}
//! \name Accessors
//! \{
//! Returns stack pointer register id (always GP register).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t spRegId() const noexcept { return _spRegId; }
//! Returns stack frame register id (always GP register).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t fpRegId() const noexcept { return _fpRegId; }
//! Returns link register id, if the architecture provides it (always GP register).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t linkRegId() const noexcept { return _linkRegId; }
//! Returns program counter register id, if the architecture exposes it (always GP register).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t pcRegId() const noexcept { return _pcRegId; }
//! Returns a hardware stack alignment requirement.
//!
//! \note This is a hardware constraint. Architectures that don't constrain it would return the lowest alignment
//! (1), however, some architectures may constrain the alignment, for example AArch64 requires 16-byte alignment.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t hwStackAlignment() const noexcept { return _hwStackAlignment; }
//! Tests whether the architecture provides a link register, which is used across function calls. If the link
//! register is not provided, a function call pushes the return address on the stack (X86/X64).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasLinkReg() const noexcept { return _linkRegId != Reg::kIdBad; }
//! Returns minimum addressable offset on stack guaranteed for all instructions.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t minStackOffset() const noexcept { return _minStackOffset; }
//! Returns maximum addressable offset on stack depending on specific instruction.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t maxStackOffset() const noexcept { return _maxStackOffset; }
//! Returns ISA flags of the given register `group`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG InstHints instFeatureHints(RegGroup group) const noexcept { return _instHints[group]; }
//! Tests whether the given register `group` has the given `flag` set.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasInstHint(RegGroup group, InstHints feature) const noexcept { return Support::test(_instHints[group], feature); }
//! Tests whether the ISA provides register swap instruction for the given register `group`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasInstRegSwap(RegGroup group) const noexcept { return hasInstHint(group, InstHints::kRegSwap); }
//! Tests whether the ISA provides push/pop instructions for the given register `group`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasInstPushPop(RegGroup group) const noexcept { return hasInstHint(group, InstHints::kPushPop); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasRegType(RegType type) const noexcept {
if (ASMJIT_UNLIKELY(type > RegType::kMaxValue)) {
type = RegType::kNone;
}
return Support::bitTest(_supportedRegTypes, uint32_t(type));
}
//! Returns a table of ISA word names that appear in formatted text. Word names are ISA dependent.
//!
//! The index of this table is log2 of the size:
//! - [0] 8-bits
//! - [1] 16-bits
//! - [2] 32-bits
//! - [3] 64-bits
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const ArchTypeNameId* typeNameIdTable() const noexcept { return _typeNameIdTable; }
//! Returns an ISA word name identifier of the given `index`, see \ref typeNameIdTable() for more details.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG ArchTypeNameId typeNameIdByIndex(uint32_t index) const noexcept { return _typeNameIdTable[index]; }
//! \}
//! \name Statics
//! \{
//! Returns a const reference to `ArchTraits` for the given architecture `arch`.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG const ArchTraits& byArch(Arch arch) noexcept;
//! \}
};
ASMJIT_VARAPI const ArchTraits _archTraits[uint32_t(Arch::kMaxValue) + 1];
//! \cond
ASMJIT_INLINE_NODEBUG const ArchTraits& ArchTraits::byArch(Arch arch) noexcept { return _archTraits[uint32_t(arch)]; }
//! \endcond
//! Architecture utilities.
namespace ArchUtils {
ASMJIT_API Error typeIdToRegSignature(Arch arch, TypeId typeId, TypeId* typeIdOut, OperandSignature* regSignatureOut) noexcept;
} // {ArchUtils}
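// Illustrative usage sketch (not part of the original header): querying the
// traits declared above for the host target. The commented values are
// examples, not guarantees, and the helper name is hypothetical.
static inline void exampleQueryHostTraits() noexcept {
  const ArchTraits& traits = ArchTraits::byArch(Arch::kHost);
  uint32_t alignment = traits.hwStackAlignment(); // For example 16 on AArch64.
  bool linked = traits.hasLinkReg();              // False on X86/X64.
  // typeNameIdTable() is indexed by log2 of the size, so index 3 (log2 of 8)
  // yields the identifier used for 64-bit quantities in formatted output.
  ArchTypeNameId name64 = traits.typeNameIdByIndex(3);
  DebugUtils::unused(alignment, linked, name64);
}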
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_ARCHTRAITS_H_INCLUDED

View File

@@ -1,444 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/assembler.h"
#include "../core/codewriter_p.h"
#include "../core/constpool.h"
#include "../core/emitterutils_p.h"
#include "../core/formatter.h"
#include "../core/logger.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
// BaseAssembler - Construction & Destruction
// ==========================================
BaseAssembler::BaseAssembler() noexcept
: BaseEmitter(EmitterType::kAssembler) {}
BaseAssembler::~BaseAssembler() noexcept {}
// BaseAssembler - Buffer Management
// =================================
Error BaseAssembler::setOffset(size_t offset) {
if (ASMJIT_UNLIKELY(!_code)) {
return reportError(DebugUtils::errored(kErrorNotInitialized));
}
size_t size = Support::max<size_t>(_section->bufferSize(), this->offset());
if (ASMJIT_UNLIKELY(offset > size)) {
return reportError(DebugUtils::errored(kErrorInvalidArgument));
}
_bufferPtr = _bufferData + offset;
return kErrorOk;
}
// BaseAssembler - Section Management
// ==================================
static ASMJIT_INLINE Error BaseAssembler_initSection(BaseAssembler* self, Section* section) noexcept {
uint8_t* p = section->_buffer._data;
self->_section = section;
self->_bufferData = p;
self->_bufferPtr = p + section->_buffer._size;
self->_bufferEnd = p + section->_buffer._capacity;
return kErrorOk;
}
Error BaseAssembler::section(Section* section) {
if (ASMJIT_UNLIKELY(!_code)) {
return reportError(DebugUtils::errored(kErrorNotInitialized));
}
if (!_code->isSectionValid(section->sectionId()) || _code->_sections[section->sectionId()] != section) {
return reportError(DebugUtils::errored(kErrorInvalidSection));
}
#ifndef ASMJIT_NO_LOGGING
if (_logger) {
_logger->logf(".section %s {#%u}\n", section->name(), section->sectionId());
}
#endif
return BaseAssembler_initSection(this, section);
}
// BaseAssembler - Label Management
// ================================
Label BaseAssembler::newLabel() {
Label label;
if (ASMJIT_LIKELY(_code)) {
Error err = _code->newLabelId(&label._baseId);
if (ASMJIT_UNLIKELY(err)) {
reportError(err);
}
}
return label;
}
Label BaseAssembler::newNamedLabel(const char* name, size_t nameSize, LabelType type, uint32_t parentId) {
Label label;
if (ASMJIT_LIKELY(_code)) {
uint32_t labelId;
Error err = _code->newNamedLabelId(&labelId, name, nameSize, type, parentId);
if (ASMJIT_UNLIKELY(err)) {
reportError(err);
}
else {
label.setId(labelId);
}
}
return label;
}
Error BaseAssembler::bind(const Label& label) {
if (ASMJIT_UNLIKELY(!_code)) {
return reportError(DebugUtils::errored(kErrorNotInitialized));
}
Error err = _code->bindLabel(label, _section->sectionId(), offset());
#ifndef ASMJIT_NO_LOGGING
if (_logger) {
EmitterUtils::logLabelBound(this, label);
}
#endif
resetInlineComment();
if (err) {
return reportError(err);
}
return kErrorOk;
}
// BaseAssembler - Embed
// =====================
Error BaseAssembler::embed(const void* data, size_t dataSize) {
if (ASMJIT_UNLIKELY(!_code)) {
return reportError(DebugUtils::errored(kErrorNotInitialized));
}
if (dataSize == 0) {
return kErrorOk;
}
CodeWriter writer(this);
ASMJIT_PROPAGATE(writer.ensureSpace(this, dataSize));
writer.emitData(data, dataSize);
writer.done(this);
#ifndef ASMJIT_NO_LOGGING
if (_logger) {
StringTmp<512> sb;
Formatter::formatData(sb, _logger->flags(), arch(), TypeId::kUInt8, data, dataSize, 1);
sb.append('\n');
_logger->log(sb);
}
#endif
return kErrorOk;
}
Error BaseAssembler::embedDataArray(TypeId typeId, const void* data, size_t itemCount, size_t repeatCount) {
uint32_t deabstractDelta = TypeUtils::deabstractDeltaOfSize(registerSize());
TypeId finalTypeId = TypeUtils::deabstract(typeId, deabstractDelta);
if (ASMJIT_UNLIKELY(!TypeUtils::isValid(finalTypeId))) {
return reportError(DebugUtils::errored(kErrorInvalidArgument));
}
if (itemCount == 0 || repeatCount == 0) {
return kErrorOk;
}
uint32_t typeSize = TypeUtils::sizeOf(finalTypeId);
Support::FastUInt8 of = 0;
size_t dataSize = Support::mulOverflow(itemCount, size_t(typeSize), &of);
size_t totalSize = Support::mulOverflow(dataSize, repeatCount, &of);
if (ASMJIT_UNLIKELY(of)) {
return reportError(DebugUtils::errored(kErrorOutOfMemory));
}
CodeWriter writer(this);
ASMJIT_PROPAGATE(writer.ensureSpace(this, totalSize));
for (size_t i = 0; i < repeatCount; i++) {
writer.emitData(data, dataSize);
}
writer.done(this);
#ifndef ASMJIT_NO_LOGGING
if (_logger) {
StringTmp<512> sb;
Formatter::formatData(sb, _logger->flags(), arch(), typeId, data, itemCount, repeatCount);
sb.append('\n');
_logger->log(sb);
}
#endif
return kErrorOk;
}
#ifndef ASMJIT_NO_LOGGING
static const TypeId dataTypeIdBySize[9] = {
TypeId::kVoid, // [0] (invalid)
TypeId::kUInt8, // [1] (uint8_t)
TypeId::kUInt16, // [2] (uint16_t)
TypeId::kVoid, // [3] (invalid)
TypeId::kUInt32, // [4] (uint32_t)
TypeId::kVoid, // [5] (invalid)
TypeId::kVoid, // [6] (invalid)
TypeId::kVoid, // [7] (invalid)
TypeId::kUInt64 // [8] (uint64_t)
};
#endif
Error BaseAssembler::embedConstPool(const Label& label, const ConstPool& pool) {
if (ASMJIT_UNLIKELY(!_code)) {
return reportError(DebugUtils::errored(kErrorNotInitialized));
}
if (ASMJIT_UNLIKELY(!isLabelValid(label))) {
return reportError(DebugUtils::errored(kErrorInvalidLabel));
}
ASMJIT_PROPAGATE(align(AlignMode::kData, uint32_t(pool.alignment())));
ASMJIT_PROPAGATE(bind(label));
size_t size = pool.size();
if (!size) {
return kErrorOk;
}
CodeWriter writer(this);
ASMJIT_PROPAGATE(writer.ensureSpace(this, size));
#ifndef ASMJIT_NO_LOGGING
uint8_t* data = writer.cursor();
#endif
pool.fill(writer.cursor());
writer.advance(size);
writer.done(this);
#ifndef ASMJIT_NO_LOGGING
if (_logger) {
uint32_t dataSizeLog2 = Support::min<uint32_t>(Support::ctz(pool.minItemSize()), 3);
uint32_t dataSize = 1 << dataSizeLog2;
StringTmp<512> sb;
Formatter::formatData(sb, _logger->flags(), arch(), dataTypeIdBySize[dataSize], data, size >> dataSizeLog2);
sb.append('\n');
_logger->log(sb);
}
#endif
return kErrorOk;
}
Error BaseAssembler::embedLabel(const Label& label, size_t dataSize) {
if (ASMJIT_UNLIKELY(!_code)) {
return reportError(DebugUtils::errored(kErrorNotInitialized));
}
if (ASMJIT_UNLIKELY(!isLabelValid(label))) {
return reportError(DebugUtils::errored(kErrorInvalidLabel));
}
RelocEntry* re;
LabelEntry& le = _code->labelEntry(label);
if (dataSize == 0) {
dataSize = registerSize();
}
if (ASMJIT_UNLIKELY(!Support::isPowerOf2UpTo(dataSize, 8u))) {
return reportError(DebugUtils::errored(kErrorInvalidOperandSize));
}
CodeWriter writer(this);
ASMJIT_PROPAGATE(writer.ensureSpace(this, dataSize));
#ifndef ASMJIT_NO_LOGGING
if (_logger) {
StringTmp<256> sb;
sb.append('.');
Formatter::formatDataType(sb, _logger->flags(), arch(), dataTypeIdBySize[dataSize]);
sb.append(' ');
Formatter::formatLabel(sb, FormatFlags::kNone, this, label.id());
sb.append('\n');
_logger->log(sb);
}
#endif
Error err = _code->newRelocEntry(&re, RelocType::kRelToAbs);
if (ASMJIT_UNLIKELY(err)) {
return reportError(err);
}
re->_sourceSectionId = _section->sectionId();
re->_sourceOffset = offset();
re->_format.resetToSimpleValue(OffsetType::kUnsignedOffset, dataSize);
if (le.isBound()) {
re->_targetSectionId = le.sectionId();
re->_payload = le.offset();
}
else {
OffsetFormat of;
of.resetToSimpleValue(OffsetType::kUnsignedOffset, dataSize);
Fixup* fixup = _code->newFixup(le, _section->sectionId(), offset(), 0, of);
if (ASMJIT_UNLIKELY(!fixup)) {
return reportError(DebugUtils::errored(kErrorOutOfMemory));
}
fixup->labelOrRelocId = re->id();
}
// Emit dummy DWORD/QWORD depending on the data size.
writer.emitZeros(dataSize);
writer.done(this);
return kErrorOk;
}
Error BaseAssembler::embedLabelDelta(const Label& label, const Label& base, size_t dataSize) {
if (ASMJIT_UNLIKELY(!_code)) {
return reportError(DebugUtils::errored(kErrorNotInitialized));
}
if (ASMJIT_UNLIKELY(!Support::bool_and(_code->isLabelValid(label), _code->isLabelValid(base)))) {
return reportError(DebugUtils::errored(kErrorInvalidLabel));
}
LabelEntry& labelEntry = _code->labelEntry(label);
LabelEntry& baseEntry = _code->labelEntry(base);
if (dataSize == 0) {
dataSize = registerSize();
}
if (ASMJIT_UNLIKELY(!Support::isPowerOf2UpTo(dataSize, 8u))) {
return reportError(DebugUtils::errored(kErrorInvalidOperandSize));
}
CodeWriter writer(this);
ASMJIT_PROPAGATE(writer.ensureSpace(this, dataSize));
#ifndef ASMJIT_NO_LOGGING
if (_logger) {
StringTmp<256> sb;
sb.append('.');
Formatter::formatDataType(sb, _logger->flags(), arch(), dataTypeIdBySize[dataSize]);
sb.append(" (");
Formatter::formatLabel(sb, FormatFlags::kNone, this, label.id());
sb.append(" - ");
Formatter::formatLabel(sb, FormatFlags::kNone, this, base.id());
sb.append(")\n");
_logger->log(sb);
}
#endif
// If both labels are bound within the same section, the delta can be calculated now.
if (labelEntry.isBound() && baseEntry.isBound() && labelEntry.sectionId() == baseEntry.sectionId()) {
uint64_t delta = labelEntry.offset() - baseEntry.offset();
writer.emitValueLE(delta, dataSize);
}
else {
RelocEntry* re;
Error err = _code->newRelocEntry(&re, RelocType::kExpression);
if (ASMJIT_UNLIKELY(err)) {
return reportError(err);
}
Expression* exp = _code->_zone.newT<Expression>();
if (ASMJIT_UNLIKELY(!exp)) {
return reportError(DebugUtils::errored(kErrorOutOfMemory));
}
exp->reset();
exp->opType = ExpressionOpType::kSub;
exp->setValueAsLabelId(0, label.id());
exp->setValueAsLabelId(1, base.id());
re->_format.resetToSimpleValue(OffsetType::kSignedOffset, dataSize);
re->_sourceSectionId = _section->sectionId();
re->_sourceOffset = offset();
re->_payload = (uint64_t)(uintptr_t)exp;
writer.emitZeros(dataSize);
}
writer.done(this);
return kErrorOk;
}
// BaseAssembler - Comment
// =======================
Error BaseAssembler::comment(const char* data, size_t size) {
if (!hasEmitterFlag(EmitterFlags::kLogComments)) {
if (!hasEmitterFlag(EmitterFlags::kAttached)) {
return reportError(DebugUtils::errored(kErrorNotInitialized));
}
return kErrorOk;
}
#ifndef ASMJIT_NO_LOGGING
// Logger cannot be NULL if `EmitterFlags::kLogComments` is set.
ASMJIT_ASSERT(_logger != nullptr);
_logger->log(data, size);
_logger->log("\n", 1);
return kErrorOk;
#else
DebugUtils::unused(data, size);
return kErrorOk;
#endif
}
// BaseAssembler - Events
// ======================
Error BaseAssembler::onAttach(CodeHolder& code) noexcept {
ASMJIT_PROPAGATE(Base::onAttach(code));
// Attach to the end of the .text section.
return BaseAssembler_initSection(this, code._sections[0]);
}
Error BaseAssembler::onDetach(CodeHolder& code) noexcept {
_section = nullptr;
_bufferData = nullptr;
_bufferEnd = nullptr;
_bufferPtr = nullptr;
return Base::onDetach(code);
}
Error BaseAssembler::onReinit(CodeHolder& code) noexcept {
// BaseEmitter::onReinit() never fails.
(void)Base::onReinit(code);
return BaseAssembler_initSection(this, code._sections[0]);
}
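// Illustrative usage sketch (not part of the original file): the embed API in
// a typical sequence. `a` stands for any concrete assembler already attached
// to a CodeHolder, which is an assumption of this example; the helper name is
// hypothetical.
static Error exampleEmbedTable(BaseAssembler& a) noexcept {
  Label start = a.newLabel();
  Label end = a.newLabel();
  ASMJIT_PROPAGATE(a.bind(start));
  const uint32_t items[2] = { 1u, 2u };
  // Embeds two uint32_t items once (repeatCount defaults to 1).
  ASMJIT_PROPAGATE(a.embedDataArray(TypeId::kUInt32, items, 2));
  ASMJIT_PROPAGATE(a.bind(end));
  // Emits the byte distance (end - start) as a 4-byte value. Since both
  // labels are bound in the same section, the delta is written immediately
  // instead of going through a relocation expression.
  return a.embedLabelDelta(end, start, 4);
}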
ASMJIT_END_NAMESPACE

View File

@@ -1,139 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_ASSEMBLER_H_INCLUDED
#define ASMJIT_CORE_ASSEMBLER_H_INCLUDED
#include "../core/codeholder.h"
#include "../core/emitter.h"
#include "../core/operand.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_assembler
//! \{
//! Base assembler.
//!
//! This is a base class that provides the interface used by architecture-specific assembler implementations.
//! Assembler doesn't hold any data; instead, it's attached to \ref CodeHolder, which provides all the data that
//! Assembler needs and which can be altered by it.
//!
//! Check out architecture specific assemblers for more details and examples:
//!
//! - \ref x86::Assembler - X86/X64 assembler implementation.
//! - \ref a64::Assembler - AArch64 assembler implementation.
class ASMJIT_VIRTAPI BaseAssembler : public BaseEmitter {
public:
ASMJIT_NONCOPYABLE(BaseAssembler)
using Base = BaseEmitter;
//! Current section where the assembling happens.
Section* _section = nullptr;
//! Start of the CodeBuffer of the current section.
uint8_t* _bufferData = nullptr;
//! End (first invalid byte) of the current section.
uint8_t* _bufferEnd = nullptr;
//! Pointer in the CodeBuffer of the current section.
uint8_t* _bufferPtr = nullptr;
//! \name Construction & Destruction
//! \{
//! Creates a new `BaseAssembler` instance.
ASMJIT_API BaseAssembler() noexcept;
//! Destroys the `BaseAssembler` instance.
ASMJIT_API ~BaseAssembler() noexcept override;
//! \}
//! \name Code-Buffer Management
//! \{
//! Returns the capacity of the current CodeBuffer.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t bufferCapacity() const noexcept { return (size_t)(_bufferEnd - _bufferData); }
//! Returns the number of remaining bytes in the current CodeBuffer.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t remainingSpace() const noexcept { return (size_t)(_bufferEnd - _bufferPtr); }
//! Returns the current position in the CodeBuffer.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t offset() const noexcept { return (size_t)(_bufferPtr - _bufferData); }
//! Sets the current position in the CodeBuffer to `offset`.
//!
//! \note The `offset` cannot be greater than buffer size even if it's within the buffer's capacity.
ASMJIT_API Error setOffset(size_t offset);
//! Returns the start of the CodeBuffer in the current section.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint8_t* bufferData() const noexcept { return _bufferData; }
//! Returns the end (first invalid byte) in the current section.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint8_t* bufferEnd() const noexcept { return _bufferEnd; }
//! Returns the current pointer in the CodeBuffer in the current section.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint8_t* bufferPtr() const noexcept { return _bufferPtr; }
//! \}
//! \name Section Management
//! \{
//! Returns the current section.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Section* currentSection() const noexcept { return _section; }
ASMJIT_API Error section(Section* section) override;
//! \}
//! \name Label Management
//! \{
ASMJIT_API Label newLabel() override;
ASMJIT_API Label newNamedLabel(const char* name, size_t nameSize = SIZE_MAX, LabelType type = LabelType::kGlobal, uint32_t parentId = Globals::kInvalidId) override;
ASMJIT_API Error bind(const Label& label) override;
//! \}
//! \name Embed
//! \{
ASMJIT_API Error embed(const void* data, size_t dataSize) override;
ASMJIT_API Error embedDataArray(TypeId typeId, const void* data, size_t itemCount, size_t repeatCount = 1) override;
ASMJIT_API Error embedConstPool(const Label& label, const ConstPool& pool) override;
ASMJIT_API Error embedLabel(const Label& label, size_t dataSize = 0) override;
ASMJIT_API Error embedLabelDelta(const Label& label, const Label& base, size_t dataSize = 0) override;
//! \}
//! \name Comment
//! \{
ASMJIT_API Error comment(const char* data, size_t size = SIZE_MAX) override;
//! \}
//! \name Events
//! \{
ASMJIT_API Error onAttach(CodeHolder& code) noexcept override;
ASMJIT_API Error onDetach(CodeHolder& code) noexcept override;
ASMJIT_API Error onReinit(CodeHolder& code) noexcept override;
//! \}
};
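// Illustrative usage sketch (not part of the original header): rewinding the
// write position with setOffset() to patch previously emitted bytes. `a` is
// assumed to be attached to a CodeHolder, and overwriting via embed() at a
// rewound offset is the intended patching pattern assumed here; the helper
// name is hypothetical.
static inline Error examplePatchByte(BaseAssembler& a) noexcept {
  size_t patchAt = a.offset();            // Position of the byte to patch.
  uint8_t placeholder = 0u;
  ASMJIT_PROPAGATE(a.embed(&placeholder, 1));
  size_t resumeAt = a.offset();
  ASMJIT_PROPAGATE(a.setOffset(patchAt)); // Rewind; cannot exceed buffer size.
  uint8_t realValue = 0xC3u;
  ASMJIT_PROPAGATE(a.embed(&realValue, 1));
  return a.setOffset(resumeAt);           // Restore the original position.
}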
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_ASSEMBLER_H_INCLUDED

View File

@@ -1,946 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#ifndef ASMJIT_NO_BUILDER
#include "../core/builder.h"
#include "../core/emitterutils_p.h"
#include "../core/errorhandler.h"
#include "../core/formatter.h"
#include "../core/logger.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
// PostponedErrorHandler (Internal)
// ================================
//! Postponed error handler that never throws. Used as a temporary error handler
//! while running passes. If an error occurs, the caller is notified and calls
//! the real error handler, which may throw.
class PostponedErrorHandler : public ErrorHandler {
public:
void handleError(Error err, const char* message, BaseEmitter* origin) override {
DebugUtils::unused(err, origin);
_message.assign(message);
}
StringTmp<128> _message;
};
// BaseBuilder - Utilities
// =======================
static void BaseBuilder_deletePasses(BaseBuilder* self) noexcept {
for (Pass* pass : self->_passes) {
pass->~Pass();
}
self->_passes.reset();
}
// BaseBuilder - Construction & Destruction
// ========================================
BaseBuilder::BaseBuilder() noexcept
: BaseEmitter(EmitterType::kBuilder),
_codeZone(64u * 1024u),
_passZone(64u * 1024u),
_allocator(&_codeZone) {}
BaseBuilder::~BaseBuilder() noexcept {
BaseBuilder_deletePasses(this);
}
// BaseBuilder - Node Management
// =============================
Error BaseBuilder::newInstNode(InstNode** out, InstId instId, InstOptions instOptions, uint32_t opCount) {
uint32_t opCapacity = InstNode::capacityOfOpCount(opCount);
ASMJIT_ASSERT(opCapacity >= InstNode::kBaseOpCapacity);
void* ptr = _codeZone.alloc(InstNode::nodeSizeOfOpCapacity(opCapacity));
if (ASMJIT_UNLIKELY(!ptr)) {
return reportError(DebugUtils::errored(kErrorOutOfMemory));
}
*out = new(Support::PlacementNew{ptr}) InstNode(instId, instOptions, opCount, opCapacity);
return kErrorOk;
}
Error BaseBuilder::newLabelNode(LabelNode** out) {
*out = nullptr;
ASMJIT_PROPAGATE(_newNodeT<LabelNode>(out));
return registerLabelNode(*out);
}
Error BaseBuilder::newAlignNode(AlignNode** out, AlignMode alignMode, uint32_t alignment) {
*out = nullptr;
return _newNodeT<AlignNode>(out, alignMode, alignment);
}
Error BaseBuilder::newEmbedDataNode(EmbedDataNode** out, TypeId typeId, const void* data, size_t itemCount, size_t repeatCount) {
*out = nullptr;
uint32_t deabstractDelta = TypeUtils::deabstractDeltaOfSize(registerSize());
TypeId finalTypeId = TypeUtils::deabstract(typeId, deabstractDelta);
if (ASMJIT_UNLIKELY(!TypeUtils::isValid(finalTypeId))) {
return reportError(DebugUtils::errored(kErrorInvalidArgument));
}
uint32_t typeSize = TypeUtils::sizeOf(finalTypeId);
Support::FastUInt8 of = 0;
size_t nodeSize = Support::maddOverflow(itemCount, size_t(typeSize), sizeof(EmbedDataNode), &of);
if (ASMJIT_UNLIKELY(of)) {
return reportError(DebugUtils::errored(kErrorOutOfMemory));
}
EmbedDataNode* node = nullptr;
ASMJIT_PROPAGATE(_newNodeTWithSize<EmbedDataNode>(
&node, Support::alignUp(nodeSize, Globals::kZoneAlignment),
typeId, uint8_t(typeSize), itemCount, repeatCount
));
if (data) {
memcpy(node->data(), data, node->dataSize());
}
*out = node;
return kErrorOk;
}
Error BaseBuilder::newConstPoolNode(ConstPoolNode** out) {
*out = nullptr;
ASMJIT_PROPAGATE(_newNodeT<ConstPoolNode>(out, &_codeZone));
return registerLabelNode(*out);
}
Error BaseBuilder::newCommentNode(CommentNode** out, const char* data, size_t size) {
*out = nullptr;
if (data) {
if (size == SIZE_MAX) {
size = strlen(data);
}
if (size > 0) {
data = static_cast<char*>(_codeZone.dup(data, size, true));
if (ASMJIT_UNLIKELY(!data)) {
return reportError(DebugUtils::errored(kErrorOutOfMemory));
}
}
}
return _newNodeT<CommentNode>(out, data);
}
BaseNode* BaseBuilder::addNode(BaseNode* node) noexcept {
ASMJIT_ASSERT(!node->_prev);
ASMJIT_ASSERT(!node->_next);
ASMJIT_ASSERT(!node->isActive());
if (!_cursor) {
if (_nodeList.empty()) {
_nodeList.reset(node, node);
}
else {
node->_next = _nodeList.first();
_nodeList._first->_prev = node;
_nodeList._first = node;
}
}
else {
BaseNode* prev = _cursor;
BaseNode* next = _cursor->next();
node->_prev = prev;
node->_next = next;
prev->_next = node;
if (next) {
next->_prev = node;
}
else {
_nodeList._last = node;
}
}
node->_addFlags(NodeFlags::kIsActive);
if (node->isSection()) {
_dirtySectionLinks = true;
}
_cursor = node;
return node;
}
BaseNode* BaseBuilder::addAfter(BaseNode* node, BaseNode* ref) noexcept {
ASMJIT_ASSERT(!node->_prev);
ASMJIT_ASSERT(!node->_next);
BaseNode* prev = ref;
BaseNode* next = ref->next();
node->_prev = prev;
node->_next = next;
node->_addFlags(NodeFlags::kIsActive);
if (node->isSection()) {
_dirtySectionLinks = true;
}
prev->_next = node;
if (next) {
next->_prev = node;
}
else {
_nodeList._last = node;
}
return node;
}
BaseNode* BaseBuilder::addBefore(BaseNode* node, BaseNode* ref) noexcept {
ASMJIT_ASSERT(!node->_prev);
ASMJIT_ASSERT(!node->_next);
ASMJIT_ASSERT(!node->isActive());
ASMJIT_ASSERT(ref->isActive());
BaseNode* prev = ref->prev();
BaseNode* next = ref;
node->_prev = prev;
node->_next = next;
node->_addFlags(NodeFlags::kIsActive);
if (node->isSection()) {
_dirtySectionLinks = true;
}
next->_prev = node;
if (prev) {
prev->_next = node;
}
else {
_nodeList._first = node;
}
return node;
}
BaseNode* BaseBuilder::removeNode(BaseNode* node) noexcept {
if (!node->isActive()) {
return node;
}
BaseNode* prev = node->prev();
BaseNode* next = node->next();
if (_nodeList._first == node) {
_nodeList._first = next;
}
else {
prev->_next = next;
}
if (_nodeList._last == node) {
_nodeList._last = prev;
}
else {
next->_prev = prev;
}
node->_prev = nullptr;
node->_next = nullptr;
node->_clearFlags(NodeFlags::kIsActive);
if (node->isSection()) {
_dirtySectionLinks = true;
}
if (_cursor == node) {
_cursor = prev;
}
return node;
}
void BaseBuilder::removeNodes(BaseNode* first, BaseNode* last) noexcept {
if (first == last) {
removeNode(first);
return;
}
if (!first->isActive()) {
return;
}
BaseNode* prev = first->prev();
BaseNode* next = last->next();
if (_nodeList._first == first) {
_nodeList._first = next;
}
else {
prev->_next = next;
}
if (_nodeList._last == last) {
_nodeList._last = prev;
}
else {
next->_prev = prev;
}
BaseNode* node = first;
uint32_t didRemoveSection = false;
for (;;) {
next = node->next();
ASMJIT_ASSERT(next != nullptr);
node->_prev = nullptr;
node->_next = nullptr;
node->_clearFlags(NodeFlags::kIsActive);
didRemoveSection |= uint32_t(node->isSection());
if (_cursor == node) {
_cursor = prev;
}
if (node == last) {
break;
}
node = next;
}
if (didRemoveSection) {
_dirtySectionLinks = true;
}
}
BaseNode* BaseBuilder::setCursor(BaseNode* node) noexcept {
BaseNode* old = _cursor;
_cursor = node;
return old;
}
// BaseBuilder - Sections
// ======================
Error BaseBuilder::sectionNodeOf(SectionNode** out, uint32_t sectionId) {
*out = nullptr;
if (ASMJIT_UNLIKELY(!_code)) {
return DebugUtils::errored(kErrorNotInitialized);
}
if (ASMJIT_UNLIKELY(!_code->isSectionValid(sectionId))) {
return reportError(DebugUtils::errored(kErrorInvalidSection));
}
if (sectionId >= _sectionNodes.size()) {
Error err = _sectionNodes.reserve(&_allocator, sectionId + 1);
if (ASMJIT_UNLIKELY(err != kErrorOk)) {
return reportError(err);
}
}
SectionNode* node = nullptr;
if (sectionId < _sectionNodes.size()) {
node = _sectionNodes[sectionId];
}
if (!node) {
ASMJIT_PROPAGATE(_newNodeT<SectionNode>(&node, sectionId));
// We have already reserved enough space, this cannot fail now.
if (sectionId >= _sectionNodes.size()) {
// SAFETY: No need to check for error condition as we have already reserved enough space.
(void)_sectionNodes.resize(&_allocator, sectionId + 1);
}
_sectionNodes[sectionId] = node;
}
*out = node;
return kErrorOk;
}
Error BaseBuilder::section(Section* section) {
SectionNode* node;
ASMJIT_PROPAGATE(sectionNodeOf(&node, section->sectionId()));
ASMJIT_ASSUME(node != nullptr);
if (!node->isActive()) {
// Insert the section at the end if it was not part of the code.
addAfter(node, lastNode());
_cursor = node;
}
else {
// This is a bit tricky. We cache section links to make sure that switching sections doesn't involve
// traversal of the linked list unless the position of the section has changed.
if (hasDirtySectionLinks()) {
updateSectionLinks();
}
if (node->_nextSection) {
_cursor = node->_nextSection->_prev;
}
else {
_cursor = _nodeList.last();
}
}
return kErrorOk;
}
void BaseBuilder::updateSectionLinks() noexcept {
if (!_dirtySectionLinks) {
return;
}
BaseNode* node_ = _nodeList.first();
SectionNode* currentSection = nullptr;
while (node_) {
if (node_->isSection()) {
if (currentSection) {
currentSection->_nextSection = node_->as<SectionNode>();
}
currentSection = node_->as<SectionNode>();
}
node_ = node_->next();
}
if (currentSection) {
currentSection->_nextSection = nullptr;
}
_dirtySectionLinks = false;
}
// BaseBuilder - Labels
// ====================
Error BaseBuilder::labelNodeOf(LabelNode** out, uint32_t labelId) {
*out = nullptr;
if (ASMJIT_UNLIKELY(!_code)) {
return DebugUtils::errored(kErrorNotInitialized);
}
uint32_t index = labelId;
if (ASMJIT_UNLIKELY(index >= _code->labelCount())) {
return DebugUtils::errored(kErrorInvalidLabel);
}
if (index >= _labelNodes.size()) {
ASMJIT_PROPAGATE(_labelNodes.resize(&_allocator, index + 1));
}
LabelNode* node = _labelNodes[index];
if (!node) {
ASMJIT_PROPAGATE(_newNodeT<LabelNode>(&node, labelId));
_labelNodes[index] = node;
}
*out = node;
return kErrorOk;
}
Error BaseBuilder::registerLabelNode(LabelNode* node) {
if (ASMJIT_UNLIKELY(!_code)) {
return DebugUtils::errored(kErrorNotInitialized);
}
uint32_t labelId;
ASMJIT_PROPAGATE(_code->newLabelId(&labelId));
// We have just allocated a new label id, so no node can be registered for it yet.
ASMJIT_ASSERT(_labelNodes.size() < labelId + 1);
ASMJIT_PROPAGATE(_labelNodes.resize(&_allocator, labelId + 1));
_labelNodes[labelId] = node;
node->_labelId = labelId;
return kErrorOk;
}
static Error BaseBuilder_newLabelInternal(BaseBuilder* self, uint32_t labelId) {
ASMJIT_ASSERT(self->_labelNodes.size() < labelId + 1);
uint32_t growBy = labelId - self->_labelNodes.size();
Error err = self->_labelNodes.willGrow(&self->_allocator, growBy);
if (ASMJIT_UNLIKELY(err)) {
return self->reportError(err);
}
LabelNode* node = nullptr;
ASMJIT_PROPAGATE(self->_newNodeT<LabelNode>(&node, labelId));
// SAFETY: No need to check for error condition as we have already reserved enough space.
(void)self->_labelNodes.resize(&self->_allocator, labelId + 1);
self->_labelNodes[labelId] = node;
node->_labelId = labelId;
return kErrorOk;
}
Label BaseBuilder::newLabel() {
Label label;
if (ASMJIT_LIKELY(_code)) {
uint32_t labelId;
Error err = _code->newLabelId(&labelId);
if (ASMJIT_UNLIKELY(err)) {
reportError(err);
}
else {
if (ASMJIT_LIKELY(BaseBuilder_newLabelInternal(this, labelId) == kErrorOk)) {
label.setId(labelId);
}
}
}
return label;
}
Label BaseBuilder::newNamedLabel(const char* name, size_t nameSize, LabelType type, uint32_t parentId) {
Label label;
if (ASMJIT_LIKELY(_code)) {
uint32_t labelId;
Error err = _code->newNamedLabelId(&labelId, name, nameSize, type, parentId);
if (ASMJIT_UNLIKELY(err)) {
reportError(err);
}
else {
if (ASMJIT_LIKELY(BaseBuilder_newLabelInternal(this, labelId) == kErrorOk)) {
label.setId(labelId);
}
}
}
return label;
}
Error BaseBuilder::bind(const Label& label) {
LabelNode* node;
ASMJIT_PROPAGATE(labelNodeOf(&node, label));
addNode(node);
return kErrorOk;
}
// BaseBuilder - Passes
// ====================
ASMJIT_FAVOR_SIZE Pass* BaseBuilder::passByName(const char* name) const noexcept {
for (Pass* pass : _passes) {
if (strcmp(pass->name(), name) == 0) {
return pass;
}
}
return nullptr;
}
ASMJIT_FAVOR_SIZE Error BaseBuilder::addPass(Pass* pass) noexcept {
if (ASMJIT_UNLIKELY(!_code)) {
return DebugUtils::errored(kErrorNotInitialized);
}
if (ASMJIT_UNLIKELY(pass == nullptr)) {
// Since this is directly called by `addPassT()` we treat a `null` argument
// as an out-of-memory condition; otherwise it would be API misuse.
return DebugUtils::errored(kErrorOutOfMemory);
}
else if (ASMJIT_UNLIKELY(pass->_cb)) {
// The pass is already attached; attaching it to the same builder again is a no-op.
if (pass->_cb == this) {
return kErrorOk;
}
return DebugUtils::errored(kErrorInvalidState);
}
ASMJIT_PROPAGATE(_passes.append(&_allocator, pass));
pass->_cb = this;
return kErrorOk;
}
Error BaseBuilder::runPasses() {
if (ASMJIT_UNLIKELY(!_code)) {
return DebugUtils::errored(kErrorNotInitialized);
}
if (_passes.empty()) {
return kErrorOk;
}
ErrorHandler* prev = errorHandler();
PostponedErrorHandler postponed;
Error err = kErrorOk;
setErrorHandler(&postponed);
for (Pass* pass : _passes) {
_passZone.reset();
err = pass->run(&_passZone, _logger);
if (err) {
break;
}
}
_passZone.reset();
setErrorHandler(prev);
if (ASMJIT_UNLIKELY(err)) {
return reportError(err, !postponed._message.empty() ? postponed._message.data() : nullptr);
}
return kErrorOk;
}
// BaseBuilder - Emit
// ==================
Error BaseBuilder::_emit(InstId instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) {
uint32_t opCount = EmitterUtils::opCountFromEmitArgs(o0, o1, o2, opExt);
InstOptions options = instOptions() | forcedInstOptions();
if (Support::test(options, InstOptions::kReserved)) {
if (ASMJIT_UNLIKELY(!_code)) {
return DebugUtils::errored(kErrorNotInitialized);
}
#ifndef ASMJIT_NO_VALIDATION
// Strict validation.
if (hasDiagnosticOption(DiagnosticOptions::kValidateIntermediate)) {
Operand_ opArray[Globals::kMaxOpCount];
EmitterUtils::opArrayFromEmitArgs(opArray, o0, o1, o2, opExt);
ValidationFlags validationFlags = isCompiler() ? ValidationFlags::kEnableVirtRegs : ValidationFlags::kNone;
Error err = _funcs.validate(BaseInst(instId, options, _extraReg), opArray, opCount, validationFlags);
if (ASMJIT_UNLIKELY(err)) {
#ifndef ASMJIT_NO_LOGGING
return EmitterUtils::logInstructionFailed(this, err, instId, options, o0, o1, o2, opExt);
#else
resetState();
return reportError(err);
#endif
}
}
#endif
// Clear instruction options that should never be part of a regular instruction.
options &= ~InstOptions::kReserved;
}
uint32_t opCapacity = InstNode::capacityOfOpCount(opCount);
ASMJIT_ASSERT(opCapacity >= InstNode::kBaseOpCapacity);
void* ptr = _codeZone.alloc(InstNode::nodeSizeOfOpCapacity(opCapacity));
const char* comment = inlineComment();
resetInstOptions();
resetInlineComment();
if (ASMJIT_UNLIKELY(!ptr)) {
resetExtraReg();
return reportError(DebugUtils::errored(kErrorOutOfMemory));
}
InstNode* node = new(Support::PlacementNew{ptr}) InstNode(instId, options, opCount, opCapacity);
node->setExtraReg(extraReg());
node->setOp(0, o0);
node->setOp(1, o1);
node->setOp(2, o2);
for (uint32_t i = 3; i < opCount; i++) {
node->setOp(i, opExt[i - 3]);
}
node->resetOpRange(opCount, opCapacity);
if (comment) {
node->setInlineComment(static_cast<char*>(_codeZone.dup(comment, strlen(comment), true)));
}
addNode(node);
resetExtraReg();
return kErrorOk;
}
// BaseBuilder - Align
// ===================
Error BaseBuilder::align(AlignMode alignMode, uint32_t alignment) {
if (ASMJIT_UNLIKELY(!_code)) {
return DebugUtils::errored(kErrorNotInitialized);
}
AlignNode* node;
ASMJIT_PROPAGATE(newAlignNode(&node, alignMode, alignment));
ASMJIT_ASSUME(node != nullptr);
addNode(node);
return kErrorOk;
}
// BaseBuilder - Embed
// ===================
Error BaseBuilder::embed(const void* data, size_t dataSize) {
if (ASMJIT_UNLIKELY(!_code)) {
return DebugUtils::errored(kErrorNotInitialized);
}
EmbedDataNode* node;
ASMJIT_PROPAGATE(newEmbedDataNode(&node, TypeId::kUInt8, data, dataSize));
ASMJIT_ASSUME(node != nullptr);
addNode(node);
return kErrorOk;
}
Error BaseBuilder::embedDataArray(TypeId typeId, const void* data, size_t itemCount, size_t itemRepeat) {
if (ASMJIT_UNLIKELY(!_code)) {
return DebugUtils::errored(kErrorNotInitialized);
}
EmbedDataNode* node;
ASMJIT_PROPAGATE(newEmbedDataNode(&node, typeId, data, itemCount, itemRepeat));
ASMJIT_ASSUME(node != nullptr);
addNode(node);
return kErrorOk;
}
Error BaseBuilder::embedConstPool(const Label& label, const ConstPool& pool) {
if (ASMJIT_UNLIKELY(!_code)) {
return DebugUtils::errored(kErrorNotInitialized);
}
if (!isLabelValid(label)) {
return reportError(DebugUtils::errored(kErrorInvalidLabel));
}
ASMJIT_PROPAGATE(align(AlignMode::kData, uint32_t(pool.alignment())));
ASMJIT_PROPAGATE(bind(label));
EmbedDataNode* node;
ASMJIT_PROPAGATE(newEmbedDataNode(&node, TypeId::kUInt8, nullptr, pool.size()));
ASMJIT_ASSUME(node != nullptr);
pool.fill(node->data());
addNode(node);
return kErrorOk;
}
// BaseBuilder - EmbedLabel & EmbedLabelDelta
// ==========================================
Error BaseBuilder::embedLabel(const Label& label, size_t dataSize) {
if (ASMJIT_UNLIKELY(!Support::bool_and(_code, Support::isZeroOrPowerOf2UpTo(dataSize, 8u)))) {
return reportError(DebugUtils::errored(!_code ? kErrorNotInitialized : kErrorInvalidArgument));
}
EmbedLabelNode* node = nullptr;
ASMJIT_PROPAGATE(_newNodeT<EmbedLabelNode>(&node, label.id(), uint32_t(dataSize)));
addNode(node);
return kErrorOk;
}
Error BaseBuilder::embedLabelDelta(const Label& label, const Label& base, size_t dataSize) {
if (ASMJIT_UNLIKELY(!Support::bool_and(_code, Support::isZeroOrPowerOf2UpTo(dataSize, 8u)))) {
return reportError(DebugUtils::errored(!_code ? kErrorNotInitialized : kErrorInvalidArgument));
}
EmbedLabelDeltaNode* node = nullptr;
ASMJIT_PROPAGATE(_newNodeT<EmbedLabelDeltaNode>(&node, label.id(), base.id(), uint32_t(dataSize)));
addNode(node);
return kErrorOk;
}
// BaseBuilder - Comment
// =====================
Error BaseBuilder::comment(const char* data, size_t size) {
if (ASMJIT_UNLIKELY(!_code)) {
return DebugUtils::errored(kErrorNotInitialized);
}
CommentNode* node;
ASMJIT_PROPAGATE(newCommentNode(&node, data, size));
ASMJIT_ASSUME(node != nullptr);
addNode(node);
return kErrorOk;
}
// BaseBuilder - SerializeTo
// =========================
Error BaseBuilder::serializeTo(BaseEmitter* dst) {
Error err = kErrorOk;
BaseNode* node_ = _nodeList.first();
Operand_ opArray[Globals::kMaxOpCount];
do {
dst->setInlineComment(node_->inlineComment());
if (node_->isInst()) {
InstNode* node = node_->as<InstNode>();
// NOTE: Inlined to remove one additional call per instruction.
dst->setInstOptions(node->options());
dst->setExtraReg(node->extraReg());
const Operand_* op = node->operands();
const Operand_* opExt = EmitterUtils::noExt;
uint32_t opCount = node->opCount();
if (opCount > 3) {
uint32_t i = 4;
opArray[3] = op[3];
while (i < opCount) {
opArray[i].copyFrom(op[i]);
i++;
}
while (i < Globals::kMaxOpCount) {
opArray[i].reset();
i++;
}
opExt = opArray + 3;
}
err = dst->_emit(node->id(), op[0], op[1], op[2], opExt);
}
else if (node_->isLabel()) {
if (node_->isConstPool()) {
ConstPoolNode* node = node_->as<ConstPoolNode>();
err = dst->embedConstPool(node->label(), node->constPool());
}
else {
LabelNode* node = node_->as<LabelNode>();
err = dst->bind(node->label());
}
}
else if (node_->isAlign()) {
AlignNode* node = node_->as<AlignNode>();
err = dst->align(node->alignMode(), node->alignment());
}
else if (node_->isEmbedData()) {
EmbedDataNode* node = node_->as<EmbedDataNode>();
err = dst->embedDataArray(node->typeId(), node->data(), node->itemCount(), node->repeatCount());
}
else if (node_->isEmbedLabel()) {
EmbedLabelNode* node = node_->as<EmbedLabelNode>();
err = dst->embedLabel(node->label(), node->dataSize());
}
else if (node_->isEmbedLabelDelta()) {
EmbedLabelDeltaNode* node = node_->as<EmbedLabelDeltaNode>();
err = dst->embedLabelDelta(node->label(), node->baseLabel(), node->dataSize());
}
else if (node_->isSection()) {
SectionNode* node = node_->as<SectionNode>();
err = dst->section(_code->sectionById(node->sectionId()));
}
else if (node_->isComment()) {
CommentNode* node = node_->as<CommentNode>();
err = dst->comment(node->inlineComment());
}
if (err) {
break;
}
node_ = node_->next();
} while (node_);
return err;
}
// BaseBuilder - Events
// ====================
static ASMJIT_INLINE void BaseBuilder_clearAll(BaseBuilder* self) noexcept {
self->_sectionNodes.reset();
self->_labelNodes.reset();
self->_allocator.reset(&self->_codeZone);
self->_codeZone.reset();
self->_passZone.reset();
self->_cursor = nullptr;
self->_nodeList.reset();
}
static ASMJIT_INLINE Error BaseBuilder_initSection(BaseBuilder* self) noexcept {
SectionNode* initialSection;
ASMJIT_PROPAGATE(self->sectionNodeOf(&initialSection, 0));
ASMJIT_PROPAGATE(self->_passes.willGrow(&self->_allocator, 4));
ASMJIT_ASSUME(initialSection != nullptr);
self->_cursor = initialSection;
self->_nodeList.reset(initialSection, initialSection);
initialSection->_setFlags(NodeFlags::kIsActive);
return kErrorOk;
}
Error BaseBuilder::onAttach(CodeHolder& code) noexcept {
ASMJIT_PROPAGATE(Base::onAttach(code));
Error err = BaseBuilder_initSection(this);
if (ASMJIT_UNLIKELY(err)) {
onDetach(code);
}
return err;
}
Error BaseBuilder::onDetach(CodeHolder& code) noexcept {
BaseBuilder_deletePasses(this);
BaseBuilder_clearAll(this);
return Base::onDetach(code);
}
Error BaseBuilder::onReinit(CodeHolder& code) noexcept {
// BaseEmitter::onReinit() never fails.
(void)Base::onReinit(code);
BaseBuilder_deletePasses(this);
BaseBuilder_clearAll(this);
return BaseBuilder_initSection(this);
}
// Pass - Construction & Destruction
// =================================
Pass::Pass(const char* name) noexcept
: _name(name) {}
Pass::~Pass() noexcept {}
// Pass - Interface
// ================
// [[pure virtual]]
Error Pass::run(Zone* zone, Logger* logger) {
DebugUtils::unused(zone, logger);
return DebugUtils::errored(kErrorInvalidState);
}
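// Illustrative usage sketch (not part of the original file): the typical
// record-then-serialize flow implemented above. Both emitters are assumed to
// be attached to the same CodeHolder; the helper name is hypothetical.
static Error exampleSerialize(BaseBuilder& builder, BaseEmitter& dst) noexcept {
  Label l = builder.newLabel();
  ASMJIT_PROPAGATE(builder.bind(l));       // Recorded as a LabelNode.
  ASMJIT_PROPAGATE(builder.embedLabel(l)); // Recorded as an EmbedLabelNode.
  // Passes (if any were added) run over the node list first.
  ASMJIT_PROPAGATE(builder.runPasses());
  // Replays every node into the destination emitter, which encodes the final bytes.
  return builder.serializeTo(&dst);
}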
ASMJIT_END_NAMESPACE
#endif // !ASMJIT_NO_BUILDER

File diff suppressed because it is too large

View File

@@ -1,38 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_BUILDER_P_H_INCLUDED
#define ASMJIT_CORE_BUILDER_P_H_INCLUDED
#include "../core/api-config.h"
#ifndef ASMJIT_NO_BUILDER
#include "../core/builder.h"
ASMJIT_BEGIN_NAMESPACE
//! \cond INTERNAL
//! \addtogroup asmjit_builder
//! \{
static inline void BaseBuilder_assignInlineComment(BaseBuilder* self, BaseNode* node, const char* comment) noexcept {
if (comment) {
node->setInlineComment(static_cast<char*>(self->_codeZone.dup(comment, strlen(comment), true)));
}
}
static inline void BaseBuilder_assignInstState(BaseBuilder* self, InstNode* node, const BaseEmitter::State& state) noexcept {
node->setOptions(state.options);
node->setExtraReg(state.extraReg);
BaseBuilder_assignInlineComment(self, node, state.comment);
}
//! \}
//! \endcond
ASMJIT_END_NAMESPACE
#endif // !ASMJIT_NO_BUILDER
#endif // ASMJIT_CORE_BUILDER_P_H_INCLUDED

View File

@@ -1,135 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_CODEBUFFER_H_INCLUDED
#define ASMJIT_CORE_CODEBUFFER_H_INCLUDED
#include "../core/globals.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_core
//! \{
//! Flags used by \ref CodeBuffer.
enum class CodeBufferFlags : uint32_t {
//! No flags.
kNone = 0,
//! Buffer is external (not allocated by asmjit).
kIsExternal = 0x00000001u,
//! Buffer is fixed (cannot be reallocated).
kIsFixed = 0x00000002u
};
ASMJIT_DEFINE_ENUM_FLAGS(CodeBufferFlags)
//! Code or data buffer.
struct CodeBuffer {
//! \name Members
//! \{
//! The content of the buffer (data).
uint8_t* _data;
//! Number of bytes of `data` used.
size_t _size;
//! Buffer capacity (in bytes).
size_t _capacity;
//! Buffer flags.
CodeBufferFlags _flags;
//! \}
//! \name Overloaded Operators
//! \{
//! Returns a reference to the byte at the given `index`.
[[nodiscard]]
inline uint8_t& operator[](size_t index) noexcept {
ASMJIT_ASSERT(index < _size);
return _data[index];
}
//! \overload
[[nodiscard]]
inline const uint8_t& operator[](size_t index) const noexcept {
ASMJIT_ASSERT(index < _size);
return _data[index];
}
//! \}
//! \name Accessors
//! \{
//! Returns code buffer flags.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG CodeBufferFlags flags() const noexcept { return _flags; }
//! Tests whether the code buffer has the given `flag` set.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasFlag(CodeBufferFlags flag) const noexcept { return Support::test(_flags, flag); }
//! Tests whether this code buffer has a fixed size.
//!
//! Fixed size means that the code buffer is fixed and cannot grow.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isFixed() const noexcept { return hasFlag(CodeBufferFlags::kIsFixed); }
//! Tests whether the data in this code buffer is external.
//!
//! External data can only be provided by users; it's never allocated by AsmJit.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isExternal() const noexcept { return hasFlag(CodeBufferFlags::kIsExternal); }
//! Tests whether the data in this code buffer is allocated (non-null).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isAllocated() const noexcept { return _data != nullptr; }
//! Tests whether the code buffer is empty.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return !_size; }
//! Returns the size of the data.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t size() const noexcept { return _size; }
//! Returns the capacity of the data.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t capacity() const noexcept { return _capacity; }
//! Returns the pointer to the data the buffer references.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint8_t* data() noexcept { return _data; }
//! \overload
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const uint8_t* data() const noexcept { return _data; }
//! \}
//! \name Iterators
//! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint8_t* begin() noexcept { return _data; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const uint8_t* begin() const noexcept { return _data; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint8_t* end() noexcept { return _data + _size; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const uint8_t* end() const noexcept { return _data + _size; }
//! \}
};
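// Illustrative usage sketch (not part of the original header): begin()/end()
// span the used bytes (_size), not the whole capacity, so range-based
// iteration visits emitted bytes only. The helper name is hypothetical.
static inline size_t exampleCountZeroBytes(const CodeBuffer& buffer) noexcept {
  size_t n = 0;
  for (uint8_t byte : buffer) {
    n += size_t(byte == 0u);
  }
  return n;
}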
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_CODEBUFFER_H_INCLUDED

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -1,300 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/codeholder.h"
#include "../core/codewriter_p.h"
#include "../arm/armutils.h"
ASMJIT_BEGIN_NAMESPACE
bool CodeWriterUtils::encodeOffset32(uint32_t* dst, int64_t offset64, const OffsetFormat& format) noexcept {
uint32_t bitCount = format.immBitCount();
uint32_t bitShift = format.immBitShift();
uint32_t discardLsb = format.immDiscardLsb();
// Invalid offset (should not happen).
if (!bitCount || bitCount > format.valueSize() * 8u) {
return false;
}
uint32_t value;
uint32_t u = 0;
bool unsignedLogic = format.type() == OffsetType::kUnsignedOffset;
// First, handle all offsets that use an additional field for their sign; such offsets are encoded as their
// absolute value.
if (format.hasSignBit()) {
u = uint32_t(offset64 >= 0);
if (u == 0) {
offset64 = -offset64;
}
unsignedLogic = true;
}
// First handle all unsigned offset types.
if (unsignedLogic) {
if (discardLsb) {
ASMJIT_ASSERT(discardLsb <= 32);
if ((offset64 & Support::lsbMask<uint32_t>(discardLsb)) != 0) {
return false;
}
offset64 = int64_t(uint64_t(offset64) >> discardLsb);
}
value = uint32_t(offset64 & Support::lsbMask<uint32_t>(bitCount));
if (value != offset64) {
return false;
}
}
else {
// The rest of OffsetType options are all signed.
if (discardLsb) {
ASMJIT_ASSERT(discardLsb <= 32);
if ((offset64 & Support::lsbMask<uint32_t>(discardLsb)) != 0) {
return false;
}
offset64 >>= discardLsb;
}
if (!Support::isInt32(offset64)) {
return false;
}
value = uint32_t(int32_t(offset64));
if (!Support::isEncodableOffset32(int32_t(value), bitCount)) {
return false;
}
}
switch (format.type()) {
case OffsetType::kSignedOffset:
case OffsetType::kUnsignedOffset: {
*dst = (value & Support::lsbMask<uint32_t>(bitCount)) << bitShift;
return true;
}
// Opcode: {.....|imm:1|..N.N|......|imm:3|....|imm:8}
case OffsetType::kThumb32_ADR: {
// Sanity checks.
if (format.valueSize() != 4 || bitCount != 12 || bitShift != 0) {
return false;
}
uint32_t imm8 = (value & 0x00FFu);
uint32_t imm3 = (value & 0x0700u) << (12 - 8);
uint32_t imm1 = (value & 0x0800u) << (26 - 11);
uint32_t n = u ^ 1u;
*dst = imm8 | imm3 | imm1 | (n << 21) | (n << 23);
return true;
}
// Opcode: {....|.|imm[22]|imm[19:10]|..|ja|.|jb|imm[9:0]|.}
case OffsetType::kThumb32_BLX:
// The calculation is the same as `B`, but the lowest bit must be zero, so account for that.
value <<= 1;
[[fallthrough]];
// Opcode: {....|.|imm[23]|imm[20:11]|..|ja|.|jb|imm[10:0]}
case OffsetType::kThumb32_B: {
// Sanity checks.
if (format.valueSize() != 4) {
return false;
}
uint32_t ia = (value & 0x0007FFu);
uint32_t ib = (value & 0x1FF800u) << (16 - 11);
uint32_t ic = (value & 0x800000u) << (26 - 23);
uint32_t ja = ((~value >> 23) ^ (value >> 22)) & 1u;
uint32_t jb = ((~value >> 23) ^ (value >> 21)) & 1u;
*dst = ia | ib | ic | (ja << 14) | (jb << 11);
return true;
}
// Opcode: {....|.|imm[19]|....|imm[16:11]|..|ja|.|jb|imm[10:0]}
case OffsetType::kThumb32_BCond: {
// Sanity checks.
if (format.valueSize() != 4 || bitCount != 20 || bitShift != 0) {
return false;
}
uint32_t ia = (value & 0x0007FFu);
uint32_t ib = (value & 0x01F800u) << (16 - 11);
uint32_t ic = (value & 0x080000u) << (26 - 19);
uint32_t ja = ((~value >> 19) ^ (value >> 22)) & 1u;
uint32_t jb = ((~value >> 19) ^ (value >> 21)) & 1u;
*dst = ia | ib | ic | (ja << 14) | (jb << 11);
return true;
}
case OffsetType::kAArch32_ADR: {
uint32_t encodedImm;
if (!arm::Utils::encodeAArch32Imm(value, &encodedImm)) {
return false;
}
*dst = (Support::bitMask(22) << u) | (encodedImm << bitShift);
return true;
}
case OffsetType::kAArch32_U23_SignedOffset: {
*dst = (value << bitShift) | (u << 23);
return true;
}
case OffsetType::kAArch32_U23_0To3At0_4To7At8: {
// Sanity checks.
if (format.valueSize() != 4 || bitCount != 8 || bitShift != 0) {
return false;
}
uint32_t immLo = (value & 0x0Fu);
uint32_t immHi = (value & 0xF0u) << (8 - 4);
*dst = immLo | immHi | (u << 23);
return true;
}
case OffsetType::kAArch32_1To24At0_0At24: {
// Sanity checks.
if (format.valueSize() != 4 || bitCount != 25 || bitShift != 0) {
return false;
}
uint32_t immLo = (value & 0x0000001u) << 24;
uint32_t immHi = (value & 0x1FFFFFEu) >> 1;
*dst = immLo | immHi;
return true;
}
case OffsetType::kAArch64_ADR:
case OffsetType::kAArch64_ADRP: {
// Sanity checks.
if (format.valueSize() != 4 || bitCount != 21 || bitShift != 5) {
return false;
}
uint32_t immLo = value & 0x3u;
uint32_t immHi = (value >> 2) & Support::lsbMask<uint32_t>(19);
*dst = (immLo << 29) | (immHi << 5);
return true;
}
default:
return false;
}
}
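// Worked example (an illustration, not part of the original file): for the
// kAArch64_ADR/ADRP case above with an already shifted value of 0x3,
// immLo == 0x3 lands in opcode bits [30:29] and immHi == 0x0 in bits [23:5],
// producing *dst == 0x60000000. Whether low bits are discarded first (for
// example 12 for ADRP page deltas, an assumption about how the format is set
// up elsewhere) is configured by the OffsetFormat, not by this function.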
bool CodeWriterUtils::encodeOffset64(uint64_t* dst, int64_t offset64, const OffsetFormat& format) noexcept {
uint32_t bitCount = format.immBitCount();
uint32_t discardLsb = format.immDiscardLsb();
if (!bitCount || bitCount > format.valueSize() * 8u) {
return false;
}
uint64_t value;
// First handle all unsigned offset types.
if (format.type() == OffsetType::kUnsignedOffset) {
if (discardLsb) {
ASMJIT_ASSERT(discardLsb <= 32);
if ((offset64 & Support::lsbMask<uint32_t>(discardLsb)) != 0) {
return false;
}
offset64 = int64_t(uint64_t(offset64) >> discardLsb);
}
value = uint64_t(offset64) & Support::lsbMask<uint64_t>(bitCount);
if (value != uint64_t(offset64)) {
return false;
}
}
else {
// The rest of OffsetType options are all signed.
if (discardLsb) {
ASMJIT_ASSERT(discardLsb <= 32);
if ((offset64 & Support::lsbMask<uint32_t>(discardLsb)) != 0) {
return false;
}
offset64 >>= discardLsb;
}
if (!Support::isEncodableOffset64(offset64, bitCount)) {
return false;
}
value = uint64_t(offset64);
}
switch (format.type()) {
case OffsetType::kSignedOffset:
case OffsetType::kUnsignedOffset: {
*dst = (value & Support::lsbMask<uint64_t>(bitCount)) << format.immBitShift();
return true;
}
default:
return false;
}
}
bool CodeWriterUtils::writeOffset(void* dst, int64_t offset64, const OffsetFormat& format) noexcept {
// Offset the destination by valueOffset() so `dst` points to the
// patched word instead of the beginning of the patched region.
dst = static_cast<char*>(dst) + format.valueOffset();
switch (format.valueSize()) {
case 1: {
uint32_t mask;
if (!encodeOffset32(&mask, offset64, format)) {
return false;
}
Support::store_u8(dst, uint8_t(Support::load_u8(dst) | mask));
return true;
}
case 2: {
uint32_t mask;
if (!encodeOffset32(&mask, offset64, format)) {
return false;
}
Support::storeu_u16_le(dst, uint16_t(Support::loadu_u16_le(dst) | mask));
return true;
}
case 4: {
uint32_t mask;
if (!encodeOffset32(&mask, offset64, format)) {
return false;
}
Support::storeu_u32_le(dst, Support::loadu_u32_le(dst) | mask);
return true;
}
case 8: {
uint64_t mask;
if (!encodeOffset64(&mask, offset64, format)) {
return false;
}
Support::storeu_u64_le(dst, Support::loadu_u64_le(dst) | mask);
return true;
}
default:
return false;
}
}
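// A minimal sketch (not part of the original source) of the AArch64 ADR/ADRP bit split handled by
// encodeOffset32() above: the low 2 bits of the 21-bit displacement form immlo (opcode bits 30:29) and the
// remaining 19 bits form immhi (opcode bits 23:5). `encodeAdrImm` is a hypothetical helper for illustration.
static inline uint32_t encodeAdrImm(uint32_t displacement) noexcept {
uint32_t immLo = displacement & 0x3u; // imm[1:0] -> opcode bits 30:29.
uint32_t immHi = (displacement >> 2) & 0x7FFFFu; // imm[20:2] -> opcode bits 23:5.
return (immLo << 29) | (immHi << 5); // OR the result into a zeroed ADR/ADRP opcode.
}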
ASMJIT_END_NAMESPACE

View File

@@ -1,187 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_CODEBUFFERWRITER_P_H_INCLUDED
#define ASMJIT_CORE_CODEBUFFERWRITER_P_H_INCLUDED
#include "../core/assembler.h"
#include "../core/codebuffer.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
//! \cond INTERNAL
//! \addtogroup asmjit_assembler
//! \{
struct OffsetFormat;
//! Helper that is used to write into a \ref CodeBuffer held by \ref BaseAssembler.
class CodeWriter {
public:
uint8_t* _cursor;
ASMJIT_INLINE_NODEBUG explicit CodeWriter(BaseAssembler* a) noexcept
: _cursor(a->_bufferPtr) {}
[[nodiscard]]
ASMJIT_INLINE Error ensureSpace(BaseAssembler* a, size_t n) noexcept {
size_t remainingSpace = (size_t)(a->_bufferEnd - _cursor);
if (ASMJIT_UNLIKELY(remainingSpace < n)) {
CodeBuffer& buffer = a->_section->_buffer;
Error err = a->_code->growBuffer(&buffer, n);
if (ASMJIT_UNLIKELY(err))
return a->reportError(err);
_cursor = a->_bufferPtr;
}
return kErrorOk;
}
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint8_t* cursor() const noexcept { return _cursor; }
ASMJIT_INLINE_NODEBUG void setCursor(uint8_t* cursor) noexcept { _cursor = cursor; }
ASMJIT_INLINE_NODEBUG void advance(size_t n) noexcept { _cursor += n; }
[[nodiscard]]
ASMJIT_INLINE size_t offsetFrom(uint8_t* from) const noexcept {
ASMJIT_ASSERT(_cursor >= from);
return (size_t)(_cursor - from);
}
template<typename T>
ASMJIT_INLINE void emit8(T val) noexcept {
using U = std::make_unsigned_t<T>;
_cursor[0] = uint8_t(U(val) & U(0xFF));
_cursor++;
}
template<typename T, typename Y>
ASMJIT_INLINE void emit8If(T val, Y cond) noexcept {
using U = std::make_unsigned_t<T>;
ASMJIT_ASSERT(size_t(cond) <= 1u);
_cursor[0] = uint8_t(U(val) & U(0xFF));
_cursor += size_t(cond);
}
template<typename T>
ASMJIT_INLINE void emit16uLE(T val) noexcept {
using U = std::make_unsigned_t<T>;
Support::storeu_u16_le(_cursor, uint16_t(U(val) & 0xFFFFu));
_cursor += 2;
}
template<typename T>
ASMJIT_INLINE void emit16uBE(T val) noexcept {
using U = std::make_unsigned_t<T>;
Support::storeu_u16_be(_cursor, uint16_t(U(val) & 0xFFFFu));
_cursor += 2;
}
template<typename T>
ASMJIT_INLINE void emit32uLE(T val) noexcept {
using U = std::make_unsigned_t<T>;
Support::storeu_u32_le(_cursor, uint32_t(U(val) & 0xFFFFFFFFu));
_cursor += 4;
}
template<typename T>
ASMJIT_INLINE void emit32uBE(T val) noexcept {
using U = std::make_unsigned_t<T>;
Support::storeu_u32_be(_cursor, uint32_t(U(val) & 0xFFFFFFFFu));
_cursor += 4;
}
ASMJIT_INLINE void emitData(const void* data, size_t size) noexcept {
ASMJIT_ASSERT(size != 0);
memcpy(_cursor, data, size);
_cursor += size;
}
template<typename T>
ASMJIT_INLINE void emitValueLE(const T& value, size_t size) noexcept {
using U = std::make_unsigned_t<T>;
ASMJIT_ASSERT(size <= sizeof(T));
U v = U(value);
for (uint32_t i = 0; i < size; i++) {
_cursor[i] = uint8_t(v & 0xFFu);
v >>= 8;
}
_cursor += size;
}
template<typename T>
ASMJIT_INLINE void emitValueBE(const T& value, size_t size) noexcept {
using U = std::make_unsigned_t<T>;
ASMJIT_ASSERT(size <= sizeof(T));
U v = U(value);
// Emit bytes from the most significant byte of the `size`-byte value down to the least significant one.
for (uint32_t i = 0; i < size; i++) {
_cursor[i] = uint8_t((v >> ((size - 1u - i) * 8u)) & 0xFFu);
}
_cursor += size;
}
ASMJIT_INLINE void emitZeros(size_t size) noexcept {
ASMJIT_ASSERT(size != 0);
memset(_cursor, 0, size);
_cursor += size;
}
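// Removes the byte at `where` by shifting all following bytes down by one and decrementing the cursor.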
ASMJIT_INLINE void remove8(uint8_t* where) noexcept {
ASMJIT_ASSERT(where < _cursor);
uint8_t* p = where;
while (++p != _cursor)
p[-1] = p[0];
_cursor--;
}
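// Inserts `val` at `where` by shifting all following bytes up by one and incrementing the cursor.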
template<typename T>
ASMJIT_INLINE void insert8(uint8_t* where, T val) noexcept {
uint8_t* p = _cursor;
while (p != where) {
p[0] = p[-1];
p--;
}
*p = uint8_t(val & 0xFF);
_cursor++;
}
ASMJIT_INLINE void done(BaseAssembler* a) noexcept {
CodeBuffer& buffer = a->_section->_buffer;
size_t newSize = (size_t)(_cursor - a->_bufferData);
ASMJIT_ASSERT(newSize <= buffer.capacity());
a->_bufferPtr = _cursor;
buffer._size = Support::max(buffer._size, newSize);
}
};
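// A minimal usage sketch (not part of the original header) showing how an assembler implementation typically
// drives CodeWriter when emitting a single 32-bit word. `emitWord32LE` is a hypothetical helper written only
// for illustration.
static ASMJIT_INLINE Error emitWord32LE(BaseAssembler* a, uint32_t word) noexcept {
CodeWriter writer(a); // Cursor starts at the assembler's current buffer pointer.
ASMJIT_PROPAGATE(writer.ensureSpace(a, 4)); // Grow the CodeBuffer if 4 bytes don't fit.
writer.emit32uLE(word); // Write 4 bytes (little-endian) and advance the cursor.
writer.done(a); // Publish the cursor and update the buffer size.
return kErrorOk;
}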
//! Code writer utilities.
namespace CodeWriterUtils {
[[nodiscard]]
bool encodeOffset32(uint32_t* dst, int64_t offset64, const OffsetFormat& format) noexcept;
[[nodiscard]]
bool encodeOffset64(uint64_t* dst, int64_t offset64, const OffsetFormat& format) noexcept;
[[nodiscard]]
bool writeOffset(void* dst, int64_t offset64, const OffsetFormat& format) noexcept;
} // {CodeWriterUtils}
//! \}
//! \endcond
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_CODEBUFFERWRITER_P_H_INCLUDED

View File

@@ -1,621 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#ifndef ASMJIT_NO_COMPILER
#include "../core/assembler.h"
#include "../core/builder_p.h"
#include "../core/compiler.h"
#include "../core/cpuinfo.h"
#include "../core/logger.h"
#include "../core/rapass_p.h"
#include "../core/rastack_p.h"
#include "../core/support.h"
#include "../core/type.h"
ASMJIT_BEGIN_NAMESPACE
// GlobalConstPoolPass
// ===================
class GlobalConstPoolPass : public Pass {
ASMJIT_NONCOPYABLE(GlobalConstPoolPass)
public:
using Base = Pass;
GlobalConstPoolPass() noexcept : Pass("GlobalConstPoolPass") {}
Error run(Zone* zone, Logger* logger) override {
DebugUtils::unused(zone, logger);
// Flush the global constant pool.
BaseCompiler* compiler = static_cast<BaseCompiler*>(_cb);
ConstPoolNode* globalConstPool = compiler->_constPools[uint32_t(ConstPoolScope::kGlobal)];
if (globalConstPool) {
compiler->addAfter(globalConstPool, compiler->lastNode());
compiler->_constPools[uint32_t(ConstPoolScope::kGlobal)] = nullptr;
}
return kErrorOk;
}
};
// BaseCompiler - Construction & Destruction
// =========================================
BaseCompiler::BaseCompiler() noexcept
: BaseBuilder(),
_func(nullptr),
_vRegArray(),
_constPools { nullptr, nullptr } {
_emitterType = EmitterType::kCompiler;
_validationFlags = ValidationFlags::kEnableVirtRegs;
}
BaseCompiler::~BaseCompiler() noexcept {}
// BaseCompiler - Function Management
// ==================================
Error BaseCompiler::newFuncNode(FuncNode** out, const FuncSignature& signature) {
*out = nullptr;
// Create FuncNode together with all the required surrounding nodes.
FuncNode* funcNode = nullptr;
ASMJIT_PROPAGATE(_newNodeT<FuncNode>(&funcNode));
ASMJIT_PROPAGATE(newLabelNode(&funcNode->_exitNode));
ASMJIT_PROPAGATE(_newNodeT<SentinelNode>(&funcNode->_end, SentinelType::kFuncEnd));
// Initialize the function's detail info.
Error err = funcNode->detail().init(signature, environment());
if (ASMJIT_UNLIKELY(err)) {
return reportError(err);
}
// If the Target guarantees greater stack alignment than required by the calling convention,
// override it, as this prevents having to perform dynamic stack alignment.
uint32_t environmentStackAlignment = _environment.stackAlignment();
if (funcNode->_funcDetail._callConv.naturalStackAlignment() < environmentStackAlignment) {
funcNode->_funcDetail._callConv.setNaturalStackAlignment(environmentStackAlignment);
}
// Initialize the function frame.
err = funcNode->_frame.init(funcNode->_funcDetail);
if (ASMJIT_UNLIKELY(err)) {
return reportError(err);
}
// Allocate space for function arguments.
funcNode->_args = nullptr;
if (funcNode->argCount() != 0) {
funcNode->_args = _codeZone.alloc<FuncNode::ArgPack>(funcNode->argCount() * sizeof(FuncNode::ArgPack));
if (ASMJIT_UNLIKELY(!funcNode->_args)) {
return reportError(DebugUtils::errored(kErrorOutOfMemory));
}
memset(funcNode->_args, 0, funcNode->argCount() * sizeof(FuncNode::ArgPack));
}
ASMJIT_PROPAGATE(registerLabelNode(funcNode));
*out = funcNode;
return kErrorOk;
}
Error BaseCompiler::addFuncNode(FuncNode** out, const FuncSignature& signature) {
State state = _grabState();
ASMJIT_PROPAGATE(newFuncNode(out, signature));
ASMJIT_ASSUME(*out != nullptr);
BaseBuilder_assignInlineComment(this, *out, state.comment);
addFunc(*out);
return kErrorOk;
}
Error BaseCompiler::newFuncRetNode(FuncRetNode** out, const Operand_& o0, const Operand_& o1) {
uint32_t opCount = !o1.isNone() ? 2u : !o0.isNone() ? 1u : 0u;
FuncRetNode* node = nullptr;
ASMJIT_PROPAGATE(_newNodeT<FuncRetNode>(&node));
ASMJIT_ASSUME(node != nullptr);
node->setOpCount(opCount);
node->setOp(0, o0);
node->setOp(1, o1);
node->resetOpRange(2, node->opCapacity());
*out = node;
return kErrorOk;
}
Error BaseCompiler::addFuncRetNode(FuncRetNode** out, const Operand_& o0, const Operand_& o1) {
State state = _grabState();
ASMJIT_PROPAGATE(newFuncRetNode(out, o0, o1));
ASMJIT_ASSUME(*out != nullptr);
BaseBuilder_assignInlineComment(this, *out, state.comment);
addNode(*out);
return kErrorOk;
}
FuncNode* BaseCompiler::addFunc(FuncNode* func) {
_func = func;
addNode(func); // Function node.
BaseNode* prev = cursor(); // {CURSOR}.
addNode(func->exitNode()); // Function exit label.
addNode(func->endNode()); // Function end sentinel.
_setCursor(prev);
return func;
}
Error BaseCompiler::endFunc() {
FuncNode* func = _func;
resetState();
if (ASMJIT_UNLIKELY(!func)) {
return reportError(DebugUtils::errored(kErrorInvalidState));
}
// Add the local constant pool at the end of the function (if exists).
ConstPoolNode* localConstPool = _constPools[uint32_t(ConstPoolScope::kLocal)];
if (localConstPool) {
setCursor(func->endNode()->prev());
addNode(localConstPool);
_constPools[uint32_t(ConstPoolScope::kLocal)] = nullptr;
}
// Mark as finished.
_func = nullptr;
SentinelNode* end = func->endNode();
setCursor(end);
return kErrorOk;
}
// BaseCompiler - Function Invocation
// ==================================
Error BaseCompiler::newInvokeNode(InvokeNode** out, InstId instId, const Operand_& o0, const FuncSignature& signature) {
InvokeNode* node = nullptr;
ASMJIT_PROPAGATE(_newNodeT<InvokeNode>(&node, instId, InstOptions::kNone));
node->setOpCount(1);
node->setOp(0, o0);
node->resetOpRange(1, node->opCapacity());
Error err = node->detail().init(signature, environment());
if (ASMJIT_UNLIKELY(err)) {
return reportError(err);
}
// Skip the allocation if there are no arguments.
uint32_t argCount = signature.argCount();
if (argCount) {
node->_args = _codeZone.alloc<InvokeNode::OperandPack>(argCount * sizeof(InvokeNode::OperandPack));
if (!node->_args) {
return reportError(DebugUtils::errored(kErrorOutOfMemory));
}
memset(node->_args, 0, argCount * sizeof(InvokeNode::OperandPack));
}
*out = node;
return kErrorOk;
}
Error BaseCompiler::addInvokeNode(InvokeNode** out, InstId instId, const Operand_& o0, const FuncSignature& signature) {
State state = _grabState();
ASMJIT_PROPAGATE(newInvokeNode(out, instId, o0, signature));
ASMJIT_ASSUME(*out != nullptr);
BaseBuilder_assignInstState(this, *out, state);
addNode(*out);
return kErrorOk;
}
// BaseCompiler - Virtual Registers
// ================================
Error BaseCompiler::newVirtReg(VirtReg** out, TypeId typeId, OperandSignature signature, const char* name) {
*out = nullptr;
uint32_t index = _vRegArray.size();
if (ASMJIT_UNLIKELY(index >= uint32_t(Operand::kVirtIdCount))) {
return reportError(DebugUtils::errored(kErrorTooManyVirtRegs));
}
if (ASMJIT_UNLIKELY(_vRegArray.willGrow(&_allocator) != kErrorOk)) {
return reportError(DebugUtils::errored(kErrorOutOfMemory));
}
void* vRegPtr = _codeZone.alloc(Zone::alignedSizeOf<VirtReg>());
if (ASMJIT_UNLIKELY(!vRegPtr)) {
return reportError(DebugUtils::errored(kErrorOutOfMemory));
}
uint32_t size = TypeUtils::sizeOf(typeId);
uint32_t alignment = Support::min<uint32_t>(size, 64);
VirtReg* vReg = new(Support::PlacementNew{vRegPtr}) VirtReg(signature, Operand::indexToVirtId(index), size, alignment, typeId);
#ifndef ASMJIT_NO_LOGGING
if (name && name[0] != '\0') {
vReg->_name.setData(&_codeZone, name, SIZE_MAX);
}
#else
DebugUtils::unused(name);
#endif
_vRegArray.appendUnsafe(vReg);
*out = vReg;
return kErrorOk;
}
Error BaseCompiler::_newReg(Reg* out, TypeId typeId, const char* name) {
OperandSignature regSignature;
out->reset();
Error err = ArchUtils::typeIdToRegSignature(arch(), typeId, &typeId, &regSignature);
if (ASMJIT_UNLIKELY(err)) {
return reportError(err);
}
VirtReg* vReg;
ASMJIT_PROPAGATE(newVirtReg(&vReg, typeId, regSignature, name));
ASMJIT_ASSUME(vReg != nullptr);
out->_initReg(regSignature, vReg->id());
return kErrorOk;
}
Error BaseCompiler::_newRegFmt(Reg* out, TypeId typeId, const char* fmt, ...) {
va_list ap;
StringTmp<256> sb;
va_start(ap, fmt);
sb.appendVFormat(fmt, ap);
va_end(ap);
return _newReg(out, typeId, sb.data());
}
Error BaseCompiler::_newReg(Reg* out, const Reg& ref, const char* name) {
out->reset();
OperandSignature regSignature;
TypeId typeId;
if (isVirtRegValid(ref)) {
VirtReg* vRef = virtRegByReg(ref);
typeId = vRef->typeId();
// NOTE: It's possible to cast one register type to another if it's the same register group. However, VirtReg
// always contains the TypeId that was used to create the register. This means that in some cases we may end
// up having different sizes of `ref` and `vRef`. In such a case we adjust the TypeId to match the `ref` register
// type instead of the original register type, which should be the expected behavior.
uint32_t typeSize = TypeUtils::sizeOf(typeId);
uint32_t refSize = ref.size();
if (typeSize != refSize) {
if (TypeUtils::isInt(typeId)) {
// GP register - change TypeId to match `ref`, but keep sign of `vRef`.
switch (refSize) {
case 1: typeId = TypeId(uint32_t(TypeId::kInt8 ) | (uint32_t(typeId) & 1)); break;
case 2: typeId = TypeId(uint32_t(TypeId::kInt16) | (uint32_t(typeId) & 1)); break;
case 4: typeId = TypeId(uint32_t(TypeId::kInt32) | (uint32_t(typeId) & 1)); break;
case 8: typeId = TypeId(uint32_t(TypeId::kInt64) | (uint32_t(typeId) & 1)); break;
default: typeId = TypeId::kVoid; break;
}
}
else if (TypeUtils::isMmx(typeId)) {
// MMX register - always use 64-bit.
typeId = TypeId::kMmx64;
}
else if (TypeUtils::isMask(typeId)) {
// Mask register - change TypeId to match `ref` size.
switch (refSize) {
case 1: typeId = TypeId::kMask8; break;
case 2: typeId = TypeId::kMask16; break;
case 4: typeId = TypeId::kMask32; break;
case 8: typeId = TypeId::kMask64; break;
default: typeId = TypeId::kVoid; break;
}
}
else {
// Vector register - change TypeId to match `ref` size, keep vector metadata.
TypeId scalarTypeId = TypeUtils::scalarOf(typeId);
switch (refSize) {
case 16: typeId = TypeUtils::scalarToVector(scalarTypeId, TypeId::_kVec128Start); break;
case 32: typeId = TypeUtils::scalarToVector(scalarTypeId, TypeId::_kVec256Start); break;
case 64: typeId = TypeUtils::scalarToVector(scalarTypeId, TypeId::_kVec512Start); break;
default: typeId = TypeId::kVoid; break;
}
}
if (typeId == TypeId::kVoid) {
return reportError(DebugUtils::errored(kErrorInvalidState));
}
}
}
else {
typeId = RegUtils::typeIdOf(ref.regType());
}
Error err = ArchUtils::typeIdToRegSignature(arch(), typeId, &typeId, &regSignature);
if (ASMJIT_UNLIKELY(err)) {
return reportError(err);
}
VirtReg* vReg;
ASMJIT_PROPAGATE(newVirtReg(&vReg, typeId, regSignature, name));
ASMJIT_ASSUME(vReg != nullptr);
out->_initReg(regSignature, vReg->id());
return kErrorOk;
}
Error BaseCompiler::_newRegFmt(Reg* out, const Reg& ref, const char* fmt, ...) {
va_list ap;
StringTmp<256> sb;
va_start(ap, fmt);
sb.appendVFormat(fmt, ap);
va_end(ap);
return _newReg(out, ref, sb.data());
}
Error BaseCompiler::_newStack(BaseMem* out, uint32_t size, uint32_t alignment, const char* name) {
out->reset();
if (ASMJIT_UNLIKELY(Support::bool_or(size == 0, !Support::isZeroOrPowerOf2(alignment)))) {
return reportError(DebugUtils::errored(kErrorInvalidArgument));
}
if (alignment == 0u) {
alignment = 1u;
}
if (alignment > 64u) {
alignment = 64u;
}
VirtReg* vReg;
ASMJIT_PROPAGATE(newVirtReg(&vReg, TypeId::kVoid, OperandSignature{0}, name));
ASMJIT_ASSUME(vReg != nullptr);
vReg->_virtSize = size;
vReg->_isStack = true;
vReg->_alignment = uint8_t(alignment);
// Set the memory operand to GPD/GPQ and its id to VirtReg.
*out = BaseMem(OperandSignature::fromOpType(OperandType::kMem) |
OperandSignature::fromMemBaseType(_gpSignature.regType()) |
OperandSignature::fromBits(OperandSignature::kMemRegHomeFlag),
vReg->id(), 0, 0);
return kErrorOk;
}
Error BaseCompiler::setStackSize(uint32_t virtId, uint32_t newSize, uint32_t newAlignment) {
if (!isVirtIdValid(virtId)) {
return DebugUtils::errored(kErrorInvalidVirtId);
}
if (!Support::isZeroOrPowerOf2(newAlignment)) {
return reportError(DebugUtils::errored(kErrorInvalidArgument));
}
if (newAlignment > 64u) {
newAlignment = 64u;
}
VirtReg* vReg = virtRegById(virtId);
if (newSize) {
vReg->_virtSize = newSize;
}
if (newAlignment) {
vReg->_alignment = uint8_t(newAlignment);
}
// This is required if the RAPass is already running. There is a chance that a stack slot has already been
// allocated, and in that case it has to be updated as well; otherwise we would allocate the wrong amount of memory.
RAWorkReg* workReg = vReg->_workReg;
if (workReg && workReg->_stackSlot) {
workReg->_stackSlot->_size = vReg->_virtSize;
workReg->_stackSlot->_alignment = vReg->_alignment;
}
return kErrorOk;
}
Error BaseCompiler::_newConst(BaseMem* out, ConstPoolScope scope, const void* data, size_t size) {
out->reset();
if (uint32_t(scope) > 1) {
return reportError(DebugUtils::errored(kErrorInvalidArgument));
}
if (!_constPools[uint32_t(scope)]) {
ASMJIT_PROPAGATE(newConstPoolNode(&_constPools[uint32_t(scope)]));
}
ConstPoolNode* pool = _constPools[uint32_t(scope)];
size_t off;
Error err = pool->add(data, size, off);
if (ASMJIT_UNLIKELY(err)) {
return reportError(err);
}
*out = BaseMem(OperandSignature::fromOpType(OperandType::kMem) |
OperandSignature::fromMemBaseType(RegType::kLabelTag) |
OperandSignature::fromSize(uint32_t(size)),
pool->labelId(), 0, int32_t(off));
return kErrorOk;
}
void BaseCompiler::rename(const Reg& reg, const char* fmt, ...) {
if (!reg.isVirtReg()) return;
VirtReg* vReg = virtRegById(reg.id());
if (!vReg) {
return;
}
if (fmt && fmt[0] != '\0') {
char buf[128];
va_list ap;
va_start(ap, fmt);
vsnprintf(buf, ASMJIT_ARRAY_SIZE(buf), fmt, ap);
va_end(ap);
vReg->_name.setData(&_codeZone, buf, SIZE_MAX);
}
}
// BaseCompiler - Jump Annotations
// ===============================
Error BaseCompiler::newJumpNode(JumpNode** out, InstId instId, InstOptions instOptions, const Operand_& o0, JumpAnnotation* annotation) {
JumpNode* node = _codeZone.alloc<JumpNode>();
*out = node;
if (ASMJIT_UNLIKELY(!node)) {
return reportError(DebugUtils::errored(kErrorOutOfMemory));
}
uint32_t opCount = 1;
node = new(Support::PlacementNew{node}) JumpNode(instId, instOptions, opCount, annotation);
node->setOp(0, o0);
node->resetOpRange(opCount, JumpNode::kBaseOpCapacity);
return kErrorOk;
}
Error BaseCompiler::emitAnnotatedJump(InstId instId, const Operand_& o0, JumpAnnotation* annotation) {
State state = _grabState();
JumpNode* node;
ASMJIT_PROPAGATE(newJumpNode(&node, instId, state.options, o0, annotation));
node->setExtraReg(state.extraReg);
BaseBuilder_assignInlineComment(this, node, state.comment);
addNode(node);
return kErrorOk;
}
JumpAnnotation* BaseCompiler::newJumpAnnotation() {
if (_jumpAnnotations.grow(&_allocator, 1) != kErrorOk) {
reportError(DebugUtils::errored(kErrorOutOfMemory));
return nullptr;
}
uint32_t id = _jumpAnnotations.size();
JumpAnnotation* jumpAnnotation = _codeZone.newT<JumpAnnotation>(this, id);
if (!jumpAnnotation) {
reportError(DebugUtils::errored(kErrorOutOfMemory));
return nullptr;
}
_jumpAnnotations.appendUnsafe(jumpAnnotation);
return jumpAnnotation;
}
// BaseCompiler - Events
// =====================
static ASMJIT_INLINE void BaseCompiler_clear(BaseCompiler* self) noexcept {
self->_func = nullptr;
self->_constPools[uint32_t(ConstPoolScope::kLocal)] = nullptr;
self->_constPools[uint32_t(ConstPoolScope::kGlobal)] = nullptr;
self->_vRegArray.reset();
}
static ASMJIT_INLINE Error BaseCompiler_initDefaultPasses(BaseCompiler* self) noexcept {
return self->addPassT<GlobalConstPoolPass>();
}
Error BaseCompiler::onAttach(CodeHolder& code) noexcept {
ASMJIT_PROPAGATE(Base::onAttach(code));
Error err = BaseCompiler_initDefaultPasses(this);
if (ASMJIT_UNLIKELY(err)) {
onDetach(code);
return err;
}
return kErrorOk;
}
Error BaseCompiler::onDetach(CodeHolder& code) noexcept {
BaseCompiler_clear(this);
return Base::onDetach(code);
}
Error BaseCompiler::onReinit(CodeHolder& code) noexcept {
BaseCompiler_clear(this);
Error err = Base::onReinit(code);
if (ASMJIT_LIKELY(err == kErrorOk)) {
err = BaseCompiler_initDefaultPasses(this);
if (ASMJIT_UNLIKELY(err)) {
onDetach(code);
return err;
}
}
return err;
}
// FuncPass - Construction & Destruction
// =====================================
FuncPass::FuncPass(const char* name) noexcept
: Pass(name) {}
// FuncPass - Run
// ==============
Error FuncPass::run(Zone* zone, Logger* logger) {
BaseNode* node = cb()->firstNode();
while (node) {
if (node->type() == NodeType::kFunc) {
FuncNode* func = node->as<FuncNode>();
node = func->endNode();
ASMJIT_PROPAGATE(runOnFunction(zone, logger, func));
}
// Find a function by skipping all nodes that are not `NodeType::kFunc`.
do {
node = node->next();
} while (node && node->type() != NodeType::kFunc);
}
return kErrorOk;
}
// [[pure virtual]]
Error FuncPass::runOnFunction(Zone* zone, Logger* logger, FuncNode* func) {
DebugUtils::unused(zone, logger, func);
return DebugUtils::errored(kErrorInvalidState);
}
ASMJIT_END_NAMESPACE
#endif // !ASMJIT_NO_COMPILER

View File

@@ -1,780 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_COMPILER_H_INCLUDED
#define ASMJIT_CORE_COMPILER_H_INCLUDED
#include "../core/api-config.h"
#ifndef ASMJIT_NO_COMPILER
#include "../core/assembler.h"
#include "../core/builder.h"
#include "../core/constpool.h"
#include "../core/compilerdefs.h"
#include "../core/func.h"
#include "../core/inst.h"
#include "../core/operand.h"
#include "../core/support.h"
#include "../core/zone.h"
#include "../core/zonevector.h"
ASMJIT_BEGIN_NAMESPACE
class JumpAnnotation;
class JumpNode;
class FuncNode;
class FuncRetNode;
class InvokeNode;
//! \addtogroup asmjit_compiler
//! \{
//! Code emitter that uses virtual registers and performs register allocation.
//!
//! Compiler is a high-level code-generation tool that provides register allocation and automatic handling of function
//! calling conventions. It was primarily designed for merging multiple parts of code into a function without worrying
//! about registers and function calling conventions.
//!
//! BaseCompiler can be used, with minimal effort, to handle 32-bit and 64-bit code generation within a single code
//! base.
//!
//! BaseCompiler is based on BaseBuilder and contains all the features it provides. This means that the code it stores
//! can be modified (removed, added, injected) and analyzed. When the code is finalized, the compiler can emit it
//! into an Assembler to translate the abstract representation into machine code.
//!
//! Check out architecture specific compilers for more details and examples:
//!
//! - \ref x86::Compiler - X86/X64 compiler implementation.
//! - \ref a64::Compiler - AArch64 compiler implementation.
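//!
//! A minimal end-to-end sketch (a sketch only - `JitRuntime`, `newInt32`, and the other helpers come from the JIT
//! runtime and the x86 backend, not from this header):
//!
//! \code{.cpp}
//! JitRuntime rt;
//! CodeHolder code;
//! code.init(rt.environment(), rt.cpuFeatures());
//!
//! x86::Compiler cc(&code);
//! cc.addFunc(FuncSignature::build<int>()); // Begin `int fn(void)`.
//! x86::Gp result = cc.newInt32("result"); // Virtual register - no manual allocation needed.
//! cc.mov(result, 42);
//! cc.ret(result);
//! cc.endFunc();
//! cc.finalize(); // Run passes and serialize to an Assembler.
//!
//! int (*fn)(void);
//! Error err = rt.add(&fn, &code); // Relocate and make the code executable.
//! \endcode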
class ASMJIT_VIRTAPI BaseCompiler : public BaseBuilder {
public:
ASMJIT_NONCOPYABLE(BaseCompiler)
using Base = BaseBuilder;
//! \name Members
//! \{
//! Current function.
FuncNode* _func;
//! Stores array of `VirtReg` pointers.
ZoneVector<VirtReg*> _vRegArray;
//! Stores jump annotations.
ZoneVector<JumpAnnotation*> _jumpAnnotations;
//! Local and global constant pools.
//!
//! Local constant pool is flushed with each function, global constant pool is flushed only by \ref finalize().
ConstPoolNode* _constPools[2];
//! \}
//! \name Construction & Destruction
//! \{
//! Creates a new `BaseCompiler` instance.
ASMJIT_API BaseCompiler() noexcept;
//! Destroys the `BaseCompiler` instance.
ASMJIT_API ~BaseCompiler() noexcept override;
//! \}
//! \name Function Management
//! \{
//! Creates a new \ref FuncNode.
ASMJIT_API Error newFuncNode(FuncNode** ASMJIT_NONNULL(out), const FuncSignature& signature);
//! Creates a new \ref FuncNode and adds it to the instruction stream.
ASMJIT_API Error addFuncNode(FuncNode** ASMJIT_NONNULL(out), const FuncSignature& signature);
//! Creates a new \ref FuncRetNode.
ASMJIT_API Error newFuncRetNode(FuncRetNode** ASMJIT_NONNULL(out), const Operand_& o0, const Operand_& o1);
//! Creates a new \ref FuncRetNode and adds it to the instruction stream.
ASMJIT_API Error addFuncRetNode(FuncRetNode** ASMJIT_NONNULL(out), const Operand_& o0, const Operand_& o1);
//! Returns the current function.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG FuncNode* func() const noexcept { return _func; }
//! Creates a new \ref FuncNode with the given `signature` and returns it.
inline FuncNode* newFunc(const FuncSignature& signature) {
FuncNode* node;
newFuncNode(&node, signature);
return node;
}
//! Creates a new \ref FuncNode with the given `signature`, adds it to the instruction stream by using
//! the \ref addFunc(FuncNode*) overload, and returns it.
inline FuncNode* addFunc(const FuncSignature& signature) {
FuncNode* node;
addFuncNode(&node, signature);
return node;
}
//! Adds a function `node` to the instruction stream.
ASMJIT_API FuncNode* addFunc(FuncNode* ASMJIT_NONNULL(func));
//! Emits a sentinel that marks the end of the current function.
ASMJIT_API Error endFunc();
inline Error addRet(const Operand_& o0, const Operand_& o1) {
FuncRetNode* node;
return addFuncRetNode(&node, o0, o1);
}
//! \}
//! \name Function Invocation
//! \{
//! Creates a new \ref InvokeNode.
ASMJIT_API Error newInvokeNode(InvokeNode** ASMJIT_NONNULL(out), InstId instId, const Operand_& o0, const FuncSignature& signature);
//! Creates a new \ref InvokeNode and adds it to the instruction stream.
ASMJIT_API Error addInvokeNode(InvokeNode** ASMJIT_NONNULL(out), InstId instId, const Operand_& o0, const FuncSignature& signature);
//! \}
//! \name Virtual Registers
//! \{
//! Creates a new virtual register representing the given `typeId` and `signature`.
//!
//! \note This function is public, but it's generally not recommended for AsmJit users; use the architecture-specific
//! `newReg()` functionality instead, or functions like \ref _newReg() and \ref _newRegFmt().
ASMJIT_API Error newVirtReg(VirtReg** ASMJIT_NONNULL(out), TypeId typeId, OperandSignature signature, const char* name);
//! Creates a new virtual register of the given `typeId` and stores it to `out` operand.
ASMJIT_API Error _newReg(Reg* ASMJIT_NONNULL(out), TypeId typeId, const char* name = nullptr);
//! Creates a new virtual register of the given `typeId` and stores it to `out` operand.
//!
//! \note This version accepts a snprintf() format `fmt` followed by variadic arguments.
ASMJIT_API Error _newRegFmt(Reg* ASMJIT_NONNULL(out), TypeId typeId, const char* fmt, ...);
//! \overload
inline Error _newRegFmt(Reg* ASMJIT_NONNULL(out), TypeId typeId) { return _newRegFmt(out, typeId, nullptr); }
//! Creates a new virtual register compatible with the provided reference register `ref`.
ASMJIT_API Error _newReg(Reg* ASMJIT_NONNULL(out), const Reg& ref, const char* name = nullptr);
//! Creates a new virtual register compatible with the provided reference register `ref`.
//!
//! \note This version accepts a snprintf() format `fmt` followed by variadic arguments.
ASMJIT_API Error _newRegFmt(Reg* ASMJIT_NONNULL(out), const Reg& ref, const char* fmt, ...);
//! Tests whether the given `id` is a valid virtual register id.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isVirtIdValid(uint32_t id) const noexcept {
uint32_t index = Operand::virtIdToIndex(id);
return index < _vRegArray.size();
}
//! Tests whether the given `reg` is a virtual register having a valid id.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isVirtRegValid(const Reg& reg) const noexcept {
return isVirtIdValid(reg.id());
}
//! Returns \ref VirtReg associated with the given `id`.
[[nodiscard]]
inline VirtReg* virtRegById(uint32_t id) const noexcept {
ASMJIT_ASSERT(isVirtIdValid(id));
return _vRegArray[Operand::virtIdToIndex(id)];
}
//! Returns \ref VirtReg associated with the given `reg`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG VirtReg* virtRegByReg(const Reg& reg) const noexcept { return virtRegById(reg.id()); }
//! Returns \ref VirtReg associated with the given virtual register `index`.
//!
//! \note This is not the same as virtual register id. The conversion between id and its index is implemented
//! by \ref Operand_::virtIdToIndex() and \ref Operand_::indexToVirtId() functions.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG VirtReg* virtRegByIndex(uint32_t index) const noexcept { return _vRegArray[index]; }
//! Returns an array of all virtual registers managed by the Compiler.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const ZoneVector<VirtReg*>& virtRegs() const noexcept { return _vRegArray; }
//! \name Stack
//! \{
//! Creates a new stack of the given `size` and `alignment` and stores it to `out`.
//!
//! \note `name` can be used to give the stack a name, for debugging purposes.
ASMJIT_API Error _newStack(BaseMem* ASMJIT_NONNULL(out), uint32_t size, uint32_t alignment, const char* name = nullptr);
//! Updates the stack size of a stack created by `_newStack()` by its `virtId`.
ASMJIT_API Error setStackSize(uint32_t virtId, uint32_t newSize, uint32_t newAlignment = 0);
//! Updates the stack size of a stack created by `_newStack()`.
ASMJIT_INLINE_NODEBUG Error setStackSize(const BaseMem& mem, uint32_t newSize, uint32_t newAlignment = 0) {
return setStackSize(mem.id(), newSize, newAlignment);
}
//! \}
//! \name Constants
//! \{
//! Creates a new constant of the given `scope` (see \ref ConstPoolScope).
//!
//! This function adds a constant of the given `size` to the built-in \ref ConstPool and stores the reference to that
//! constant to the `out` operand.
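//!
//! For example (a sketch, with `cc` being a concrete Compiler instance):
//!
//! \code{.cpp}
//! uint32_t value = 0x3F800000u; // Bit pattern of 1.0f.
//! BaseMem constMem;
//! Error err = cc._newConst(&constMem, ConstPoolScope::kLocal, &value, sizeof(value));
//! // On success `constMem` is a label-relative memory operand pointing into the pool.
//! \endcode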
ASMJIT_API Error _newConst(BaseMem* ASMJIT_NONNULL(out), ConstPoolScope scope, const void* data, size_t size);
//! \}
//! \name Miscellaneous
//! \{
//! Rename the given virtual register `reg` to a formatted string `fmt`.
ASMJIT_API void rename(const Reg& reg, const char* fmt, ...);
//! \}
//! \name Jump Annotations
//! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const ZoneVector<JumpAnnotation*>& jumpAnnotations() const noexcept {
return _jumpAnnotations;
}
ASMJIT_API Error newJumpNode(JumpNode** ASMJIT_NONNULL(out), InstId instId, InstOptions instOptions, const Operand_& o0, JumpAnnotation* annotation);
ASMJIT_API Error emitAnnotatedJump(InstId instId, const Operand_& o0, JumpAnnotation* annotation);
//! Returns a new `JumpAnnotation` instance, which can be used to aggregate possible targets of a jump where the
//! target is not a label, for example to implement jump tables.
[[nodiscard]]
ASMJIT_API JumpAnnotation* newJumpAnnotation();
//! \}
//! \name Events
//! \{
ASMJIT_API Error onAttach(CodeHolder& code) noexcept override;
ASMJIT_API Error onDetach(CodeHolder& code) noexcept override;
ASMJIT_API Error onReinit(CodeHolder& code) noexcept override;
//! \}
};
//! Jump annotation used to describe the possible targets of a jump.
//!
//! \ref BaseCompiler allows emitting jumps where the target is either a register or a memory operand. Such jumps
//! cannot be trivially inspected, so instead of relying on heuristics AsmJit allows such jumps to be annotated with
//! their possible targets. The register allocator then uses the annotation to construct the control-flow graph, which
//! is then used by liveness analysis and other tools to prepare the ground for register allocation.
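//!
//! A short sketch of annotating an indirect jump (the x86 instruction id and the label operands are assumed for
//! illustration):
//!
//! \code{.cpp}
//! JumpAnnotation* annotation = cc.newJumpAnnotation();
//! annotation->addLabel(L_Case0); // Possible target #1.
//! annotation->addLabel(L_Case1); // Possible target #2.
//! cc.emitAnnotatedJump(x86::Inst::kIdJmp, target, annotation);
//! \endcode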
class JumpAnnotation {
public:
ASMJIT_NONCOPYABLE(JumpAnnotation)
//! \name Members
//! \{
//! Compiler that owns this JumpAnnotation.
BaseCompiler* _compiler;
//! Annotation identifier.
uint32_t _annotationId;
//! Vector of label identifiers, see \ref labelIds().
ZoneVector<uint32_t> _labelIds;
//! \}
//! \name Construction & Destruction
//! \{
ASMJIT_INLINE_NODEBUG JumpAnnotation(BaseCompiler* ASMJIT_NONNULL(compiler), uint32_t annotationId) noexcept
: _compiler(compiler),
_annotationId(annotationId) {}
//! \}
//! \name Accessors
//! \{
//! Returns the compiler that owns this JumpAnnotation.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG BaseCompiler* compiler() const noexcept { return _compiler; }
//! Returns the annotation id.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t annotationId() const noexcept { return _annotationId; }
//! Returns a vector of label identifiers that lists all targets of the jump.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const ZoneVector<uint32_t>& labelIds() const noexcept { return _labelIds; }
//! Tests whether the given `label` is a target of this JumpAnnotation.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasLabel(const Label& label) const noexcept { return hasLabelId(label.id()); }
//! Tests whether the given `labelId` is a target of this JumpAnnotation.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasLabelId(uint32_t labelId) const noexcept { return _labelIds.contains(labelId); }
//! \}
//! \name Annotation Building API
//! \{
//! Adds the `label` to the list of targets of this JumpAnnotation.
ASMJIT_INLINE_NODEBUG Error addLabel(const Label& label) noexcept { return addLabelId(label.id()); }
//! Adds the `labelId` to the list of targets of this JumpAnnotation.
ASMJIT_INLINE_NODEBUG Error addLabelId(uint32_t labelId) noexcept { return _labelIds.append(&_compiler->_allocator, labelId); }
//! \}
};
//! Jump instruction with \ref JumpAnnotation.
//!
//! \note This node should only be used to represent a jump where the jump target cannot be deduced by examining the
//! instruction operands, for example when the jump target is a register or a memory location. This pattern is often
//! used to perform indirect jumps through a jump table, e.g. to implement a `switch` statement.
class JumpNode : public InstNodeWithOperands<InstNode::kBaseOpCapacity> {
public:
ASMJIT_NONCOPYABLE(JumpNode)
//! \name Members
//! \{
JumpAnnotation* _annotation;
//! \}
//! \name Construction & Destruction
//! \{
inline JumpNode(InstId instId, InstOptions options, uint32_t opCount, JumpAnnotation* annotation) noexcept
: InstNodeWithOperands(instId, options, opCount),
_annotation(annotation) {
_setType(NodeType::kJump);
}
//! \}
//! \name Accessors
//! \{
//! Tests whether this JumpNode has associated a \ref JumpAnnotation.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasAnnotation() const noexcept { return _annotation != nullptr; }
//! Returns the \ref JumpAnnotation associated with this jump, or `nullptr`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG JumpAnnotation* annotation() const noexcept { return _annotation; }
//! Sets the \ref JumpAnnotation associated with this jump to `annotation`.
ASMJIT_INLINE_NODEBUG void setAnnotation(JumpAnnotation* annotation) noexcept { _annotation = annotation; }
//! \}
};
//! Function node represents a function used by \ref BaseCompiler.
//!
//! A function is composed of the following:
//!
//! - Function entry, \ref FuncNode acts as a label, so the entry is implicit. To get the entry, simply use
//! \ref FuncNode::label(), which is the same as \ref LabelNode::label().
//!
//! - Function exit, which is represented by \ref FuncNode::exitNode(). A helper function
//! \ref FuncNode::exitLabel() exists and returns an exit label instead of node.
//!
//! - Function \ref FuncNode::endNode() sentinel. This node marks the end of a function - there should be no
//! code that belongs to the function after this node, but the Compiler doesn't enforce that at the moment.
//!
//! - Function detail, see \ref FuncNode::detail().
//!
//! - Function frame, see \ref FuncNode::frame().
//!
//! - Function arguments mapped to virtual registers, see \ref FuncNode::argPacks().
//!
//! In a node list, the function and its body looks like the following:
//!
//! \code{.unparsed}
//! [...] - Anything before the function.
//!
//! [FuncNode] - Entry point of the function, acts as a label as well.
//! <Prolog> - Prolog inserted by the register allocator.
//! {...} - Function body - user code basically.
//! [ExitLabel] - Exit label
//! <Epilog> - Epilog inserted by the register allocator.
//! <Return> - Return inserted by the register allocator.
//! {...} - Can contain data or user code (error handling, special cases, ...).
//! [FuncEnd] - End sentinel
//!
//! [...] - Anything after the function.
//! \endcode
//!
//! When a function is added to the instruction stream by \ref BaseCompiler::addFunc() it actually inserts 3 nodes
//! (FuncNode, ExitLabel, and FuncEnd) and sets the current cursor to be FuncNode. When \ref BaseCompiler::endFunc()
//! is called the cursor is set to FuncEnd. This guarantees that the user can use ExitLabel as a marker after which
//! additional code or data can be placed, which is a common practice.
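//!
//! For example (a sketch, with `cc` being a concrete Compiler instance):
//!
//! \code{.cpp}
//! FuncNode* func = cc.addFunc(FuncSignature::build<void>());
//! // The cursor now points to `func` - nodes added here form the function body.
//! cc.endFunc(); // Moves the cursor to the FuncEnd sentinel.
//! \endcode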
class FuncNode : public LabelNode {
public:
ASMJIT_NONCOPYABLE(FuncNode)
//! Arguments pack.
struct ArgPack {
RegOnly _data[Globals::kMaxValuePack];
inline void reset() noexcept {
for (size_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++)
_data[valueIndex].reset();
}
inline RegOnly& operator[](size_t valueIndex) noexcept { return _data[valueIndex]; }
inline const RegOnly& operator[](size_t valueIndex) const noexcept { return _data[valueIndex]; }
};
//! \name Members
//! \{
//! Function detail.
FuncDetail _funcDetail;
//! Function frame.
FuncFrame _frame;
//! Function exit label.
LabelNode* _exitNode;
//! Function end (sentinel).
SentinelNode* _end;
//! Argument packs.
ArgPack* _args;
//! \}
//! \name Construction & Destruction
//! \{
//! Creates a new `FuncNode` instance.
//!
//! Always use `BaseCompiler::addFunc()` to create a new `FuncNode`.
inline explicit FuncNode(uint32_t labelId = Globals::kInvalidId) noexcept
: LabelNode(labelId),
_funcDetail(),
_frame(),
_exitNode(nullptr),
_end(nullptr),
_args(nullptr) {
_setType(NodeType::kFunc);
}
//! \}
//! \name Accessors
//! \{
//! Returns function exit `LabelNode`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG LabelNode* exitNode() const noexcept { return _exitNode; }
//! Returns function exit label.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Label exitLabel() const noexcept { return _exitNode->label(); }
//! Returns "End of Func" sentinel node.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG SentinelNode* endNode() const noexcept { return _end; }
//! Returns function detail.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG FuncDetail& detail() noexcept { return _funcDetail; }
//! Returns function detail.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const FuncDetail& detail() const noexcept { return _funcDetail; }
//! Returns function frame.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG FuncFrame& frame() noexcept { return _frame; }
//! Returns function frame.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const FuncFrame& frame() const noexcept { return _frame; }
//! Returns function attributes.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG FuncAttributes attributes() const noexcept { return _frame.attributes(); }
//! Adds `attrs` to the function attributes.
ASMJIT_INLINE_NODEBUG void addAttributes(FuncAttributes attrs) noexcept { _frame.addAttributes(attrs); }
//! Returns arguments count.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t argCount() const noexcept { return _funcDetail.argCount(); }
//! Returns argument packs.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG ArgPack* argPacks() const noexcept { return _args; }
//! Tests whether the function has a return value.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasRet() const noexcept { return _funcDetail.hasRet(); }
//! Returns argument pack at `argIndex`.
[[nodiscard]]
inline ArgPack& argPack(size_t argIndex) const noexcept {
ASMJIT_ASSERT(argIndex < argCount());
return _args[argIndex];
}
//! Sets argument at `argIndex`.
inline void setArg(size_t argIndex, const Reg& vReg) noexcept {
ASMJIT_ASSERT(argIndex < argCount());
_args[argIndex][0].init(vReg);
}
//! \overload
inline void setArg(size_t argIndex, const RegOnly& vReg) noexcept {
ASMJIT_ASSERT(argIndex < argCount());
_args[argIndex][0].init(vReg);
}
//! Sets argument at `argIndex` and `valueIndex`.
inline void setArg(size_t argIndex, size_t valueIndex, const Reg& vReg) noexcept {
ASMJIT_ASSERT(argIndex < argCount());
_args[argIndex][valueIndex].init(vReg);
}
//! \overload
inline void setArg(size_t argIndex, size_t valueIndex, const RegOnly& vReg) noexcept {
ASMJIT_ASSERT(argIndex < argCount());
_args[argIndex][valueIndex].init(vReg);
}
//! Resets argument pack at `argIndex`.
inline void resetArg(size_t argIndex) noexcept {
ASMJIT_ASSERT(argIndex < argCount());
_args[argIndex].reset();
}
//! Resets argument pack at `argIndex`.
inline void resetArg(size_t argIndex, size_t valueIndex) noexcept {
ASMJIT_ASSERT(argIndex < argCount());
_args[argIndex][valueIndex].reset();
}
//! \}
};
//! Function return, used by \ref BaseCompiler.
class FuncRetNode : public InstNodeWithOperands<InstNode::kBaseOpCapacity> {
public:
ASMJIT_NONCOPYABLE(FuncRetNode)
//! \name Construction & Destruction
//! \{
//! Creates a new `FuncRetNode` instance.
inline FuncRetNode() noexcept
: InstNodeWithOperands(BaseInst::kIdAbstract, InstOptions::kNone, 0) {
_nodeType = NodeType::kFuncRet;
}
//! \}
};
//! Function invocation, used by \ref BaseCompiler.
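//!
//! A short sketch of a function call through InvokeNode (the x86 call instruction id and the `fn` target are
//! assumed for illustration):
//!
//! \code{.cpp}
//! InvokeNode* invoke;
//! cc.addInvokeNode(&invoke, x86::Inst::kIdCall, imm((void*)fn), FuncSignature::build<int, int>());
//! invoke->setArg(0, argReg); // First argument value-pack.
//! invoke->setRet(0, retReg); // Return value.
//! \endcode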
class InvokeNode : public InstNodeWithOperands<InstNode::kBaseOpCapacity> {
public:
ASMJIT_NONCOPYABLE(InvokeNode)
//! Operand pack provides multiple operands that can be associated with a single return value or function
//! argument. Sometimes this is necessary to express an argument or return value that requires multiple
//! registers, for example a 64-bit value in 32-bit mode or passing / returning homogeneous data structures.
struct OperandPack {
//! Operands.
Operand_ _data[Globals::kMaxValuePack];
//! Reset the pack by resetting all operands in the pack.
inline void reset() noexcept {
for (size_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++)
_data[valueIndex].reset();
}
//! Returns an operand at the given `valueIndex`.
[[nodiscard]]
inline Operand& operator[](size_t valueIndex) noexcept {
ASMJIT_ASSERT(valueIndex < Globals::kMaxValuePack);
return _data[valueIndex].as<Operand>();
}
//! Returns an operand at the given `valueIndex` (const).
[[nodiscard]]
inline const Operand& operator[](size_t valueIndex) const noexcept {
ASMJIT_ASSERT(valueIndex < Globals::kMaxValuePack);
return _data[valueIndex].as<Operand>();
}
};
//! \name Members
//! \{
//! Function detail.
FuncDetail _funcDetail;
//! Function return value(s).
OperandPack _rets;
//! Function arguments.
OperandPack* _args;
//! \}
//! \name Construction & Destruction
//! \{
//! Creates a new `InvokeNode` instance.
inline InvokeNode(InstId instId, InstOptions options) noexcept
: InstNodeWithOperands(instId, options, 0),
_funcDetail(),
_args(nullptr) {
_setType(NodeType::kInvoke);
_resetOps();
_rets.reset();
_addFlags(NodeFlags::kIsRemovable);
}
//! \}
//! \name Accessors
//! \{
//! Initializes the function detail from the given `signature` and `environment`.
[[nodiscard]]
inline Error init(const FuncSignature& signature, const Environment& environment) noexcept {
return _funcDetail.init(signature, environment);
}
//! Returns the function detail.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG FuncDetail& detail() noexcept { return _funcDetail; }
//! Returns the function detail.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const FuncDetail& detail() const noexcept { return _funcDetail; }
//! Returns the target operand.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Operand& target() noexcept { return op(0); }
//! \overload
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const Operand& target() const noexcept { return op(0); }
//! Tests whether the function has a return value.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasRet() const noexcept { return _funcDetail.hasRet(); }
//! Returns the number of function arguments.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t argCount() const noexcept { return _funcDetail.argCount(); }
//! Returns operand pack representing function return value(s).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG OperandPack& retPack() noexcept { return _rets; }
//! Returns operand pack representing function return value(s).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const OperandPack& retPack() const noexcept { return _rets; }
//! Returns the return value at the given `valueIndex`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Operand& ret(size_t valueIndex = 0) noexcept { return _rets[valueIndex]; }
//! \overload
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const Operand& ret(size_t valueIndex = 0) const noexcept { return _rets[valueIndex]; }
//! Returns an operand pack representing the function argument at `argIndex`.
[[nodiscard]]
inline OperandPack& argPack(size_t argIndex) noexcept {
ASMJIT_ASSERT(argIndex < argCount());
return _args[argIndex];
}
//! \overload
[[nodiscard]]
inline const OperandPack& argPack(size_t argIndex) const noexcept {
ASMJIT_ASSERT(argIndex < argCount());
return _args[argIndex];
}
//! Returns a function argument at the given `argIndex`.
[[nodiscard]]
inline Operand& arg(size_t argIndex, size_t valueIndex) noexcept {
ASMJIT_ASSERT(argIndex < argCount());
return _args[argIndex][valueIndex];
}
//! \overload
[[nodiscard]]
inline const Operand& arg(size_t argIndex, size_t valueIndex) const noexcept {
ASMJIT_ASSERT(argIndex < argCount());
return _args[argIndex][valueIndex];
}
//! Sets the function return value at `valueIndex` to `op`.
inline void _setRet(size_t valueIndex, const Operand_& op) noexcept { _rets[valueIndex] = op; }
//! Sets the function argument at `argIndex` and `valueIndex` to `op`.
inline void _setArg(size_t argIndex, size_t valueIndex, const Operand_& op) noexcept {
ASMJIT_ASSERT(argIndex < argCount());
_args[argIndex][valueIndex] = op;
}
//! Sets the function return value at `valueIndex` to `reg`.
ASMJIT_INLINE_NODEBUG void setRet(size_t valueIndex, const Reg& reg) noexcept { _setRet(valueIndex, reg); }
//! Sets the first function argument in a value-pack at `argIndex` to `reg`.
ASMJIT_INLINE_NODEBUG void setArg(size_t argIndex, const Reg& reg) noexcept { _setArg(argIndex, 0, reg); }
//! Sets the first function argument in a value-pack at `argIndex` to `imm`.
ASMJIT_INLINE_NODEBUG void setArg(size_t argIndex, const Imm& imm) noexcept { _setArg(argIndex, 0, imm); }
//! Sets the function argument at `argIndex` and `valueIndex` to `reg`.
ASMJIT_INLINE_NODEBUG void setArg(size_t argIndex, size_t valueIndex, const Reg& reg) noexcept { _setArg(argIndex, valueIndex, reg); }
//! Sets the function argument at `argIndex` and `valueIndex` to `imm`.
ASMJIT_INLINE_NODEBUG void setArg(size_t argIndex, size_t valueIndex, const Imm& imm) noexcept { _setArg(argIndex, valueIndex, imm); }
//! \}
};
//! Function pass extends \ref Pass with \ref FuncPass::runOnFunction().
class ASMJIT_VIRTAPI FuncPass : public Pass {
public:
ASMJIT_NONCOPYABLE(FuncPass)
using Base = Pass;
//! \name Construction & Destruction
//! \{
ASMJIT_API FuncPass(const char* name) noexcept;
//! \}
//! \name Accessors
//! \{
//! Returns the associated `BaseCompiler`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG BaseCompiler* cc() const noexcept { return static_cast<BaseCompiler*>(_cb); }
//! \}
//! \name Pass Interface
//! \{
//! Calls `runOnFunction()` on each `FuncNode` node found.
ASMJIT_API Error run(Zone* zone, Logger* logger) override;
//! Called once per `FuncNode`.
ASMJIT_API virtual Error runOnFunction(Zone* zone, Logger* logger, FuncNode* func);
//! \}
};
//! \}
ASMJIT_END_NAMESPACE
#endif // !ASMJIT_NO_COMPILER
#endif // ASMJIT_CORE_COMPILER_H_INCLUDED

View File

@@ -1,221 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_COMPILERDEFS_H_INCLUDED
#define ASMJIT_CORE_COMPILERDEFS_H_INCLUDED
#include "../core/api-config.h"
#include "../core/operand.h"
#include "../core/type.h"
#include "../core/zonestring.h"
ASMJIT_BEGIN_NAMESPACE
class RAWorkReg;
//! \addtogroup asmjit_compiler
//! \{
//! Public virtual register interface, managed by \ref BaseCompiler.
//!
//! When a virtual register is created by \ref BaseCompiler a `VirtReg` is linked with the register operand id it
//! returns. This `VirtReg` can be accessed via \ref BaseCompiler::virtRegByReg() function, which returns a pointer
//! to `VirtReg`.
//!
//! In general, `VirtReg` should only be introspected, as it contains important variables that are needed and managed
//! by AsmJit; however, the `VirtReg` API can also be used to influence register allocation. For example there is
//! a \ref VirtReg::setWeight() function, which can be used to increase the weight of a virtual register (making it
//! harder to spill, for example). In addition, there is a \ref VirtReg::setHomeIdHint() function, which can be used
//! to suggest an initial physical register assignment for a virtual register. However, AsmJit may still override
//! the assigned physical register in some special cases.
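//!
//! For example (a sketch; the x86 helper names are assumed):
//!
//! \code{.cpp}
//! x86::Gp counter = cc.newGpd("counter");
//! VirtReg* vReg = cc.virtRegByReg(counter);
//! vReg->setWeight(255); // Make the register expensive to spill.
//! vReg->setHomeIdHint(x86::Gp::kIdCx); // Suggest ECX as its home register.
//! \endcode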
class VirtReg {
public:
ASMJIT_NONCOPYABLE(VirtReg)
//! \name Members
//! \{
//! Virtual register signature.
OperandSignature _signature {};
//! Virtual register id.
uint32_t _id = 0;
//! Virtual register size (can be smaller than `_signature._size`).
uint32_t _virtSize = 0;
//! Virtual register alignment (for spilling).
uint8_t _alignment = 0;
//! Type-id.
TypeId _typeId = TypeId::kVoid;
//! Virtual register weight for alloc/spill decisions.
uint8_t _weight = 1;
//! True if this is a fixed register, never reallocated.
uint8_t _isFixed : 1;
//! True if the virtual register is only used as a stack (never accessed as register).
uint8_t _isStack : 1;
//! True if this virtual register has assigned stack offset (can be only valid after register allocation pass).
uint8_t _hasStackSlot : 1;
uint8_t _reservedBits : 5;
//! Home register hint for the register allocator (initially unassigned).
uint8_t _homeIdHint = Reg::kIdBad;
//! Stack offset assigned by the register allocator relative to stack pointer (can be negative as well).
int32_t _stackOffset = 0;
//! Reserved for future use (padding).
uint32_t _reservedU32 = 0;
//! Virtual register name (either empty or user provided).
ZoneString<16> _name {};
// The following members are used exclusively by RAPass. They are initialized to null pointers when the VirtReg
// is created and then changed during RAPass execution. RAPass sets them back to null before it returns.
//! Reference to `RAWorkReg`, used during register allocation.
RAWorkReg* _workReg = nullptr;
//! \}
//! \name Construction & Destruction
//! \{
ASMJIT_INLINE_NODEBUG VirtReg(OperandSignature signature, uint32_t id, uint32_t virtSize, uint32_t alignment, TypeId typeId) noexcept
: _signature(signature),
_id(id),
_virtSize(virtSize),
_alignment(uint8_t(alignment)),
_typeId(typeId),
_isFixed(0),
_isStack(0),
_hasStackSlot(0),
_reservedBits(0) {}
//! \}
//! \name Accessors
//! \{
//! Returns the virtual register id.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t id() const noexcept { return _id; }
//! Returns the virtual register name.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const char* name() const noexcept { return _name.data(); }
//! Returns the size of the virtual register name.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t nameSize() const noexcept { return _name.size(); }
//! Returns a register signature of this virtual register.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG OperandSignature signature() const noexcept { return _signature; }
//! Returns a virtual register type (maps to the physical register type as well).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegType type() const noexcept { return _signature.regType(); }
//! Returns a virtual register group (maps to the physical register group as well).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegGroup group() const noexcept { return _signature.regGroup(); }
//! Returns a real size of the register this virtual register maps to.
//!
//! For example, if this is a 128-bit SIMD register used for a scalar single-precision floating point value, then
//! its virtSize would be 4; however, `regSize` would still say 16 (128 bits), because that's the smallest size
//! of that register type.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t regSize() const noexcept { return _signature.size(); }
//! Returns the virtual register size.
//!
//! The virtual register size describes how many bytes the virtual register needs to store its content. It can be
//! smaller than the physical register size, see `regSize()`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t virtSize() const noexcept { return _virtSize; }
//! Returns the virtual register alignment.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t alignment() const noexcept { return _alignment; }
//! Returns the virtual register type id.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG TypeId typeId() const noexcept { return _typeId; }
//! Returns the virtual register weight - the register allocator can use it as an explicit hint for alloc/spill
//! decisions.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t weight() const noexcept { return _weight; }
//! Sets the virtual register weight (0 to 255) - the register allocator can use it as an explicit hint for
//! alloc/spill decisions and initial bin-packing.
ASMJIT_INLINE_NODEBUG void setWeight(uint32_t weight) noexcept { _weight = uint8_t(weight); }
//! Returns whether the virtual register is always allocated to a fixed physical register (and never reallocated).
//!
//! \note This is only used for special purposes and it's mostly internal.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isFixed() const noexcept { return bool(_isFixed); }
//! Tests whether the virtual register is in fact a stack that only uses the virtual register id.
//!
//! \note It's an error if a stack is accessed as a register.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isStack() const noexcept { return bool(_isStack); }
//! Tests whether this virtual register (or stack) has assigned a stack offset.
//!
//! If this is a virtual register that was never allocated on the stack, this returns false; if it's a virtual
//! register that was spilled or an explicitly allocated stack area, this returns true.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasStackSlot() const noexcept { return bool(_hasStackSlot); }
//! Assigns a stack offset of this virtual register to `stackOffset` and sets `_hasStackSlot` to true.
ASMJIT_INLINE_NODEBUG void assignStackSlot(int32_t stackOffset) noexcept {
_hasStackSlot = 1;
_stackOffset = stackOffset;
}
//! Tests whether this virtual register has assigned a physical register as a hint to the register allocator.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasHomeIdHint() const noexcept { return _homeIdHint != Reg::kIdBad; }
//! Returns a physical register hint, which will be used by the register allocator.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t homeIdHint() const noexcept { return _homeIdHint; }
//! Assigns a physical register hint, which will be used by the register allocator.
ASMJIT_INLINE_NODEBUG void setHomeIdHint(uint32_t homeId) noexcept { _homeIdHint = uint8_t(homeId); }
//! Resets a physical register hint.
ASMJIT_INLINE_NODEBUG void resetHomeIdHint() noexcept { _homeIdHint = Reg::kIdBad; }
//! Returns a stack offset associated with a virtual register or explicit stack allocation.
//!
//! \note Always verify that the stack offset has been assigned by calling \ref hasStackSlot(). The return
//! value will be zero when the stack offset was not assigned.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG int32_t stackOffset() const noexcept { return _stackOffset; }
//! Tests whether the virtual register has an associated `RAWorkReg` at the moment.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasWorkReg() const noexcept { return _workReg != nullptr; }
//! Returns an associated RAWorkReg with this virtual register (only valid during register allocation).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RAWorkReg* workReg() const noexcept { return _workReg; }
//! Associates a RAWorkReg with this virtual register (used by register allocator).
ASMJIT_INLINE_NODEBUG void setWorkReg(RAWorkReg* workReg) noexcept { _workReg = workReg; }
//! Reset the RAWorkReg association (used by register allocator).
ASMJIT_INLINE_NODEBUG void resetWorkReg() noexcept { _workReg = nullptr; }
//! \}
};
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_COMPILERDEFS_H_INCLUDED

View File

@@ -1,369 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/constpool.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
// ConstPool - Construction & Destruction
// ======================================
ConstPool::ConstPool(Zone* zone) noexcept { reset(zone); }
ConstPool::~ConstPool() noexcept {}
// ConstPool - Reset
// =================
void ConstPool::reset(Zone* zone) noexcept {
_zone = zone;
size_t dataSize = 1;
for (size_t i = 0; i < ASMJIT_ARRAY_SIZE(_tree); i++) {
_tree[i].reset();
_tree[i].setDataSize(dataSize);
_gaps[i] = nullptr;
dataSize <<= 1;
}
_gapPool = nullptr;
_size = 0;
_alignment = 0;
_minItemSize = 0;
}
// ConstPool - Operations
// ======================
static inline ConstPool::Gap* ConstPool_allocGap(ConstPool* self) noexcept {
ConstPool::Gap* gap = self->_gapPool;
if (!gap) {
return self->_zone->alloc<ConstPool::Gap>();
}
self->_gapPool = gap->_next;
return gap;
}
static inline void ConstPool_freeGap(ConstPool* self, ConstPool::Gap* gap) noexcept {
gap->_next = self->_gapPool;
self->_gapPool = gap;
}
static void ConstPool_addGap(ConstPool* self, size_t offset, size_t size) noexcept {
ASMJIT_ASSERT(size > 0);
while (size > 0) {
size_t gapIndex;
size_t gapSize;
if (size >= 32 && Support::isAligned<size_t>(offset, 32)) {
gapIndex = ConstPool::kIndex32;
gapSize = 32;
}
else if (size >= 16 && Support::isAligned<size_t>(offset, 16)) {
gapIndex = ConstPool::kIndex16;
gapSize = 16;
}
else if (size >= 8 && Support::isAligned<size_t>(offset, 8)) {
gapIndex = ConstPool::kIndex8;
gapSize = 8;
}
else if (size >= 4 && Support::isAligned<size_t>(offset, 4)) {
gapIndex = ConstPool::kIndex4;
gapSize = 4;
}
else if (size >= 2 && Support::isAligned<size_t>(offset, 2)) {
gapIndex = ConstPool::kIndex2;
gapSize = 2;
}
else {
gapIndex = ConstPool::kIndex1;
gapSize = 1;
}
// We don't have to check for errors here; if the allocation failed nothing really happened (the gap just
// won't be visible) and it will fail again at a place where the same check would generate a `kErrorOutOfMemory` error.
ConstPool::Gap* gap = ConstPool_allocGap(self);
if (!gap) {
return;
}
gap->_next = self->_gaps[gapIndex];
self->_gaps[gapIndex] = gap;
gap->_offset = offset;
gap->_size = gapSize;
offset += gapSize;
size -= gapSize;
}
}
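// A worked example of the decomposition above (values chosen purely for illustration): a 7-byte gap at
// offset 9 is split into the largest aligned chunks the offset allows, producing three gap records:
//
//   ConstPool_addGap(self, 9, 7);
//   //   -> 1-byte gap at offset 9  (kIndex1: offset 9 is odd)
//   //   -> 2-byte gap at offset 10 (kIndex2: offset 10 is 2-byte aligned)
//   //   -> 4-byte gap at offset 12 (kIndex4: offset 12 is 4-byte aligned)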
Error ConstPool::add(const void* data, size_t size, size_t& dstOffset) noexcept {
constexpr size_t kMaxSize = size_t(1) << (kIndexCount - 1);
// Avoid sizes outside of the supported range.
if (ASMJIT_UNLIKELY(size == 0 || size > kMaxSize)) {
return DebugUtils::errored(kErrorInvalidArgument);
}
size_t treeIndex = Support::ctz(size);
// Reject sizes that are not a power of 2.
if (ASMJIT_UNLIKELY((size_t(1) << treeIndex) != size)) {
return DebugUtils::errored(kErrorInvalidArgument);
}
ConstPool::Node* node = _tree[treeIndex].get(data);
if (node) {
dstOffset = node->_offset;
return kErrorOk;
}
// Before incrementing the current offset, check whether there is a gap that can be reused for the requested data.
size_t offset = ~size_t(0);
size_t gapIndex = treeIndex;
while (gapIndex != kIndexCount - 1) {
ConstPool::Gap* gap = _gaps[treeIndex];
// Check if there is a gap.
if (gap) {
size_t gapOffset = gap->_offset;
size_t gapSize = gap->_size;
// Destroy the gap for now.
_gaps[treeIndex] = gap->_next;
ConstPool_freeGap(this, gap);
offset = gapOffset;
ASMJIT_ASSERT(Support::isAligned<size_t>(offset, size));
gapSize -= size;
if (gapSize > 0) {
ConstPool_addGap(this, gapOffset, gapSize);
}
}
gapIndex++;
}
if (offset == ~size_t(0)) {
// Compute how many bytes have to be skipped so the offset is aligned to `size`.
size_t diff = Support::alignUpDiff<size_t>(_size, size);
if (diff != 0) {
ConstPool_addGap(this, _size, diff);
_size += diff;
}
offset = _size;
_size += size;
}
// Add the initial node to the right index.
node = ConstPool::Tree::_newNode(_zone, data, size, offset, false);
if (ASMJIT_UNLIKELY(!node)) {
return DebugUtils::errored(kErrorOutOfMemory);
}
_tree[treeIndex].insert(node);
_alignment = Support::max<size_t>(_alignment, size);
dstOffset = offset;
// Now create a bunch of shared constants based on the data pattern. We stop at size 4; it probably
// doesn't make sense to split constants down to 1 byte.
size_t pCount = 1;
size_t smallerSize = size;
while (smallerSize > 4) {
pCount <<= 1;
smallerSize >>= 1;
ASMJIT_ASSERT(treeIndex != 0);
treeIndex--;
const uint8_t* pData = static_cast<const uint8_t*>(data);
for (size_t i = 0; i < pCount; i++, pData += smallerSize) {
node = _tree[treeIndex].get(pData);
if (node) {
continue;
}
node = ConstPool::Tree::_newNode(_zone, pData, smallerSize, offset + (i * smallerSize), true);
_tree[treeIndex].insert(node);
}
}
_minItemSize = !_minItemSize ? size : Support::min(_minItemSize, size);
return kErrorOk;
}
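// A short trace of the subdivision loop above (illustrative): adding the 8-byte constant
// 0x1122334455667788 at offset 0 inserts one non-shared 8-byte node and, because we stop at size 4,
// two shared 4-byte nodes covering the same storage:
//
//   kIndex8: 0x1122334455667788 at offset 0 (shared=false)
//   kIndex4: bytes [0..3] at offset 0 and bytes [4..7] at offset 4 (shared=true)
//
// The byte split is endian-dependent, which is why the unit test below adjusts offsets on big-endian targets.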
// ConstPool - Fill
// ================
struct ConstPoolFill {
inline ConstPoolFill(uint8_t* dst, size_t dataSize) noexcept :
_dst(dst),
_dataSize(dataSize) {}
inline void operator()(const ConstPool::Node* node) noexcept {
if (!node->_shared) {
memcpy(_dst + node->_offset, node->data(), _dataSize);
}
}
uint8_t* _dst;
size_t _dataSize;
};
void ConstPool::fill(void* dst) const noexcept {
// Clears possible gaps, asmjit should never emit garbage to the output.
memset(dst, 0, _size);
ConstPoolFill filler(static_cast<uint8_t*>(dst), 1);
for (size_t i = 0; i < ASMJIT_ARRAY_SIZE(_tree); i++) {
_tree[i].forEach(filler);
filler._dataSize <<= 1;
}
}
// ConstPool - Tests
// =================
#if defined(ASMJIT_TEST)
UNIT(const_pool) {
Zone zone(32u * 1024u);
ConstPool pool(&zone);
uint32_t i;
uint32_t kCount = BrokenAPI::hasArg("--quick") ? 1000 : 1000000;
INFO("Adding %u constants to the pool", kCount);
{
size_t prevOffset;
size_t curOffset;
uint64_t c = 0x0101010101010101u;
EXPECT_EQ(pool.add(&c, 8, prevOffset), kErrorOk);
EXPECT_EQ(prevOffset, 0u);
for (i = 1; i < kCount; i++) {
c++;
EXPECT_EQ(pool.add(&c, 8, curOffset), kErrorOk);
EXPECT_EQ(prevOffset + 8, curOffset);
EXPECT_EQ(pool.size(), (i + 1) * 8);
prevOffset = curOffset;
}
EXPECT_EQ(pool.alignment(), 8u);
}
INFO("Retrieving %u constants from the pool", kCount);
{
uint64_t c = 0x0101010101010101u;
for (i = 0; i < kCount; i++) {
size_t offset;
EXPECT_EQ(pool.add(&c, 8, offset), kErrorOk);
EXPECT_EQ(offset, i * 8);
c++;
}
}
INFO("Checking if the constants were split into 4-byte patterns");
{
uint32_t c = 0x01010101u;
size_t offset;
EXPECT_EQ(pool.add(&c, 4, offset), kErrorOk);
EXPECT_EQ(offset, 0u);
// NOTE: We have to adjust the offset to successfully test this on big endian architectures.
size_t baseOffset = size_t(ASMJIT_ARCH_BE ? 4 : 0);
for (i = 1; i < kCount; i++) {
c++;
EXPECT_EQ(pool.add(&c, 4, offset), kErrorOk);
EXPECT_EQ(offset, baseOffset + i * 8);
}
}
INFO("Adding 2 byte constant to misalign the current offset");
{
uint16_t c = 0xFFFF;
size_t offset;
EXPECT_EQ(pool.add(&c, 2, offset), kErrorOk);
EXPECT_EQ(offset, kCount * 8);
EXPECT_EQ(pool.alignment(), 8u);
}
INFO("Adding 8 byte constant to check if pool gets aligned again");
{
uint64_t c = 0xFFFFFFFFFFFFFFFFu;
size_t offset;
EXPECT_EQ(pool.add(&c, 8, offset), kErrorOk);
EXPECT_EQ(offset, kCount * 8 + 8u);
}
INFO("Adding 2 byte constant to verify the gap is filled");
{
uint16_t c = 0xFFFE;
size_t offset;
EXPECT_EQ(pool.add(&c, 2, offset), kErrorOk);
EXPECT_EQ(offset, kCount * 8 + 2);
EXPECT_EQ(pool.alignment(), 8u);
}
INFO("Checking reset functionality");
{
pool.reset(&zone);
zone.reset();
EXPECT_EQ(pool.size(), 0u);
EXPECT_EQ(pool.alignment(), 0u);
}
INFO("Checking pool alignment when combined constants are added");
{
uint8_t bytes[32] = { 0 };
size_t offset;
pool.add(bytes, 1, offset);
EXPECT_EQ(pool.size(), 1u);
EXPECT_EQ(pool.alignment(), 1u);
EXPECT_EQ(offset, 0u);
pool.add(bytes, 2, offset);
EXPECT_EQ(pool.size(), 4u);
EXPECT_EQ(pool.alignment(), 2u);
EXPECT_EQ(offset, 2u);
pool.add(bytes, 4, offset);
EXPECT_EQ(pool.size(), 8u);
EXPECT_EQ(pool.alignment(), 4u);
EXPECT_EQ(offset, 4u);
pool.add(bytes, 4, offset);
EXPECT_EQ(pool.size(), 8u);
EXPECT_EQ(pool.alignment(), 4u);
EXPECT_EQ(offset, 4u);
pool.add(bytes, 32, offset);
EXPECT_EQ(pool.size(), 64u);
EXPECT_EQ(pool.alignment(), 32u);
EXPECT_EQ(offset, 32u);
}
}
#endif
ASMJIT_END_NAMESPACE

View File

@@ -1,281 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_CONSTPOOL_H_INCLUDED
#define ASMJIT_CORE_CONSTPOOL_H_INCLUDED
#include "../core/support.h"
#include "../core/zone.h"
#include "../core/zonetree.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_utilities
//! \{
//! Constant pool scope.
enum class ConstPoolScope : uint32_t {
//! Local constant, always embedded right after the current function.
kLocal = 0,
//! Global constant, embedded at the end of the currently compiled code.
kGlobal = 1,
//! Maximum value of `ConstPoolScope`.
kMaxValue = kGlobal
};
//! Constant pool.
//!
//! Constant pool is designed to hold 1, 2, 4, 8, 16, 32, and 64 byte constants. It's not designed to hold constants
//! having arbitrary length like strings and arrays.
class ConstPool {
public:
ASMJIT_NONCOPYABLE(ConstPool)
//! \cond INTERNAL
//! Index of a given size in const-pool table.
enum Index : uint32_t {
kIndex1 = 0,
kIndex2 = 1,
kIndex4 = 2,
kIndex8 = 3,
kIndex16 = 4,
kIndex32 = 5,
kIndex64 = 6,
kIndexCount = 7
};
//! Zone-allocated const-pool gap created by two differently aligned constants.
struct Gap {
//! Pointer to the next gap.
Gap* _next;
//! Offset of the gap.
size_t _offset;
//! Remaining bytes of the gap (basically a gap size).
size_t _size;
};
//! Zone-allocated const-pool node.
class Node : public ZoneTreeNodeT<Node> {
public:
ASMJIT_NONCOPYABLE(Node)
//! If this constant is shared with another.
uint32_t _shared : 1;
//! Data offset from the beginning of the pool.
uint32_t _offset;
ASMJIT_INLINE_NODEBUG Node(size_t offset, bool shared) noexcept
: ZoneTreeNodeT<Node>(),
_shared(shared),
_offset(uint32_t(offset)) {}
[[nodiscard]]
ASMJIT_INLINE_NODEBUG void* data() noexcept { return Support::offsetPtr<void>(this, sizeof(*this)); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const void* data() const noexcept { return Support::offsetPtr<void>(this, sizeof(*this)); }
};
//! Data comparer used internally.
class Compare {
public:
size_t _dataSize;
ASMJIT_INLINE_NODEBUG Compare(size_t dataSize) noexcept
: _dataSize(dataSize) {}
[[nodiscard]]
ASMJIT_INLINE_NODEBUG int operator()(const Node& a, const Node& b) const noexcept {
return ::memcmp(a.data(), b.data(), _dataSize);
}
[[nodiscard]]
ASMJIT_INLINE_NODEBUG int operator()(const Node& a, const void* data) const noexcept {
return ::memcmp(a.data(), data, _dataSize);
}
};
//! Zone-allocated const-pool tree.
struct Tree {
//! RB tree.
ZoneTree<Node> _tree;
//! Size of the tree (number of nodes).
size_t _size;
//! Size of the data.
size_t _dataSize;
ASMJIT_INLINE_NODEBUG explicit Tree(size_t dataSize = 0) noexcept
: _tree(),
_size(0),
_dataSize(dataSize) {}
ASMJIT_INLINE_NODEBUG void reset() noexcept {
_tree.reset();
_size = 0;
}
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _size == 0; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t size() const noexcept { return _size; }
inline void setDataSize(size_t dataSize) noexcept {
ASMJIT_ASSERT(empty());
_dataSize = dataSize;
}
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Node* get(const void* data) noexcept {
Compare cmp(_dataSize);
return _tree.get(data, cmp);
}
ASMJIT_INLINE_NODEBUG void insert(Node* node) noexcept {
Compare cmp(_dataSize);
_tree.insert(node, cmp);
_size++;
}
template<typename Visitor>
inline void forEach(Visitor& visitor) const noexcept {
Node* node = _tree.root();
if (!node) return;
Node* stack[Globals::kMaxTreeHeight];
size_t top = 0;
for (;;) {
Node* left = node->left();
if (left != nullptr) {
ASMJIT_ASSERT(top != Globals::kMaxTreeHeight);
stack[top++] = node;
node = left;
continue;
}
for (;;) {
visitor(node);
node = node->right();
if (node != nullptr)
break;
if (top == 0)
return;
node = stack[--top];
}
}
}
[[nodiscard]]
static inline Node* _newNode(Zone* zone, const void* data, size_t size, size_t offset, bool shared) noexcept {
size_t nodeSize = Support::alignUp(sizeof(Node) + size, Globals::kZoneAlignment);
Node* node = zone->alloc<Node>(nodeSize);
if (ASMJIT_UNLIKELY(!node)) {
return nullptr;
}
node = new(Support::PlacementNew{node}) Node(offset, shared);
memcpy(node->data(), data, size);
return node;
}
};
//! \endcond
//! \name Members
//! \{
//! Zone allocator.
Zone* _zone;
//! Tree per size.
Tree _tree[kIndexCount];
//! Gaps per size.
Gap* _gaps[kIndexCount];
//! Gap pool (freelist of released `Gap` records).
Gap* _gapPool;
//! Size of the pool (in bytes).
size_t _size;
//! Required pool alignment.
size_t _alignment;
//! Minimum item size in the pool.
size_t _minItemSize;
//! \}
//! \name Construction & Destruction
//! \{
//! Creates a new constant pool that would use `zone` as a memory allocator.
ASMJIT_API explicit ConstPool(Zone* zone) noexcept;
//! Destroys this constant pool.
ASMJIT_API ~ConstPool() noexcept;
//! \}
//! \name Reset
//! \{
//! Resets this constant pool and its allocator to `zone`.
ASMJIT_API void reset(Zone* zone) noexcept;
//! \}
//! \name Accessors
//! \{
//! Tests whether the constant-pool is empty.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _size == 0; }
//! Returns the size of the constant-pool in bytes.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t size() const noexcept { return _size; }
//! Returns minimum alignment.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t alignment() const noexcept { return _alignment; }
//! Returns the minimum size of all items added to the constant pool.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t minItemSize() const noexcept { return _minItemSize; }
//! \}
//! \name Utilities
//! \{
//! Adds a constant to the constant pool.
//!
//! The constant must have a known size, which is 1, 2, 4, 8, 16, 32 or 64 bytes. The constant is added to the
//! pool only if it doesn't exist yet, otherwise the offset of the cached value is returned.
//!
//! AsmJit is able to subdivide added constants, so for example if you add 8-byte constant 0x1122334455667788 it
//! will create the following slots:
//!
//! 8-byte: 0x1122334455667788
//! 4-byte: 0x11223344, 0x55667788
//!
//! The reason is that when combining MMX/SSE/AVX code some patterns are used frequently. However, AsmJit is not
//! able to reallocate a constant that has been already added. For example if you try to add 4-byte constant and
//! then 8-byte constant having the same 4-byte pattern as the previous one, two independent slots will be used.
ASMJIT_API Error add(const void* data, size_t size, size_t& dstOffset) noexcept;
//! Fills the destination with the content of this constant pool.
ASMJIT_API void fill(void* dst) const noexcept;
};
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_CONSTPOOL_H_INCLUDED
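// A minimal usage sketch of the API above (hedged: the zone block size and the output buffer handling
// are illustrative, not prescriptive):
//
//   Zone zone(4096);
//   ConstPool pool(&zone);
//
//   uint64_t c = 0x1122334455667788u;
//   size_t offset;
//   if (pool.add(&c, sizeof(c), offset) == kErrorOk) {
//     // `offset` is where the constant lives; size() and alignment() describe the final blob.
//     uint8_t buffer[64];
//     ASMJIT_ASSERT(pool.size() <= sizeof(buffer));
//     pool.fill(buffer); // Zeroes gaps, then copies every non-shared constant.
//   }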

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -1,365 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/archtraits.h"
#include "../core/emithelper_p.h"
#include "../core/formatter.h"
#include "../core/funcargscontext_p.h"
#include "../core/radefs_p.h"
// Can be used for debugging...
// #define ASMJIT_DUMP_ARGS_ASSIGNMENT
ASMJIT_BEGIN_NAMESPACE
// BaseEmitHelper - Formatting
// ===========================
#ifdef ASMJIT_DUMP_ARGS_ASSIGNMENT
static void dumpFuncValue(String& sb, Arch arch, const FuncValue& value) noexcept {
Formatter::formatTypeId(sb, value.typeId());
sb.append('@');
if (value.isIndirect()) {
sb.append('[');
}
if (value.isReg()) {
Formatter::formatRegister(sb, 0, nullptr, arch, value.regType(), value.regId());
}
else if (value.isStack()) {
sb.appendFormat("[%d]", value.stackOffset());
}
else {
sb.append("<none>");
}
if (value.isIndirect()) {
sb.append(']');
}
}
static void dumpAssignment(String& sb, const FuncArgsContext& ctx) noexcept {
using Var = FuncArgsContext::Var;
Arch arch = ctx.arch();
uint32_t varCount = ctx.varCount();
for (uint32_t i = 0; i < varCount; i++) {
const Var& var = ctx.var(i);
const FuncValue& dst = var.out;
const FuncValue& cur = var.cur;
sb.appendFormat("Var%u: ", i);
dumpFuncValue(sb, arch, dst);
sb.append(" <- ");
dumpFuncValue(sb, arch, cur);
if (var.isDone()) {
sb.append(" {Done}");
}
sb.append('\n');
}
}
#endif
// BaseEmitHelper - Abstract
// =========================
Error BaseEmitHelper::emitRegMove(const Operand_& dst_, const Operand_& src_, TypeId typeId, const char* comment) {
DebugUtils::unused(dst_, src_, typeId, comment);
return DebugUtils::errored(kErrorInvalidState);
}
Error BaseEmitHelper::emitRegSwap(const Reg& a, const Reg& b, const char* comment) {
DebugUtils::unused(a, b, comment);
return DebugUtils::errored(kErrorInvalidState);
}
Error BaseEmitHelper::emitArgMove(const Reg& dst_, TypeId dstTypeId, const Operand_& src_, TypeId srcTypeId, const char* comment) {
DebugUtils::unused(dst_, dstTypeId, src_, srcTypeId, comment);
return DebugUtils::errored(kErrorInvalidState);
}
// BaseEmitHelper - EmitArgsAssignment
// ===================================
ASMJIT_FAVOR_SIZE Error BaseEmitHelper::emitArgsAssignment(const FuncFrame& frame, const FuncArgsAssignment& args) {
using Var = FuncArgsContext::Var;
using WorkData = FuncArgsContext::WorkData;
enum WorkFlags : uint32_t {
kWorkNone = 0x00,
kWorkDidSome = 0x01,
kWorkPending = 0x02,
kWorkPostponed = 0x04
};
Arch arch = frame.arch();
const ArchTraits& archTraits = ArchTraits::byArch(arch);
RAConstraints constraints;
FuncArgsContext ctx;
ASMJIT_PROPAGATE(constraints.init(arch));
ASMJIT_PROPAGATE(ctx.initWorkData(frame, args, &constraints));
#ifdef ASMJIT_DUMP_ARGS_ASSIGNMENT
{
String sb;
dumpAssignment(sb, ctx);
printf("%s\n", sb.data());
}
#endif
auto& workData = ctx._workData;
uint32_t varCount = ctx._varCount;
uint32_t saVarId = ctx._saVarId;
Reg sp = Reg(_emitter->_gpSignature, archTraits.spRegId());
Reg sa = sp;
if (frame.hasDynamicAlignment()) {
if (frame.hasPreservedFP()) {
sa.setId(archTraits.fpRegId());
}
else {
sa.setId(saVarId < varCount ? ctx._vars[saVarId].cur.regId() : frame.saRegId());
}
}
// Register to stack and stack to stack moves must come first, as at this point we have the best chance
// of having as many unassigned registers as possible.
if (ctx._stackDstMask) {
// Base address of all arguments passed by stack.
BaseMem baseArgPtr(sa, int32_t(frame.saOffset(sa.id())));
BaseMem baseStackPtr(sp, 0);
for (uint32_t varId = 0; varId < varCount; varId++) {
Var& var = ctx._vars[varId];
if (!var.out.isStack()) {
continue;
}
FuncValue& cur = var.cur;
FuncValue& out = var.out;
ASMJIT_ASSERT(cur.isReg() || cur.isStack());
Reg reg;
BaseMem dstStackPtr = baseStackPtr.cloneAdjusted(out.stackOffset());
BaseMem srcStackPtr = baseArgPtr.cloneAdjusted(cur.stackOffset());
if (cur.isIndirect()) {
if (cur.isStack()) {
// TODO: Indirect stack.
return DebugUtils::errored(kErrorInvalidAssignment);
}
else {
srcStackPtr.setBaseId(cur.regId());
}
}
if (cur.isReg() && !cur.isIndirect()) {
WorkData& wd = workData[RegUtils::groupOf(cur.regType())];
uint32_t regId = cur.regId();
reg.setSignatureAndId(RegUtils::signatureOf(cur.regType()), regId);
wd.unassign(varId, regId);
}
else {
// Stack to stack move - tricky, but since we move from stack to stack we can decide which temporary
// register to use. In general we follow the rule that IntToInt moves will use GP regs with a possibility
// to sign or zero extend, and all other moves will either use GP or VEC regs depending on the size of the move.
OperandSignature signature = getSuitableRegForMemToMemMove(arch, out.typeId(), cur.typeId());
if (ASMJIT_UNLIKELY(!signature.isValid())) {
return DebugUtils::errored(kErrorInvalidState);
}
WorkData& wd = workData[signature.regGroup()];
RegMask availableRegs = wd.availableRegs();
if (ASMJIT_UNLIKELY(!availableRegs)) {
return DebugUtils::errored(kErrorInvalidState);
}
uint32_t availableId = Support::ctz(availableRegs);
reg.setSignatureAndId(signature, availableId);
ASMJIT_PROPAGATE(emitArgMove(reg, out.typeId(), srcStackPtr, cur.typeId()));
}
if (cur.isIndirect() && cur.isReg()) {
workData[RegGroup::kGp].unassign(varId, cur.regId());
}
// Register to stack move.
ASMJIT_PROPAGATE(emitRegMove(dstStackPtr, reg, cur.typeId()));
var.markDone();
}
}
// Shuffle all registers that are currently assigned accordingly to target assignment.
uint32_t workFlags = kWorkNone;
for (;;) {
for (uint32_t varId = 0; varId < varCount; varId++) {
Var& var = ctx._vars[varId];
if (var.isDone() || !var.cur.isReg()) {
continue;
}
FuncValue& cur = var.cur;
FuncValue& out = var.out;
RegGroup curGroup = RegUtils::groupOf(cur.regType());
RegGroup outGroup = RegUtils::groupOf(out.regType());
uint32_t curId = cur.regId();
uint32_t outId = out.regId();
if (curGroup != outGroup) {
// TODO: Conversion is not supported.
return DebugUtils::errored(kErrorInvalidAssignment);
}
else {
WorkData& wd = workData[outGroup];
if (!wd.isAssigned(outId) || curId == outId) {
EmitMove:
ASMJIT_PROPAGATE(
emitArgMove(
Reg(RegUtils::signatureOf(out.regType()), outId), out.typeId(),
Reg(RegUtils::signatureOf(cur.regType()), curId), cur.typeId()));
// Only reassign if this is not a sign/zero extension that happens on the same in/out register.
if (curId != outId) {
wd.reassign(varId, outId, curId);
}
cur.initReg(out.regType(), outId, out.typeId());
if (outId == out.regId()) {
var.markDone();
}
workFlags |= kWorkDidSome | kWorkPending;
}
else {
uint32_t altId = wd._physToVarId[outId];
Var& altVar = ctx._vars[altId];
if (!altVar.out.isInitialized() || (altVar.out.isReg() && altVar.out.regId() == curId)) {
// Only a few architectures provide swap operations, and only for a few register groups.
if (archTraits.hasInstRegSwap(curGroup)) {
RegType highestType = Support::max(cur.regType(), altVar.cur.regType());
if (Support::isBetween(highestType, RegType::kGp8Lo, RegType::kGp16)) {
highestType = RegType::kGp32;
}
OperandSignature signature = RegUtils::signatureOf(highestType);
ASMJIT_PROPAGATE(emitRegSwap(Reg(signature, outId), Reg(signature, curId)));
wd.swap(varId, curId, altId, outId);
cur.setRegId(outId);
var.markDone();
altVar.cur.setRegId(curId);
if (altVar.out.isInitialized()) {
altVar.markDone();
}
workFlags |= kWorkDidSome;
}
else {
// If there is a scratch register it can be used to perform the swap.
RegMask availableRegs = wd.availableRegs();
if (availableRegs) {
RegMask inOutRegs = wd.dstRegs();
if (availableRegs & ~inOutRegs) {
availableRegs &= ~inOutRegs;
}
outId = Support::ctz(availableRegs);
goto EmitMove;
}
else {
workFlags |= kWorkPending;
}
}
}
else {
workFlags |= kWorkPending;
}
}
}
}
if (!(workFlags & kWorkPending)) {
break;
}
// If we did nothing twice it means that something is really broken.
if ((workFlags & (kWorkDidSome | kWorkPostponed)) == kWorkPostponed) {
return DebugUtils::errored(kErrorInvalidState);
}
workFlags = (workFlags & kWorkDidSome) ? kWorkNone : kWorkPostponed;
}
// Load arguments passed by stack into registers. This is pretty simple and, unlike the previous phase,
// it requires at most two iterations (a second iteration is only needed when an argument is destined
// for the register that still holds the stack-arguments base).
if (ctx._hasStackSrc) {
uint32_t iterCount = 1;
if (frame.hasDynamicAlignment() && !frame.hasPreservedFP()) {
sa.setId(saVarId < varCount ? ctx._vars[saVarId].cur.regId() : frame.saRegId());
}
// Base address of all arguments passed by stack.
BaseMem baseArgPtr(sa, int32_t(frame.saOffset(sa.id())));
for (uint32_t iter = 0; iter < iterCount; iter++) {
for (uint32_t varId = 0; varId < varCount; varId++) {
Var& var = ctx._vars[varId];
if (var.isDone()) {
continue;
}
if (var.cur.isStack()) {
ASMJIT_ASSERT(var.out.isReg());
uint32_t outId = var.out.regId();
RegType outType = var.out.regType();
RegGroup group = RegUtils::groupOf(outType);
WorkData& wd = workData[group];
if (outId == sa.id() && group == RegGroup::kGp) {
// This register will be processed last as we still need `saRegId`.
if (iterCount == 1) {
iterCount++;
continue;
}
wd.unassign(wd._physToVarId[outId], outId);
}
Reg dstReg = Reg(RegUtils::signatureOf(outType), outId);
BaseMem srcMem = baseArgPtr.cloneAdjusted(var.cur.stackOffset());
ASMJIT_PROPAGATE(emitArgMove(
dstReg, var.out.typeId(),
srcMem, var.cur.typeId()));
wd.assign(varId, outId);
var.cur.initReg(outType, outId, var.cur.typeId(), FuncValue::kFlagIsDone);
}
}
}
}
return kErrorOk;
}
ASMJIT_END_NAMESPACE

View File

@@ -1,62 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_EMITHELPER_P_H_INCLUDED
#define ASMJIT_CORE_EMITHELPER_P_H_INCLUDED
#include "../core/emitter.h"
#include "../core/operand.h"
#include "../core/type.h"
ASMJIT_BEGIN_NAMESPACE
//! \cond INTERNAL
//! \addtogroup asmjit_core
//! \{
//! Helper class that provides utilities for each supported architecture.
class BaseEmitHelper {
public:
BaseEmitter* _emitter;
ASMJIT_INLINE_NODEBUG explicit BaseEmitHelper(BaseEmitter* emitter = nullptr) noexcept
: _emitter(emitter) {}
ASMJIT_INLINE_NODEBUG virtual ~BaseEmitHelper() noexcept = default;
[[nodiscard]]
ASMJIT_INLINE_NODEBUG BaseEmitter* emitter() const noexcept { return _emitter; }
ASMJIT_INLINE_NODEBUG void setEmitter(BaseEmitter* emitter) noexcept { _emitter = emitter; }
//! Emits a pure move operation between two registers of the same type or between a register and its home
//! slot. This function does not handle register conversion.
virtual Error emitRegMove(
const Operand_& dst_,
const Operand_& src_, TypeId typeId, const char* comment = nullptr);
//! Emits swap between two registers.
virtual Error emitRegSwap(
const Reg& a,
const Reg& b, const char* comment = nullptr);
//! Emits move from a function argument (either register or stack) to a register.
//!
//! This function can handle the necessary conversion from one argument to another, and from one register type
//! to another, if it's possible. Any attempt at a conversion that requires a third register of a different
//! group (for example conversion from K to MMX on X86/X64) will fail.
virtual Error emitArgMove(
const Reg& dst_, TypeId dstTypeId,
const Operand_& src_, TypeId srcTypeId, const char* comment = nullptr);
Error emitArgsAssignment(const FuncFrame& frame, const FuncArgsAssignment& args);
};
//! \}
//! \endcond
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_EMITHELPER_P_H_INCLUDED
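// A hedged sketch of how a backend plugs into this interface (`MyEmitHelper` and its body are
// illustrative only; real backends dispatch on `typeId` and the operand kinds):
//
//   class MyEmitHelper : public BaseEmitHelper {
//   public:
//     using BaseEmitHelper::BaseEmitHelper;
//
//     Error emitRegMove(const Operand_& dst, const Operand_& src,
//                       TypeId typeId, const char* comment) override {
//       // Pick a backend-specific move instruction based on `typeId`,
//       // then emit it through `_emitter` and propagate its error code.
//       DebugUtils::unused(dst, src, typeId, comment);
//       return kErrorOk;
//     }
//   };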

View File

@@ -1,443 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/emitterutils_p.h"
#include "../core/errorhandler.h"
#include "../core/logger.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
// BaseEmitter - Construction & Destruction
// ========================================
BaseEmitter::BaseEmitter(EmitterType emitterType) noexcept
: _emitterType(emitterType) {}
BaseEmitter::~BaseEmitter() noexcept {
if (_code) {
_addEmitterFlags(EmitterFlags::kDestroyed);
_code->detach(this);
}
}
// BaseEmitter - Finalize
// ======================
Error BaseEmitter::finalize() {
// Does nothing by default, overridden by `BaseBuilder` and `BaseCompiler`.
return kErrorOk;
}
// BaseEmitter - Internals
// =======================
static constexpr EmitterFlags kEmitterPreservedFlags = EmitterFlags::kOwnLogger | EmitterFlags::kOwnErrorHandler;
static ASMJIT_NOINLINE void BaseEmitter_updateForcedOptions(BaseEmitter* self) noexcept {
bool emitComments = false;
bool hasDiagnosticOptions = false;
if (self->emitterType() == EmitterType::kAssembler) {
// Assembler: Don't emit comments if logger is not attached.
emitComments = self->_code != nullptr && self->_logger != nullptr;
hasDiagnosticOptions = self->hasDiagnosticOption(DiagnosticOptions::kValidateAssembler);
}
else {
// Builder/Compiler: Always emit comments, we cannot assume they won't be used.
emitComments = self->_code != nullptr;
hasDiagnosticOptions = self->hasDiagnosticOption(DiagnosticOptions::kValidateIntermediate);
}
if (emitComments) {
self->_addEmitterFlags(EmitterFlags::kLogComments);
}
else {
self->_clearEmitterFlags(EmitterFlags::kLogComments);
}
// The reserved option tells the emitter (Assembler/Builder/Compiler) that there may be either a border
// case (CodeHolder not attached, for example) or that logging or validation is required.
if (self->_code == nullptr || self->_logger || hasDiagnosticOptions) {
self->_forcedInstOptions |= InstOptions::kReserved;
}
else {
self->_forcedInstOptions &= ~InstOptions::kReserved;
}
}
// BaseEmitter - Diagnostic Options
// ================================
void BaseEmitter::addDiagnosticOptions(DiagnosticOptions options) noexcept {
_diagnosticOptions |= options;
BaseEmitter_updateForcedOptions(this);
}
void BaseEmitter::clearDiagnosticOptions(DiagnosticOptions options) noexcept {
_diagnosticOptions &= ~options;
BaseEmitter_updateForcedOptions(this);
}
// BaseEmitter - Logging
// =====================
void BaseEmitter::setLogger(Logger* logger) noexcept {
#ifndef ASMJIT_NO_LOGGING
if (logger) {
_logger = logger;
_addEmitterFlags(EmitterFlags::kOwnLogger);
}
else {
_logger = nullptr;
_clearEmitterFlags(EmitterFlags::kOwnLogger);
if (_code) {
_logger = _code->logger();
}
}
BaseEmitter_updateForcedOptions(this);
#else
DebugUtils::unused(logger);
#endif
}
// BaseEmitter - Error Handling
// ============================
void BaseEmitter::setErrorHandler(ErrorHandler* errorHandler) noexcept {
if (errorHandler) {
_errorHandler = errorHandler;
_addEmitterFlags(EmitterFlags::kOwnErrorHandler);
}
else {
_errorHandler = nullptr;
_clearEmitterFlags(EmitterFlags::kOwnErrorHandler);
if (_code) {
_errorHandler = _code->errorHandler();
}
}
}
Error BaseEmitter::_reportError(Error err, const char* message) {
ASMJIT_ASSERT(err != kErrorOk);
ErrorHandler* eh = _errorHandler;
if (eh) {
if (!message) {
message = DebugUtils::errorAsString(err);
}
eh->handleError(err, message, this);
}
return err;
}
// BaseEmitter - Sections
// ======================
// [[pure virtual]]
Error BaseEmitter::section(Section* section) {
DebugUtils::unused(section);
return DebugUtils::errored(kErrorInvalidState);
}
// BaseEmitter - Labels
// ====================
// [[pure virtual]]
Label BaseEmitter::newLabel() {
return Label(Globals::kInvalidId);
}
// [[pure virtual]]
Label BaseEmitter::newNamedLabel(const char* name, size_t nameSize, LabelType type, uint32_t parentId) {
DebugUtils::unused(name, nameSize, type, parentId);
return Label(Globals::kInvalidId);
}
Label BaseEmitter::labelByName(const char* name, size_t nameSize, uint32_t parentId) noexcept {
return Label(_code ? _code->labelIdByName(name, nameSize, parentId) : Globals::kInvalidId);
}
// [[pure virtual]]
Error BaseEmitter::bind(const Label& label) {
DebugUtils::unused(label);
return DebugUtils::errored(kErrorInvalidState);
}
bool BaseEmitter::isLabelValid(uint32_t labelId) const noexcept {
return _code && labelId < _code->labelCount();
}
// BaseEmitter - Emit (Low-Level)
// ==============================
using EmitterUtils::noExt;
Error BaseEmitter::_emitI(InstId instId) {
return _emit(instId, noExt[0], noExt[1], noExt[2], noExt);
}
Error BaseEmitter::_emitI(InstId instId, const Operand_& o0) {
return _emit(instId, o0, noExt[1], noExt[2], noExt);
}
Error BaseEmitter::_emitI(InstId instId, const Operand_& o0, const Operand_& o1) {
return _emit(instId, o0, o1, noExt[2], noExt);
}
Error BaseEmitter::_emitI(InstId instId, const Operand_& o0, const Operand_& o1, const Operand_& o2) {
return _emit(instId, o0, o1, o2, noExt);
}
Error BaseEmitter::_emitI(InstId instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3) {
Operand_ opExt[3] = { o3 };
return _emit(instId, o0, o1, o2, opExt);
}
Error BaseEmitter::_emitI(InstId instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4) {
Operand_ opExt[3] = { o3, o4 };
return _emit(instId, o0, o1, o2, opExt);
}
Error BaseEmitter::_emitI(InstId instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, const Operand_& o5) {
Operand_ opExt[3] = { o3, o4, o5 };
return _emit(instId, o0, o1, o2, opExt);
}
// [[pure virtual]]
Error BaseEmitter::_emit(InstId instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* oExt) {
DebugUtils::unused(instId, o0, o1, o2, oExt);
return DebugUtils::errored(kErrorInvalidState);
}
Error BaseEmitter::_emitOpArray(InstId instId, const Operand_* operands, size_t opCount) {
const Operand_* op = operands;
Operand_ opExt[3];
switch (opCount) {
case 0:
return _emit(instId, noExt[0], noExt[1], noExt[2], noExt);
case 1:
return _emit(instId, op[0], noExt[1], noExt[2], noExt);
case 2:
return _emit(instId, op[0], op[1], noExt[2], noExt);
case 3:
return _emit(instId, op[0], op[1], op[2], noExt);
case 4:
opExt[0] = op[3];
opExt[1].reset();
opExt[2].reset();
return _emit(instId, op[0], op[1], op[2], opExt);
case 5:
opExt[0] = op[3];
opExt[1] = op[4];
opExt[2].reset();
return _emit(instId, op[0], op[1], op[2], opExt);
case 6:
return _emit(instId, op[0], op[1], op[2], op + 3);
default:
return DebugUtils::errored(kErrorInvalidArgument);
}
}
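// An illustrative view of the packing convention above: operands 0-2 are passed directly and anything
// beyond goes through a 3-element extension array (the operand names below are hypothetical):
//
//   Operand_ ops[4] = { dst, src1, src2, imm };
//   emitter->_emitOpArray(instId, ops, 4); // Forwards as _emit(instId, dst, src1, src2, opExt)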
// BaseEmitter - Emit Utilities
// ============================
Error BaseEmitter::emitProlog(const FuncFrame& frame) {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
return _funcs.emitProlog(this, frame);
}
Error BaseEmitter::emitEpilog(const FuncFrame& frame) {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
return _funcs.emitEpilog(this, frame);
}
Error BaseEmitter::emitArgsAssignment(const FuncFrame& frame, const FuncArgsAssignment& args) {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
return _funcs.emitArgsAssignment(this, frame, args);
}
// BaseEmitter - Align
// ===================
// [[pure virtual]]
Error BaseEmitter::align(AlignMode alignMode, uint32_t alignment) {
DebugUtils::unused(alignMode, alignment);
return DebugUtils::errored(kErrorInvalidState);
}
// BaseEmitter - Embed
// ===================
// [[pure virtual]]
Error BaseEmitter::embed(const void* data, size_t dataSize) {
DebugUtils::unused(data, dataSize);
return DebugUtils::errored(kErrorInvalidState);
}
// [[pure virtual]]
Error BaseEmitter::embedDataArray(TypeId typeId, const void* data, size_t itemCount, size_t repeatCount) {
DebugUtils::unused(typeId, data, itemCount, repeatCount);
return DebugUtils::errored(kErrorInvalidState);
}
// [[pure virtual]]
Error BaseEmitter::embedConstPool(const Label& label, const ConstPool& pool) {
DebugUtils::unused(label, pool);
return DebugUtils::errored(kErrorInvalidState);
}
// [[pure virtual]]
Error BaseEmitter::embedLabel(const Label& label, size_t dataSize) {
DebugUtils::unused(label, dataSize);
return DebugUtils::errored(kErrorInvalidState);
}
// [[pure virtual]]
Error BaseEmitter::embedLabelDelta(const Label& label, const Label& base, size_t dataSize) {
DebugUtils::unused(label, base, dataSize);
return DebugUtils::errored(kErrorInvalidState);
}
// BaseEmitter - Comment
// =====================
// [[pure virtual]]
Error BaseEmitter::comment(const char* data, size_t size) {
DebugUtils::unused(data, size);
return DebugUtils::errored(kErrorInvalidState);
}
Error BaseEmitter::commentf(const char* fmt, ...) {
if (!hasEmitterFlag(EmitterFlags::kLogComments)) {
if (!hasEmitterFlag(EmitterFlags::kAttached)) {
return reportError(DebugUtils::errored(kErrorNotInitialized));
}
return kErrorOk;
}
#ifndef ASMJIT_NO_LOGGING
StringTmp<1024> sb;
va_list ap;
va_start(ap, fmt);
Error err = sb.appendVFormat(fmt, ap);
va_end(ap);
ASMJIT_PROPAGATE(err);
return comment(sb.data(), sb.size());
#else
DebugUtils::unused(fmt);
return kErrorOk;
#endif
}
Error BaseEmitter::commentv(const char* fmt, va_list ap) {
if (!hasEmitterFlag(EmitterFlags::kLogComments)) {
if (!hasEmitterFlag(EmitterFlags::kAttached)) {
return reportError(DebugUtils::errored(kErrorNotInitialized));
}
return kErrorOk;
}
#ifndef ASMJIT_NO_LOGGING
StringTmp<1024> sb;
Error err = sb.appendVFormat(fmt, ap);
ASMJIT_PROPAGATE(err);
return comment(sb.data(), sb.size());
#else
DebugUtils::unused(fmt, ap);
return kErrorOk;
#endif
}
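// Example (illustrative names): comments only reach the output when EmitterFlags::kLogComments is
// active, i.e. when a logger is attached, so this is effectively a no-op in setups without logging:
//
//   emitter->commentf("relocating block %u of %u", blockIndex, blockCount);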
// BaseEmitter - Events
// ====================
Error BaseEmitter::onAttach(CodeHolder& code) noexcept {
_code = &code;
_environment = code.environment();
_addEmitterFlags(EmitterFlags::kAttached);
_gpSignature.setBits(
Environment::is32Bit(code.arch())
? RegTraits<RegType::kGp32>::kSignature
: RegTraits<RegType::kGp64>::kSignature
);
onSettingsUpdated();
return kErrorOk;
}
Error BaseEmitter::onDetach(CodeHolder& code) noexcept {
DebugUtils::unused(code);
if (!hasOwnLogger()) {
_logger = nullptr;
}
if (!hasOwnErrorHandler()) {
_errorHandler = nullptr;
}
_clearEmitterFlags(~kEmitterPreservedFlags);
_instructionAlignment = uint8_t(0);
_forcedInstOptions = InstOptions::kReserved;
_privateData = 0;
_environment.reset();
_gpSignature.reset();
_instOptions = InstOptions::kNone;
_extraReg.reset();
_inlineComment = nullptr;
return kErrorOk;
}
Error BaseEmitter::onReinit(CodeHolder& code) noexcept {
ASMJIT_ASSERT(_code == &code);
DebugUtils::unused(code);
_instOptions = InstOptions::kNone;
_extraReg.reset();
_inlineComment = nullptr;
return kErrorOk;
}
void BaseEmitter::onSettingsUpdated() noexcept {
// Only called when attached to CodeHolder by CodeHolder.
ASMJIT_ASSERT(_code != nullptr);
if (!hasOwnLogger()) {
_logger = _code->logger();
}
if (!hasOwnErrorHandler()) {
_errorHandler = _code->errorHandler();
}
BaseEmitter_updateForcedOptions(this);
}
ASMJIT_END_NAMESPACE

View File

@@ -1,909 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_EMITTER_H_INCLUDED
#define ASMJIT_CORE_EMITTER_H_INCLUDED
#include "../core/archtraits.h"
#include "../core/codeholder.h"
#include "../core/formatter.h"
#include "../core/inst.h"
#include "../core/operand.h"
#include "../core/type.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_core
//! \{
class ConstPool;
class FuncFrame;
class FuncArgsAssignment;
//! Align mode, used by \ref BaseEmitter::align().
enum class AlignMode : uint8_t {
//! Align executable code.
kCode = 0,
//! Align non-executable code.
kData = 1,
//! Align by a sequence of zeros.
kZero = 2,
//! Maximum value of `AlignMode`.
kMaxValue = kZero
};
//! Emitter type used by \ref BaseEmitter.
enum class EmitterType : uint8_t {
//! Unknown or uninitialized.
kNone = 0,
//! Emitter inherits from \ref BaseAssembler.
kAssembler = 1,
//! Emitter inherits from \ref BaseBuilder.
kBuilder = 2,
//! Emitter inherits from \ref BaseCompiler.
kCompiler = 3,
//! Maximum value of `EmitterType`.
kMaxValue = kCompiler
};
//! Emitter flags, used by \ref BaseEmitter.
enum class EmitterFlags : uint8_t {
//! No flags.
kNone = 0u,
//! Emitter is attached to CodeHolder.
kAttached = 0x01u,
//! The emitter must emit comments.
kLogComments = 0x08u,
//! The emitter has its own \ref Logger (not propagated from \ref CodeHolder).
kOwnLogger = 0x10u,
//! The emitter has its own \ref ErrorHandler (not propagated from \ref CodeHolder).
kOwnErrorHandler = 0x20u,
//! The emitter was finalized.
kFinalized = 0x40u,
//! The emitter was destroyed.
//!
//! This flag is used for a very short time when an emitter is being destroyed by
//! CodeHolder.
kDestroyed = 0x80u
};
ASMJIT_DEFINE_ENUM_FLAGS(EmitterFlags)
//! Encoding options.
enum class EncodingOptions : uint32_t {
//! No encoding options.
kNone = 0,
//! Emit instructions that are optimized for size, if possible.
//!
//! Default: false.
//!
//! X86 Specific
//! ------------
//!
//! When this option is set, the assembler will try to rewrite instructions, where possible, into operationally
//! equivalent instructions that take fewer bytes by taking advantage of implicit zero extension. For example
//! instructions like `mov r64, imm` and `and r64, imm` can be translated to `mov r32, imm` and `and r32, imm`
//! when the immediate constant is less than `2^31`.
kOptimizeForSize = 0x00000001u,
//! Emit optimized code-alignment sequences.
//!
//! Default: false.
//!
//! X86 Specific
//! ------------
//!
//! The default align sequence used by the X86 architecture is the one-byte 0x90 opcode, often shown by
//! disassemblers as NOP. However, there are more optimized align sequences for 2-11 bytes that may execute
//! faster on certain CPUs. If this feature is enabled, AsmJit will generate specialized sequences for
//! alignments between 2 and 11 bytes.
kOptimizedAlign = 0x00000002u,
//! Emit jump-prediction hints.
//!
//! Default: false.
//!
//! X86 Specific
//! ------------
//!
//! Jump prediction is usually based on the direction of the jump. If the jump is backward it is usually
//! predicted as taken, and if the jump is forward it is usually predicted as not-taken. The reason is that
//! loops generally use backward jumps and conditions usually use forward jumps. However, this behavior can be
//! overridden by using instruction prefixes. If this option is enabled these hints will be emitted.
//!
//! This feature is disabled by default, because the only processor that used to take prediction hints into
//! consideration was the Pentium 4. Newer processors implement heuristics for branch prediction and ignore
//! static hints. This means that this feature can only be used for annotation purposes.
kPredictedJumps = 0x00000010u
};
ASMJIT_DEFINE_ENUM_FLAGS(EncodingOptions)
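// A short usage sketch (hedged: `x86::Assembler` and an already initialized CodeHolder `code` are
// assumed purely for illustration):
//
//   x86::Assembler a(&code);
//   a.addEncodingOptions(EncodingOptions::kOptimizeForSize | EncodingOptions::kOptimizedAlign);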
//! Diagnostic options are used to tell emitters and their passes to perform diagnostics when emitting or processing
//! user code. These options control validation and extra diagnostics that can be performed by higher level emitters.
//!
//! Instruction Validation
//! ----------------------
//!
//! \ref BaseAssembler implementations perform by default only basic checks that are necessary to identify all
//! variations of an instruction so the correct encoding can be selected. This is fine for production-ready code
//! as the assembler doesn't have to perform checks that would slow it down. However, sometimes these checks are
//! beneficial, especially when the project that uses AsmJit is in a development phase, in which mistakes happen
//! often. To make the experience of using AsmJit seamless it offers validation features that can be controlled
//! by \ref DiagnosticOptions.
//!
//! Compiler Diagnostics
//! --------------------
//!
//! Diagnostic options work with \ref BaseCompiler passes (precisely with its register allocation pass). These options
//! can be used to enable logging of all operations that the Compiler does.
enum class DiagnosticOptions : uint32_t {
//! No validation options.
kNone = 0,
//! Perform strict validation in \ref BaseAssembler::emit() implementations.
//!
//! This flag ensures that each instruction is checked before it's encoded into a binary representation. This flag
//! is only relevant for \ref BaseAssembler implementations, but can be set in any other emitter type, in that case
//! if that emitter needs to create an assembler on its own, for the purpose of \ref BaseEmitter::finalize() it
//! would propagate this flag to such assembler so all instructions passed to it are explicitly validated.
//!
//! Default: false.
kValidateAssembler = 0x00000001u,
//! Perform strict validation in \ref BaseBuilder::emit() and \ref BaseCompiler::emit() implementations.
//!
//! This flag ensures that each instruction is checked before an \ref InstNode representing the instruction is
//! created by \ref BaseBuilder or \ref BaseCompiler. This option can be more useful than \ref kValidateAssembler
//! in cases where an invalid instruction reaches an assembler after having become invalid much earlier, most
//! likely at the time it was passed to a Builder or Compiler.
//!
//! This is a separate option that was introduced, because it's possible to manipulate the instruction stream
//! emitted by \ref BaseBuilder and \ref BaseCompiler - this means that it's allowed to emit invalid instructions
//! (for example with missing operands) that will be fixed later before finalizing it.
//!
//! Default: false.
kValidateIntermediate = 0x00000002u,
//! Annotate all nodes processed by register allocator (Compiler/RA).
//!
//! \note Annotations don't need debug options, however, some debug options like `kRADebugLiveness` may influence
//! their output (for example the mentioned option would add liveness information to per-instruction annotation).
kRAAnnotate = 0x00000080u,
//! Debug CFG generation and other related algorithms / operations (Compiler/RA).
kRADebugCFG = 0x00000100u,
//! Debug liveness analysis (Compiler/RA).
kRADebugLiveness = 0x00000200u,
//! Debug register allocation assignment (Compiler/RA).
kRADebugAssignment = 0x00000400u,
//! Debug the removal of code part of unreachable blocks.
kRADebugUnreachable = 0x00000800u,
//! Enable all debug options (Compiler/RA).
kRADebugAll = 0x0000FF00u
};
ASMJIT_DEFINE_ENUM_FLAGS(DiagnosticOptions)
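// A short usage sketch (hedged: `emitter` and `compiler` stand for any BaseEmitter-derived instances):
//
//   emitter->addDiagnosticOptions(DiagnosticOptions::kValidateAssembler);   // Strict checks in emit().
//   compiler->addDiagnosticOptions(DiagnosticOptions::kValidateIntermediate |
//                                  DiagnosticOptions::kRAAnnotate);         // Builder/Compiler-side checks.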
//! Provides a base foundation to emitting code - specialized by \ref BaseAssembler and \ref BaseBuilder.
class ASMJIT_VIRTAPI BaseEmitter {
public:
ASMJIT_BASE_CLASS(BaseEmitter)
ASMJIT_NONCOPYABLE(BaseEmitter)
//! \name Types
//! \{
//! Emitter state that can be used to specify options and inline comment of a next node or instruction.
struct State {
InstOptions options;
RegOnly extraReg;
const char* comment;
};
//! Functions used by backend-specific emitter implementation.
//!
//! These are typically shared between Assembler/Builder/Compiler of a single backend.
struct Funcs {
using EmitProlog = Error (ASMJIT_CDECL*)(BaseEmitter* emitter, const FuncFrame& frame);
using EmitEpilog = Error (ASMJIT_CDECL*)(BaseEmitter* emitter, const FuncFrame& frame);
using EmitArgsAssignment = Error (ASMJIT_CDECL*)(BaseEmitter* emitter, const FuncFrame& frame, const FuncArgsAssignment& args);
using FormatInstruction = Error (ASMJIT_CDECL*)(
String& sb,
FormatFlags formatFlags,
const BaseEmitter* emitter,
Arch arch,
const BaseInst& inst, const Operand_* operands, size_t opCount) noexcept;
using ValidateFunc = Error (ASMJIT_CDECL*)(const BaseInst& inst, const Operand_* operands, size_t opCount, ValidationFlags validationFlags) noexcept;
//! Emit prolog implementation.
EmitProlog emitProlog;
//! Emit epilog implementation.
EmitEpilog emitEpilog;
//! Emit arguments assignment implementation.
EmitArgsAssignment emitArgsAssignment;
//! Instruction formatter implementation.
FormatInstruction formatInstruction;
//! Instruction validation implementation.
ValidateFunc validate;
//! Resets all functions to nullptr.
ASMJIT_INLINE_NODEBUG void reset() noexcept { *this = Funcs{}; }
};
//! \}
//! \name Members
//! \{
//! See \ref EmitterType.
EmitterType _emitterType = EmitterType::kNone;
//! See \ref EmitterFlags.
EmitterFlags _emitterFlags = EmitterFlags::kNone;
//! Instruction alignment.
uint8_t _instructionAlignment = 0u;
//! Validation flags in case validation is used.
//!
//! \note Validation flags are specific to the emitter and they are setup at construction time and then never
//! changed.
ValidationFlags _validationFlags = ValidationFlags::kNone;
//! Validation options.
DiagnosticOptions _diagnosticOptions = DiagnosticOptions::kNone;
//! Encoding options.
EncodingOptions _encodingOptions = EncodingOptions::kNone;
//! Forced instruction options, combined with \ref _instOptions by \ref emit().
InstOptions _forcedInstOptions = InstOptions::kReserved;
//! All supported architectures in a bit-mask, where LSB is the bit with a zero index.
uint64_t _archMask = 0;
//! CodeHolder the emitter is attached to.
CodeHolder* _code = nullptr;
//! Attached \ref Logger.
Logger* _logger = nullptr;
//! Attached \ref ErrorHandler.
ErrorHandler* _errorHandler = nullptr;
//! Describes the target environment, matches \ref CodeHolder::environment().
Environment _environment {};
//! Native GP register signature (either a 32-bit or 64-bit GP register signature).
OperandSignature _gpSignature {};
//! Internal private data used freely by any emitter.
uint32_t _privateData = 0;
//! Next instruction options (affects the next instruction).
InstOptions _instOptions = InstOptions::kNone;
//! Extra register (op-mask {k} on AVX-512) (affects the next instruction).
RegOnly _extraReg {};
//! Inline comment of the next instruction (affects the next instruction).
const char* _inlineComment = nullptr;
//! Pointer to functions used by backend-specific emitter implementation.
Funcs _funcs {};
//! Emitter attached before this emitter in \ref CodeHolder, otherwise nullptr if there is no emitter before.
BaseEmitter* _attachedPrev = nullptr;
//! Emitter attached after this emitter in \ref CodeHolder, otherwise nullptr if there is no emitter after.
BaseEmitter* _attachedNext = nullptr;
//! \}
//! \name Construction & Destruction
//! \{
ASMJIT_API explicit BaseEmitter(EmitterType emitterType) noexcept;
ASMJIT_API virtual ~BaseEmitter() noexcept;
//! \}
//! \name Cast
//! \{
template<typename T>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG T* as() noexcept { return reinterpret_cast<T*>(this); }
template<typename T>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const T* as() const noexcept { return reinterpret_cast<const T*>(this); }
//! \}
//! \name Emitter Type & Flags
//! \{
//! Returns the type of this emitter, see `EmitterType`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG EmitterType emitterType() const noexcept { return _emitterType; }
//! Returns emitter flags, see `EmitterFlags`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG EmitterFlags emitterFlags() const noexcept { return _emitterFlags; }
//! Tests whether the emitter inherits from `BaseAssembler`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isAssembler() const noexcept { return _emitterType == EmitterType::kAssembler; }
//! Tests whether the emitter inherits from `BaseBuilder`.
//!
//! \note Both Builder and Compiler emitters would return `true`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isBuilder() const noexcept { return uint32_t(_emitterType) >= uint32_t(EmitterType::kBuilder); }
//! Tests whether the emitter inherits from `BaseCompiler`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isCompiler() const noexcept { return _emitterType == EmitterType::kCompiler; }
//! Tests whether the emitter has the given `flag` enabled.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasEmitterFlag(EmitterFlags flag) const noexcept { return Support::test(_emitterFlags, flag); }
//! Tests whether the emitter is finalized.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isFinalized() const noexcept { return hasEmitterFlag(EmitterFlags::kFinalized); }
//! Tests whether the emitter is destroyed (only used during destruction).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isDestroyed() const noexcept { return hasEmitterFlag(EmitterFlags::kDestroyed); }
//! \}
//! \cond INTERNAL
//! \name Internal Functions
//! \{
ASMJIT_INLINE_NODEBUG void _addEmitterFlags(EmitterFlags flags) noexcept { _emitterFlags |= flags; }
ASMJIT_INLINE_NODEBUG void _clearEmitterFlags(EmitterFlags flags) noexcept { _emitterFlags &= ~flags; }
//! \}
//! \endcond
//! \name Target Information
//! \{
//! Returns the CodeHolder this emitter is attached to.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG CodeHolder* code() const noexcept { return _code; }
//! Returns the target environment.
//!
//! The returned \ref Environment reference matches \ref CodeHolder::environment().
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const Environment& environment() const noexcept { return _environment; }
//! Tests whether the target architecture is 32-bit.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool is32Bit() const noexcept { return environment().is32Bit(); }
//! Tests whether the target architecture is 64-bit.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool is64Bit() const noexcept { return environment().is64Bit(); }
//! Returns the target architecture type.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Arch arch() const noexcept { return environment().arch(); }
//! Returns the target architecture sub-type.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG SubArch subArch() const noexcept { return environment().subArch(); }
//! Returns the target architecture's GP register size (4 or 8 bytes).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t registerSize() const noexcept { return environment().registerSize(); }
//! Returns a signature of a native general purpose register (either 32-bit or 64-bit depending on the architecture).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG OperandSignature gpSignature() const noexcept { return _gpSignature; }
//! Returns instruction alignment.
//!
//! The following values are returned based on the target architecture:
//! - X86 and X86_64 - instruction alignment is 1.
//! - AArch32 - instruction alignment is 4 in A32 mode and 2 in THUMB mode.
//! - AArch64 - instruction alignment is 4.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t instructionAlignment() const noexcept { return _instructionAlignment; }
//! \}
//! \name Initialization & Finalization
//! \{
//! Tests whether the emitter is initialized (i.e. attached to \ref CodeHolder).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isInitialized() const noexcept { return _code != nullptr; }
//! Finalizes this emitter.
//!
//! Materializes the content of the emitter by serializing it to the attached \ref CodeHolder through an architecture
//! specific \ref BaseAssembler. This function won't do anything if the emitter inherits from \ref BaseAssembler as
//! assemblers emit directly to a \ref CodeBuffer held by \ref CodeHolder. However, if this is an emitter that
//! inherits from \ref BaseBuilder or \ref BaseCompiler then these emitters need the materialization phase as they
//! store their content in a representation not visible to \ref CodeHolder.
ASMJIT_API virtual Error finalize();
//! \}
//! \name Logging
//! \{
//! Tests whether the emitter has a logger.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasLogger() const noexcept { return _logger != nullptr; }
//! Tests whether the emitter has its own logger.
//!
//! Own logger means that it overrides the possible logger that may be used by \ref CodeHolder this emitter is
//! attached to.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasOwnLogger() const noexcept { return hasEmitterFlag(EmitterFlags::kOwnLogger); }
//! Returns the logger this emitter uses.
//!
//! The returned logger is either the emitter's own logger or the logger used by \ref CodeHolder this emitter
//! is attached to.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Logger* logger() const noexcept { return _logger; }
//! Sets or resets the logger of the emitter.
//!
//! If the `logger` argument is non-null then the logger will be considered emitter's own logger, see \ref
//! hasOwnLogger() for more details. If the given `logger` is null then the emitter will automatically use logger
//! that is attached to the \ref CodeHolder this emitter is attached to.
ASMJIT_API void setLogger(Logger* logger) noexcept;
//! Resets the logger of this emitter.
//!
//! The emitter will fall back to using a logger attached to \ref CodeHolder this emitter is attached to, or no logger
//! at all if \ref CodeHolder doesn't have one.
ASMJIT_INLINE_NODEBUG void resetLogger() noexcept { return setLogger(nullptr); }
//! \}
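// A minimal sketch of the ownership rule described above (hedged: `FileLogger` is part of AsmJit's
// logging support and is shown for illustration only):
//
//   FileLogger fileLogger(stdout);
//   emitter->setLogger(&fileLogger); // Own logger - overrides CodeHolder's logger.
//   emitter->resetLogger();          // Falls back to CodeHolder's logger, if any.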
//! \name Error Handling
//! \{
//! Tests whether the emitter has an error handler attached.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasErrorHandler() const noexcept { return _errorHandler != nullptr; }
//! Tests whether the emitter has its own error handler.
//!
//! Own error handler means that it overrides the possible error handler that may be used by \ref CodeHolder this
//! emitter is attached to.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasOwnErrorHandler() const noexcept { return hasEmitterFlag(EmitterFlags::kOwnErrorHandler); }
//! Returns the error handler this emitter uses.
//!
//! The returned error handler is either the emitter's own error handler or the error handler used by
//! \ref CodeHolder this emitter is attached to.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG ErrorHandler* errorHandler() const noexcept { return _errorHandler; }
//! Sets or resets the error handler of the emitter.
ASMJIT_API void setErrorHandler(ErrorHandler* errorHandler) noexcept;
//! Resets the error handler.
ASMJIT_INLINE_NODEBUG void resetErrorHandler() noexcept { setErrorHandler(nullptr); }
//! \cond INTERNAL
ASMJIT_API Error _reportError(Error err, const char* message = nullptr);
//! \endcond
//! Handles the given error in the following way:
//! 1. If the emitter has \ref ErrorHandler attached, it calls its \ref ErrorHandler::handleError() member function
//! first, and then returns the error. The `handleError()` function may throw.
//! 2. If the emitter doesn't have \ref ErrorHandler, the error is simply returned.
ASMJIT_INLINE Error reportError(Error err, const char* message = nullptr) {
Error e = _reportError(err, message);
// Static analysis is not working properly without these assumptions.
ASMJIT_ASSUME(e == err);
ASMJIT_ASSUME(e != kErrorOk);
return e;
}
//! \}
//! \name Encoding Options
//! \{
//! Returns encoding options.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG EncodingOptions encodingOptions() const noexcept { return _encodingOptions; }
//! Tests whether the encoding `option` is set.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasEncodingOption(EncodingOptions option) const noexcept { return Support::test(_encodingOptions, option); }
//! Enables the given encoding `options`.
ASMJIT_INLINE_NODEBUG void addEncodingOptions(EncodingOptions options) noexcept { _encodingOptions |= options; }
//! Disables the given encoding `options`.
ASMJIT_INLINE_NODEBUG void clearEncodingOptions(EncodingOptions options) noexcept { _encodingOptions &= ~options; }
//! \}
//! \name Diagnostic Options
//! \{
//! Returns the emitter's diagnostic options.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG DiagnosticOptions diagnosticOptions() const noexcept { return _diagnosticOptions; }
//! Tests whether the given `option` is present in the emitter's diagnostic options.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasDiagnosticOption(DiagnosticOptions option) const noexcept { return Support::test(_diagnosticOptions, option); }
//! Activates the given diagnostic `options`.
//!
//! This function is used to activate explicit validation options that will then be used by all emitter
//! implementations. There are in general two possibilities:
//!
//! - Architecture specific assembler is used. In this case \ref DiagnosticOptions::kValidateAssembler can be
//! used to turn on explicit validation that will be performed before each instruction is emitted. This means
//! that internally an extra step will be performed to make sure that the instruction is correct. This is
//! needed, because by default assemblers prefer speed over strictness.
//!
//! This option should only be used in debug builds as it's pretty expensive.
//!
//! - Architecture specific builder or compiler is used. In this case the user can turn on the
//! \ref DiagnosticOptions::kValidateIntermediate option that adds an explicit validation step before the Builder
//! or Compiler creates an \ref InstNode to represent an emitted instruction. An error will be returned if the
//! instruction is ill-formed. In addition, \ref DiagnosticOptions::kValidateAssembler can also be used; it would
//! not be consumed by Builder / Compiler directly, but it would be propagated to the architecture specific
//! \ref BaseAssembler implementation created during \ref BaseEmitter::finalize().
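//!
//! For example, explicit assembler validation could be turned on in debug builds only (a sketch, assuming an
//! assembler `a`):
//!
//! ```
//! #if !defined(NDEBUG)
//! a.addDiagnosticOptions(DiagnosticOptions::kValidateAssembler);
//! #endif
//! ```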
ASMJIT_API void addDiagnosticOptions(DiagnosticOptions options) noexcept;
//! Deactivates the given validation `options`.
//!
//! See \ref addDiagnosticOptions() and \ref DiagnosticOptions for more details.
ASMJIT_API void clearDiagnosticOptions(DiagnosticOptions options) noexcept;
//! \}
//! \name Instruction Options
//! \{
//! Returns forced instruction options.
//!
//! Forced instruction options are merged with the options of the next instruction before that instruction is
//! encoded. Some bits of these options are reserved for error handling, logging, and instruction validation;
//! the remaining options are global and affect every instruction.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG InstOptions forcedInstOptions() const noexcept { return _forcedInstOptions; }
//! Returns options of the next instruction.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG InstOptions instOptions() const noexcept { return _instOptions; }
//! Sets options of the next instruction.
ASMJIT_INLINE_NODEBUG void setInstOptions(InstOptions options) noexcept { _instOptions = options; }
//! Adds options of the next instruction.
ASMJIT_INLINE_NODEBUG void addInstOptions(InstOptions options) noexcept { _instOptions |= options; }
//! Resets options of the next instruction.
ASMJIT_INLINE_NODEBUG void resetInstOptions() noexcept { _instOptions = InstOptions::kNone; }
//! Tests whether the extra register operand is valid.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasExtraReg() const noexcept { return _extraReg.isReg(); }
//! Returns an extra operand that will be used by the next instruction (architecture specific).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const RegOnly& extraReg() const noexcept { return _extraReg; }
//! Sets an extra operand that will be used by the next instruction (architecture specific).
ASMJIT_INLINE_NODEBUG void setExtraReg(const Reg& reg) noexcept { _extraReg.init(reg); }
//! Sets an extra operand that will be used by the next instruction (architecture specific).
ASMJIT_INLINE_NODEBUG void setExtraReg(const RegOnly& reg) noexcept { _extraReg.init(reg); }
//! Resets an extra operand that will be used by the next instruction (architecture specific).
ASMJIT_INLINE_NODEBUG void resetExtraReg() noexcept { _extraReg.reset(); }
//! Returns comment/annotation of the next instruction.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const char* inlineComment() const noexcept { return _inlineComment; }
//! Sets comment/annotation of the next instruction.
//!
//! \note This string is set back to null by `_emit()`, but until then it has to remain valid as the emitter is
//! not required to make a copy of it (and it would be slow to do that for each instruction).
ASMJIT_INLINE_NODEBUG void setInlineComment(const char* s) noexcept { _inlineComment = s; }
//! Resets the comment/annotation to nullptr.
ASMJIT_INLINE_NODEBUG void resetInlineComment() noexcept { _inlineComment = nullptr; }
//! \}
//! \name Emitter State
//! \{
//! Resets the emitter state, which contains instruction options, extra register, and inline comment.
//!
//! An emitter can have a state that describes instruction options and the extra register used by the next
//! instruction. Most instructions don't need nor use the state, however, if an instruction uses a prefix such as
//! REX or REP, which is set explicitly, then the state contains it. This makes it possible to mimic the syntax
//! of traditional assemblers. For example `rep().movs(...)` would map to a `REP MOVS` instruction on X86. The
//! same applies to various hints and the use of a mask register in AVX-512 mode.
ASMJIT_INLINE_NODEBUG void resetState() noexcept {
resetInstOptions();
resetExtraReg();
resetInlineComment();
}
//! \cond INTERNAL
//! Grabs the current emitter state and resets the emitter state at the same time, returning the state the emitter
//! had before the state was reset.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG State _grabState() noexcept {
State s{_instOptions | _forcedInstOptions, _extraReg, _inlineComment};
resetState();
return s;
}
//! \endcond
//! \}
//! \name Sections
//! \{
//! Switches to the given `section`.
//!
//! Once switched, everything is added to the given `section`.
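//!
//! A minimal sketch (assuming a \ref CodeHolder `code` and an attached emitter `a`):
//!
//! ```
//! Section* data;
//! code.newSection(&data, ".data", SIZE_MAX, SectionFlags::kNone, 8);
//! a.section(data);  // Everything emitted from now on goes to `.data`.
//! ```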
ASMJIT_API virtual Error section(Section* section);
//! \}
//! \name Labels
//! \{
//! Creates a new label.
[[nodiscard]]
ASMJIT_API virtual Label newLabel();
//! Creates a new named label.
[[nodiscard]]
ASMJIT_API virtual Label newNamedLabel(const char* name, size_t nameSize = SIZE_MAX, LabelType type = LabelType::kGlobal, uint32_t parentId = Globals::kInvalidId);
//! Creates a new anonymous label with a name that can only be used for debugging purposes.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Label newAnonymousLabel(const char* name, size_t nameSize = SIZE_MAX) { return newNamedLabel(name, nameSize, LabelType::kAnonymous); }
//! Creates a new external label.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Label newExternalLabel(const char* name, size_t nameSize = SIZE_MAX) { return newNamedLabel(name, nameSize, LabelType::kExternal); }
//! Returns `Label` by `name`.
//!
//! Returns an invalid Label if the name is invalid or the label was not found.
//!
//! \note This function doesn't trigger ErrorHandler in case the name is invalid or no such label exists. You must
//! always check the validity of the `Label` returned.
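//!
//! A minimal sketch:
//!
//! ```
//! Label entry = emitter->newNamedLabel("entry");
//! Label found = emitter->labelByName("entry");
//! if (!found.isValid()) {
//!   // Handle the missing label explicitly - no ErrorHandler is triggered.
//! }
//! ```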
[[nodiscard]]
ASMJIT_API Label labelByName(const char* name, size_t nameSize = SIZE_MAX, uint32_t parentId = Globals::kInvalidId) noexcept;
//! Binds the `label` to the current position of the current section.
//!
//! \note Attempting to bind the same label multiple times returns an error.
ASMJIT_API virtual Error bind(const Label& label);
//! Tests whether the label `id` is valid (i.e. registered).
[[nodiscard]]
ASMJIT_API bool isLabelValid(uint32_t labelId) const noexcept;
//! Tests whether the `label` is valid (i.e. registered).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isLabelValid(const Label& label) const noexcept { return isLabelValid(label.id()); }
//! \}
//! \name Emit
//! \{
// NOTE: These `emit()` helpers are designed to address code bloat generated by C++ compilers when calling a
// function having many arguments. Each parameter to `_emit()` requires some code to pass it, which means that if
// `_emit()` defaulted to `instId` plus 5 operand parameters, the C++ compiler would have to generate a virtual
// function call passing 6 arguments and the additional `this` argument, which is quite a lot. Since by default
// most instructions have 2 to 3 operands, it's better to introduce helpers that pass from 0 to 6 operands and
// reduce the size of each emit(...) call site.
//! Emits an instruction (internal).
ASMJIT_API Error _emitI(InstId instId);
//! \overload
ASMJIT_API Error _emitI(InstId instId, const Operand_& o0);
//! \overload
ASMJIT_API Error _emitI(InstId instId, const Operand_& o0, const Operand_& o1);
//! \overload
ASMJIT_API Error _emitI(InstId instId, const Operand_& o0, const Operand_& o1, const Operand_& o2);
//! \overload
ASMJIT_API Error _emitI(InstId instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3);
//! \overload
ASMJIT_API Error _emitI(InstId instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4);
//! \overload
ASMJIT_API Error _emitI(InstId instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, const Operand_& o5);
//! Emits an instruction `instId` with the given `operands`.
//!
//! This is the most universal way of emitting code, which accepts an instruction identifier and instruction
//! operands. This is called an "unchecked" API as emit doesn't provide any type checks at compile-time. This
//! makes it possible to emit an instruction with just \ref Operand instances, which could be handy in some
//! cases - for example when emitting generic code where you don't know whether some operand is a register,
//! memory, or an immediate.
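//!
//! A sketch of emitting with generic operands (assuming X86 and operands assigned elsewhere):
//!
//! ```
//! Operand dst, src;
//! // ... assign dst and src (each may be a register, memory, or an immediate) ...
//! Error err = emitter->emit(x86::Inst::kIdAdd, dst, src);
//! ```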
template<typename... Args>
ASMJIT_INLINE_NODEBUG Error emit(InstId instId, Args&&... operands) {
return _emitI(instId, Support::ForwardOp<Args>::forward(operands)...);
}
//! Similar to \ref emit(), but uses array of `operands` instead.
ASMJIT_INLINE_NODEBUG Error emitOpArray(InstId instId, const Operand_* operands, size_t opCount) {
return _emitOpArray(instId, operands, opCount);
}
//! Similar to \ref emit(), but emits instruction with both instruction options and extra register, followed
//! by an array of `operands`.
ASMJIT_INLINE Error emitInst(const BaseInst& inst, const Operand_* operands, size_t opCount) {
setInstOptions(inst.options());
setExtraReg(inst.extraReg());
return _emitOpArray(inst.id(), operands, opCount);
}
//! \}
//! \cond INTERNAL
//! \name Emit Internals
//! \{
//! Emits an instruction - all 6 operands must be defined.
ASMJIT_API virtual Error _emit(InstId instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* oExt);
//! Emits instruction having operands stored in array.
ASMJIT_API virtual Error _emitOpArray(InstId instId, const Operand_* operands, size_t opCount);
//! \}
//! \endcond
//! \name Emit Utilities
//! \{
//! Emits a function prolog described by the given function `frame`.
ASMJIT_API Error emitProlog(const FuncFrame& frame);
//! Emits a function epilog described by the given function `frame`.
ASMJIT_API Error emitEpilog(const FuncFrame& frame);
//! Emits code that reassigns function `frame` arguments to the given `args`.
ASMJIT_API Error emitArgsAssignment(const FuncFrame& frame, const FuncArgsAssignment& args);
//! \}
//! \name Align
//! \{
//! Aligns the current CodeBuffer position to the `alignment` specified.
//!
//! The sequence that is used to fill the gap between the aligned location and the current location depends on the
//! `alignMode`, see \ref AlignMode. The `alignment` argument specifies alignment in bytes, so for example when
//! it's `32` it means that the code buffer will be aligned to `32` bytes.
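//!
//! For example, aligning the code buffer to a 16-byte boundary (a sketch, assuming an assembler `a`):
//!
//! ```
//! a.align(AlignMode::kCode, 16);  // On X86 the gap is typically filled with NOPs.
//! ```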
ASMJIT_API virtual Error align(AlignMode alignMode, uint32_t alignment);
//! \}
//! \name Embed
//! \{
//! Embeds raw data into the \ref CodeBuffer.
ASMJIT_API virtual Error embed(const void* data, size_t dataSize);
//! Embeds a typed data array.
//!
//! This is the most flexible function for embedding data as it makes it possible to:
//!
//! - Assign a `typeId` to the data, so the emitter knows the type of items stored in `data`. Binary data should
//! use \ref TypeId::kUInt8.
//!
//! - Repeat the given data `repeatCount` times, so the data can be used as a fill pattern for example, or as a
//! pattern used by SIMD instructions.
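//!
//! For example, a 16-byte SIMD constant could be embedded by repeating a 32-bit pattern 4 times (a sketch):
//!
//! ```
//! uint32_t pattern = 0x80000000u;  // Sign-bit mask for 4 packed floats.
//! a.embedDataArray(TypeId::kUInt32, &pattern, 1, 4);
//! ```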
ASMJIT_API virtual Error embedDataArray(TypeId typeId, const void* data, size_t itemCount, size_t repeatCount = 1);
//! Embeds int8_t `value` repeated by `repeatCount`.
ASMJIT_INLINE_NODEBUG Error embedInt8(int8_t value, size_t repeatCount = 1) { return embedDataArray(TypeId::kInt8, &value, 1, repeatCount); }
//! Embeds uint8_t `value` repeated by `repeatCount`.
ASMJIT_INLINE_NODEBUG Error embedUInt8(uint8_t value, size_t repeatCount = 1) { return embedDataArray(TypeId::kUInt8, &value, 1, repeatCount); }
//! Embeds int16_t `value` repeated by `repeatCount`.
ASMJIT_INLINE_NODEBUG Error embedInt16(int16_t value, size_t repeatCount = 1) { return embedDataArray(TypeId::kInt16, &value, 1, repeatCount); }
//! Embeds uint16_t `value` repeated by `repeatCount`.
ASMJIT_INLINE_NODEBUG Error embedUInt16(uint16_t value, size_t repeatCount = 1) { return embedDataArray(TypeId::kUInt16, &value, 1, repeatCount); }
//! Embeds int32_t `value` repeated by `repeatCount`.
ASMJIT_INLINE_NODEBUG Error embedInt32(int32_t value, size_t repeatCount = 1) { return embedDataArray(TypeId::kInt32, &value, 1, repeatCount); }
//! Embeds uint32_t `value` repeated by `repeatCount`.
ASMJIT_INLINE_NODEBUG Error embedUInt32(uint32_t value, size_t repeatCount = 1) { return embedDataArray(TypeId::kUInt32, &value, 1, repeatCount); }
//! Embeds int64_t `value` repeated by `repeatCount`.
ASMJIT_INLINE_NODEBUG Error embedInt64(int64_t value, size_t repeatCount = 1) { return embedDataArray(TypeId::kInt64, &value, 1, repeatCount); }
//! Embeds uint64_t `value` repeated by `repeatCount`.
ASMJIT_INLINE_NODEBUG Error embedUInt64(uint64_t value, size_t repeatCount = 1) { return embedDataArray(TypeId::kUInt64, &value, 1, repeatCount); }
//! Embeds a floating point `value` repeated by `repeatCount`.
ASMJIT_INLINE_NODEBUG Error embedFloat(float value, size_t repeatCount = 1) { return embedDataArray(TypeId(TypeUtils::TypeIdOfT<float>::kTypeId), &value, 1, repeatCount); }
//! Embeds a floating point `value` repeated by `repeatCount`.
ASMJIT_INLINE_NODEBUG Error embedDouble(double value, size_t repeatCount = 1) { return embedDataArray(TypeId(TypeUtils::TypeIdOfT<double>::kTypeId), &value, 1, repeatCount); }
//! Embeds a constant pool at the current offset by performing the following:
//! 1. Aligns by using AlignMode::kData to the minimum `pool` alignment.
//! 2. Binds the ConstPool label so it's bound to an aligned location.
//! 3. Emits ConstPool content.
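//!
//! A minimal sketch (assuming a \ref ConstPool `pool` and a label `poolLabel` created for it):
//!
//! ```
//! a.embedConstPool(poolLabel, pool);  // Aligns, binds `poolLabel`, and emits the pool content.
//! ```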
ASMJIT_API virtual Error embedConstPool(const Label& label, const ConstPool& pool);
//! Embeds an absolute `label` address as data.
//!
//! The `dataSize` is an optional argument that can be used to specify the size of the address data. If it's zero
//! (default) the address size is deduced from the target architecture (either 4 or 8 bytes).
ASMJIT_API virtual Error embedLabel(const Label& label, size_t dataSize = 0);
//! Embeds a delta (distance) between the `label` and `base`, calculating it as `label - base`. This function was
//! designed to make it easier to embed lookup tables where each entry is a relative distance between two labels.
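//!
//! For example, a lookup table entry can be embedded as a 4-byte distance between two labels (a sketch,
//! assuming labels `L_Table` and `L_Case0` created and bound elsewhere):
//!
//! ```
//! a.bind(L_Table);
//! a.embedLabelDelta(L_Case0, L_Table, 4);  // Emits int32(L_Case0 - L_Table).
//! ```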
ASMJIT_API virtual Error embedLabelDelta(const Label& label, const Label& base, size_t dataSize = 0);
//! \}
//! \name Comment
//! \{
//! Emits a comment stored in `data` with an optional `size` parameter.
ASMJIT_API virtual Error comment(const char* data, size_t size = SIZE_MAX);
//! Emits a formatted comment specified by `fmt` and variable number of arguments.
ASMJIT_API Error commentf(const char* fmt, ...);
//! Emits a formatted comment specified by `fmt` and `ap`.
ASMJIT_API Error commentv(const char* fmt, va_list ap);
//! \}
//! \name Events
//! \{
//! Called after the emitter was attached to `CodeHolder`.
ASMJIT_API virtual Error onAttach(CodeHolder& code) noexcept;
//! Called after the emitter was detached from `CodeHolder`.
ASMJIT_API virtual Error onDetach(CodeHolder& code) noexcept;
//! Called when \ref CodeHolder is reinitialized while the emitter is attached.
ASMJIT_API virtual Error onReinit(CodeHolder& code) noexcept;
//! Called when \ref CodeHolder has updated an important setting, which involves the following:
//!
//! - \ref Logger has been changed (\ref CodeHolder::setLogger() has been called).
//!
//! - \ref ErrorHandler has been changed (\ref CodeHolder::setErrorHandler() has been called).
//!
//! This function ensures that the settings are properly propagated from \ref CodeHolder to the emitter.
//!
//! \note This function is virtual and can be overridden, however, if you do so, always call \ref
//! BaseEmitter::onSettingsUpdated() within your own implementation to ensure that the emitter is
//! in a consistent state.
ASMJIT_API virtual void onSettingsUpdated() noexcept;
//! \}
};
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_EMITTER_H_INCLUDED

View File

@@ -1,129 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/assembler.h"
#include "../core/emitterutils_p.h"
#include "../core/formatter_p.h"
#include "../core/logger.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
namespace EmitterUtils {
#ifndef ASMJIT_NO_LOGGING
Error finishFormattedLine(String& sb, const FormatOptions& formatOptions, const uint8_t* binData, size_t binSize, size_t offsetSize, size_t immSize, const char* comment) noexcept {
ASMJIT_ASSERT(binSize >= offsetSize);
const size_t kNoBinSize = SIZE_MAX;
size_t commentSize = comment ? Support::strLen(comment, Globals::kMaxCommentSize) : 0;
if ((binSize != 0 && binSize != kNoBinSize) || commentSize) {
char sep = ';';
size_t padding = Formatter::paddingFromOptions(formatOptions, FormatPaddingGroup::kRegularLine);
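// Up to two passes: pass 0 appends the encoded bytes (skipped entirely when `binSize == kNoBinSize`),
// pass 1 appends the comment, if any.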
for (size_t i = (binSize == kNoBinSize); i < 2; i++) {
ASMJIT_PROPAGATE(sb.padEnd(padding));
if (sep) {
ASMJIT_PROPAGATE(sb.append(sep));
ASMJIT_PROPAGATE(sb.append(' '));
}
// Append binary data or comment.
if (i == 0) {
ASMJIT_PROPAGATE(sb.appendHex(binData, binSize - offsetSize - immSize));
ASMJIT_PROPAGATE(sb.appendChars('.', offsetSize * 2));
ASMJIT_PROPAGATE(sb.appendHex(binData + binSize - immSize, immSize));
if (commentSize == 0) break;
}
else {
ASMJIT_PROPAGATE(sb.append(comment, commentSize));
}
sep = '|';
padding += Formatter::paddingFromOptions(formatOptions, FormatPaddingGroup::kMachineCode);
}
}
return sb.append('\n');
}
void logLabelBound(BaseAssembler* self, const Label& label) noexcept {
Logger* logger = self->logger();
StringTmp<512> sb;
size_t binSize = logger->hasFlag(FormatFlags::kMachineCode) ? size_t(0) : SIZE_MAX;
sb.appendChars(' ', logger->indentation(FormatIndentationGroup::kLabel));
Formatter::formatLabel(sb, logger->flags(), self, label.id());
sb.append(':');
finishFormattedLine(sb, logger->options(), nullptr, binSize, 0, 0, self->_inlineComment);
logger->log(sb.data(), sb.size());
}
void logInstructionEmitted(
BaseAssembler* self,
InstId instId,
InstOptions options,
const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt,
uint32_t relSize, uint32_t immSize, uint8_t* afterCursor) {
Logger* logger = self->logger();
ASMJIT_ASSERT(logger != nullptr);
StringTmp<256> sb;
FormatFlags formatFlags = logger->flags();
uint8_t* beforeCursor = self->bufferPtr();
intptr_t emittedSize = (intptr_t)(afterCursor - beforeCursor);
Operand_ opArray[Globals::kMaxOpCount];
opArrayFromEmitArgs(opArray, o0, o1, o2, opExt);
sb.appendChars(' ', logger->indentation(FormatIndentationGroup::kCode));
self->_funcs.formatInstruction(sb, formatFlags, self, self->arch(), BaseInst(instId, options, self->extraReg()), opArray, Globals::kMaxOpCount);
if (Support::test(formatFlags, FormatFlags::kMachineCode)) {
finishFormattedLine(sb, logger->options(), self->bufferPtr(), size_t(emittedSize), relSize, immSize, self->inlineComment());
}
else {
finishFormattedLine(sb, logger->options(), nullptr, SIZE_MAX, 0, 0, self->inlineComment());
}
logger->log(sb);
}
Error logInstructionFailed(
BaseEmitter* self,
Error err,
InstId instId,
InstOptions options,
const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) {
StringTmp<256> sb;
sb.append(DebugUtils::errorAsString(err));
sb.append(": ");
Operand_ opArray[Globals::kMaxOpCount];
opArrayFromEmitArgs(opArray, o0, o1, o2, opExt);
self->_funcs.formatInstruction(sb, FormatFlags::kRegType, self, self->arch(), BaseInst(instId, options, self->extraReg()), opArray, Globals::kMaxOpCount);
if (self->inlineComment()) {
sb.append(" ; ");
sb.append(self->inlineComment());
}
self->resetState();
return self->reportError(err, sb.data());
}
#endif
} // {EmitterUtils}
ASMJIT_END_NAMESPACE

View File

@@ -1,90 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_EMITTERUTILS_P_H_INCLUDED
#define ASMJIT_CORE_EMITTERUTILS_P_H_INCLUDED
#include "../core/emitter.h"
#include "../core/operand.h"
ASMJIT_BEGIN_NAMESPACE
class BaseAssembler;
class FormatOptions;
//! \cond INTERNAL
//! \addtogroup asmjit_core
//! \{
//! Utilities used by various emitters, mostly Assembler implementations.
namespace EmitterUtils {
//! Default extended operands (all empty), used when an instruction has no 4th, 5th, or 6th operand.
static constexpr Operand noExt[3] = { {}, {}, {} };
enum kOpIndex : uint32_t {
kOp3 = 0,
kOp4 = 1,
kOp5 = 2
};
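//! Calculates the operand count from emit arguments, where `opExt` points to the 4th, 5th, and 6th operand.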
[[nodiscard]]
static ASMJIT_INLINE uint32_t opCountFromEmitArgs(const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) noexcept {
uint32_t opCount = 0;
if (opExt[kOp3].isNone()) {
if (!o0.isNone()) opCount = 1;
if (!o1.isNone()) opCount = 2;
if (!o2.isNone()) opCount = 3;
}
else {
opCount = 4;
if (!opExt[kOp4].isNone()) {
opCount = 5 + uint32_t(!opExt[kOp5].isNone());
}
}
return opCount;
}
static ASMJIT_INLINE void opArrayFromEmitArgs(Operand_ dst[Globals::kMaxOpCount], const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) noexcept {
dst[0].copyFrom(o0);
dst[1].copyFrom(o1);
dst[2].copyFrom(o2);
dst[3].copyFrom(opExt[kOp3]);
dst[4].copyFrom(opExt[kOp4]);
dst[5].copyFrom(opExt[kOp5]);
}
#ifndef ASMJIT_NO_LOGGING
Error finishFormattedLine(String& sb, const FormatOptions& formatOptions, const uint8_t* binData, size_t binSize, size_t offsetSize, size_t immSize, const char* comment) noexcept;
void logLabelBound(BaseAssembler* self, const Label& label) noexcept;
void logInstructionEmitted(
BaseAssembler* self,
InstId instId,
InstOptions options,
const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt,
uint32_t relSize, uint32_t immSize, uint8_t* afterCursor);
Error logInstructionFailed(
BaseEmitter* self,
Error err,
InstId instId,
InstOptions options,
const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt);
#endif
}
//! \}
//! \endcond
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_EMITTERUTILS_P_H_INCLUDED

View File

@@ -1,47 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/environment.h"
ASMJIT_BEGIN_NAMESPACE
// X86 Target
// ----------
//
// - 32-bit - Linux, OSX, BSD, and apparently also Haiku guarantee 16-byte
// stack alignment. Other operating systems are assumed to have
// 4-byte alignment by default for safety reasons.
// - 64-bit - stack must be aligned to 16 bytes.
//
// ARM Target
// ----------
//
// - 32-bit - Stack must be aligned to 8 bytes.
// - 64-bit - Stack must be aligned to 16 bytes (hardware requirement).
uint32_t Environment::stackAlignment() const noexcept {
if (is64Bit()) {
// Assume 16-byte alignment on any 64-bit target.
return 16;
}
else {
// The following platforms use 16-byte alignment in 32-bit mode.
if (isPlatformLinux() ||
isPlatformBSD() ||
isPlatformApple() ||
isPlatformHaiku()) {
return 16u;
}
if (isFamilyARM()) {
return 8;
}
// Fall back to 4-byte alignment if we don't know.
return 4;
}
}
ASMJIT_END_NAMESPACE

View File

@@ -1,622 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_ENVIRONMENT_H_INCLUDED
#define ASMJIT_CORE_ENVIRONMENT_H_INCLUDED
#include "../core/archtraits.h"
#if defined(__APPLE__)
#include <TargetConditionals.h>
#endif
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_core
//! \{
//! Vendor.
//!
//! \note AsmJit doesn't use vendor information at the moment. It's provided for future use, if required.
enum class Vendor : uint8_t {
//! Unknown or uninitialized platform vendor.
kUnknown = 0,
//! Maximum value of `Vendor`.
kMaxValue = kUnknown,
//! Platform vendor detected at compile-time.
kHost =
#if defined(_DOXYGEN)
DETECTED_AT_COMPILE_TIME
#else
kUnknown
#endif
};
//! Platform - runtime environment or operating system.
enum class Platform : uint8_t {
//! Unknown or uninitialized platform.
kUnknown = 0,
//! Windows OS.
kWindows,
//! Other platform that is not Windows, most likely POSIX based.
kOther,
//! Linux OS.
kLinux,
//! GNU/Hurd OS.
kHurd,
//! FreeBSD OS.
kFreeBSD,
//! OpenBSD OS.
kOpenBSD,
//! NetBSD OS.
kNetBSD,
//! DragonFly BSD OS.
kDragonFlyBSD,
//! Haiku OS.
kHaiku,
//! Apple OSX.
kOSX,
//! Apple iOS.
kIOS,
//! Apple TVOS.
kTVOS,
//! Apple WatchOS.
kWatchOS,
//! Emscripten platform.
kEmscripten,
//! Maximum value of `Platform`.
kMaxValue = kEmscripten,
//! Platform detected at compile-time (platform of the host).
kHost =
#if defined(_DOXYGEN)
DETECTED_AT_COMPILE_TIME
#elif defined(__EMSCRIPTEN__)
kEmscripten
#elif defined(_WIN32)
kWindows
#elif defined(__linux__)
kLinux
#elif defined(__gnu_hurd__)
kHurd
#elif defined(__FreeBSD__)
kFreeBSD
#elif defined(__OpenBSD__)
kOpenBSD
#elif defined(__NetBSD__)
kNetBSD
#elif defined(__DragonFly__)
kDragonFlyBSD
#elif defined(__HAIKU__)
kHaiku
#elif defined(__APPLE__) && TARGET_OS_OSX
kOSX
#elif defined(__APPLE__) && TARGET_OS_TV
kTVOS
#elif defined(__APPLE__) && TARGET_OS_WATCH
kWatchOS
#elif defined(__APPLE__) && TARGET_OS_IPHONE
kIOS
#else
kOther
#endif
};
//! Platform ABI (application binary interface).
enum class PlatformABI : uint8_t {
//! Unknown or uninitialized environment.
kUnknown = 0,
//! Microsoft ABI.
kMSVC,
//! GNU ABI.
kGNU,
//! Android Environment / ABI.
kAndroid,
//! Cygwin ABI.
kCygwin,
//! Darwin ABI.
kDarwin,
//! Maximum value of `PlatformABI`.
kMaxValue,
//! Host ABI detected at compile-time.
kHost =
#if defined(_DOXYGEN)
DETECTED_AT_COMPILE_TIME
#elif defined(_MSC_VER)
kMSVC
#elif defined(__CYGWIN__)
kCygwin
#elif defined(__MINGW32__) || defined(__GLIBC__)
kGNU
#elif defined(__ANDROID__)
kAndroid
#elif defined(__APPLE__)
kDarwin
#else
kUnknown
#endif
};
//! Floating point ABI (ARM).
enum class FloatABI : uint8_t {
kHardFloat = 0,
kSoftFloat,
kHost =
#if ASMJIT_ARCH_ARM == 32 && defined(__SOFTFP__)
kSoftFloat
#else
kHardFloat
#endif
};
//! Object format.
//!
//! \note AsmJit doesn't really use anything except \ref ObjectFormat::kUnknown and \ref ObjectFormat::kJIT at
//! the moment. Object file formats are provided for future extensibility and the possibility of generating
//! object files at some point.
enum class ObjectFormat : uint8_t {
//! Unknown or uninitialized object format.
kUnknown = 0,
//! JIT code generation object, most likely \ref JitRuntime or a custom
//! \ref Target implementation.
kJIT,
//! Executable and linkable format (ELF).
kELF,
//! Common object file format.
kCOFF,
//! Extended COFF object format.
kXCOFF,
//! Mach object file format.
kMachO,
//! Maximum value of `ObjectFormat`.
kMaxValue
};
//! Represents an environment, which is usually related to a \ref Target.
//!
//! An environment usually has an 'arch-subarch-vendor-os-abi' format, which is sometimes called a "Triple"
//! (historically it used to have only 3 parts) or a "Tuple", which is a convention used by Debian Linux.
//!
//! AsmJit doesn't support all possible combinations of architectures and ABIs, however, it models the environment
//! similarly to other compilers for future extensibility.
class Environment {
public:
//! \name Members
//! \{
//! Architecture.
Arch _arch = Arch::kUnknown;
//! Sub-architecture type.
SubArch _subArch = SubArch::kUnknown;
//! Vendor type.
Vendor _vendor = Vendor::kUnknown;
//! Platform.
Platform _platform = Platform::kUnknown;
//! Platform ABI.
PlatformABI _platformABI = PlatformABI::kUnknown;
//! Object format.
ObjectFormat _objectFormat = ObjectFormat::kUnknown;
//! Floating point ABI.
FloatABI _floatABI = FloatABI::kHardFloat;
//! Reserved for future use, must be zero.
uint8_t _reserved = 0;
//! \}
//! \name Construction & Destruction
//! \{
//! Creates a default initialized environment (all values either unknown or set to safe defaults).
ASMJIT_INLINE_CONSTEXPR Environment() noexcept = default;
//! Creates a copy of `other` instance.
ASMJIT_INLINE_CONSTEXPR Environment(const Environment& other) noexcept = default;
//! Creates \ref Environment initialized to `arch`, `subArch`, `vendor`, `platform`, `platformABI`, `objectFormat`,
//! and `floatABI`.
ASMJIT_INLINE_CONSTEXPR explicit Environment(
Arch arch,
SubArch subArch = SubArch::kUnknown,
Vendor vendor = Vendor::kUnknown,
Platform platform = Platform::kUnknown,
PlatformABI platformABI = PlatformABI::kUnknown,
ObjectFormat objectFormat = ObjectFormat::kUnknown,
FloatABI floatABI = FloatABI::kHardFloat) noexcept
: _arch(arch),
_subArch(subArch),
_vendor(vendor),
_platform(platform),
_platformABI(platformABI),
_objectFormat(objectFormat),
_floatABI(floatABI) {}
//! Returns the host environment constructed from preprocessor macros defined by the compiler.
//!
//! The returned environment should precisely match the target host architecture, sub-architecture, platform,
//! and ABI.
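//!
//! A minimal sketch:
//!
//! ```
//! Environment env = Environment::host();
//! bool is64Bit = env.is64Bit();
//! ```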
static ASMJIT_INLINE_CONSTEXPR Environment host() noexcept {
return Environment(Arch::kHost, SubArch::kHost, Vendor::kHost, Platform::kHost, PlatformABI::kHost, ObjectFormat::kUnknown, FloatABI::kHost);
}
//! \}
//! \name Overloaded Operators
//! \{
ASMJIT_INLINE_NODEBUG Environment& operator=(const Environment& other) noexcept = default;
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool operator==(const Environment& other) const noexcept { return equals(other); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool operator!=(const Environment& other) const noexcept { return !equals(other); }
//! \}
//! \name Accessors
//! \{
//! Tests whether the environment is not set up.
//!
//! Returns true if all members are zero, and thus unknown.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool empty() const noexcept {
// Unfortunately, compilers won't optimize this well if fields are checked one by one...
return _packed() == 0;
}
//! Tests whether the environment is initialized, which means it must have
//! a valid architecture.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isInitialized() const noexcept {
return _arch != Arch::kUnknown;
}
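//! Returns all members packed into a single 64-bit integer, which is used by \ref empty() and \ref equals().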
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint64_t _packed() const noexcept {
uint64_t x;
memcpy(&x, this, 8);
return x;
}
//! Resets all members of the environment to zero / unknown.
ASMJIT_INLINE_NODEBUG void reset() noexcept { *this = Environment{}; }
//! Tests whether this environment is equal to `other`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool equals(const Environment& other) const noexcept { return _packed() == other._packed(); }
//! Returns the architecture.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Arch arch() const noexcept { return _arch; }
//! Returns the sub-architecture.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG SubArch subArch() const noexcept { return _subArch; }
//! Returns vendor.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Vendor vendor() const noexcept { return _vendor; }
//! Returns target's platform or operating system.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Platform platform() const noexcept { return _platform; }
//! Returns target's ABI.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG PlatformABI platformABI() const noexcept { return _platformABI; }
//! Returns target's object format.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG ObjectFormat objectFormat() const noexcept { return _objectFormat; }
//! Returns floating point ABI.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG FloatABI floatABI() const noexcept { return _floatABI; }
//! Initializes \ref Environment to `arch`, `subArch`, `vendor`, `platform`, `platformABI`, `objectFormat`,
//! and `floatABI`.
inline void init(
Arch arch,
SubArch subArch = SubArch::kUnknown,
Vendor vendor = Vendor::kUnknown,
Platform platform = Platform::kUnknown,
PlatformABI platformABI = PlatformABI::kUnknown,
ObjectFormat objectFormat = ObjectFormat::kUnknown,
FloatABI floatABI = FloatABI::kHardFloat) noexcept {
_arch = arch;
_subArch = subArch;
_vendor = vendor;
_platform = platform;
_platformABI = platformABI;
_objectFormat = objectFormat;
_floatABI = floatABI;
_reserved = 0;
}
//! Tests whether this environment describes a 32-bit X86.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isArchX86() const noexcept { return _arch == Arch::kX86; }
//! Tests whether this environment describes a 64-bit X86.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isArchX64() const noexcept { return _arch == Arch::kX64; }
//! Tests whether this environment describes a 32-bit ARM.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isArchARM() const noexcept { return isArchARM(_arch); }
//! Tests whether this environment describes a 32-bit ARM in THUMB mode.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isArchThumb() const noexcept { return isArchThumb(_arch); }
//! Tests whether this environment describes AArch64 (a 64-bit ARM architecture).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isArchAArch64() const noexcept { return isArchAArch64(_arch); }
//! Tests whether this environment describes a 32-bit MIPS.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isArchMIPS32() const noexcept { return isArchMIPS32(_arch); }
//! Tests whether this environment describes a 64-bit MIPS.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isArchMIPS64() const noexcept { return isArchMIPS64(_arch); }
//! Tests whether this environment describes a 32-bit RISC-V.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isArchRISCV32() const noexcept { return _arch == Arch::kRISCV32; }
//! Tests whether this environment describes a 64-bit RISC-V.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isArchRISCV64() const noexcept { return _arch == Arch::kRISCV64; }
//! Tests whether the architecture is 32-bit.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool is32Bit() const noexcept { return is32Bit(_arch); }
//! Tests whether the architecture is 64-bit.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool is64Bit() const noexcept { return is64Bit(_arch); }
//! Tests whether the architecture is little endian.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isLittleEndian() const noexcept { return isLittleEndian(_arch); }
//! Tests whether the architecture is big endian.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isBigEndian() const noexcept { return isBigEndian(_arch); }
//! Tests whether this architecture is of X86 family.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isFamilyX86() const noexcept { return isFamilyX86(_arch); }
//! Tests whether this architecture family is ARM, THUMB, or AArch64.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isFamilyARM() const noexcept { return isFamilyARM(_arch); }
//! Tests whether this architecture family is AArch32 (ARM or THUMB).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isFamilyAArch32() const noexcept { return isFamilyAArch32(_arch); }
//! Tests whether this architecture family is AArch64.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isFamilyAArch64() const noexcept { return isFamilyAArch64(_arch); }
//! Tests whether this architecture family is MIPS32 or MIPS64.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isFamilyMIPS() const noexcept { return isFamilyMIPS(_arch); }
//! Tests whether this architecture family is RISC-V (both 32-bit and 64-bit).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isFamilyRISCV() const noexcept { return isFamilyRISCV(_arch); }
//! Tests whether the environment platform is Windows.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isPlatformWindows() const noexcept { return _platform == Platform::kWindows; }
//! Tests whether the environment platform is Linux.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isPlatformLinux() const noexcept { return _platform == Platform::kLinux; }
//! Tests whether the environment platform is Hurd.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isPlatformHurd() const noexcept { return _platform == Platform::kHurd; }
//! Tests whether the environment platform is Haiku.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isPlatformHaiku() const noexcept { return _platform == Platform::kHaiku; }
//! Tests whether the environment platform is any BSD.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isPlatformBSD() const noexcept {
return _platform == Platform::kFreeBSD ||
_platform == Platform::kOpenBSD ||
_platform == Platform::kNetBSD ||
_platform == Platform::kDragonFlyBSD;
}
//! Tests whether the environment platform is any Apple platform (OSX, iOS, TVOS, WatchOS).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isPlatformApple() const noexcept {
return _platform == Platform::kOSX ||
_platform == Platform::kIOS ||
_platform == Platform::kTVOS ||
_platform == Platform::kWatchOS;
}
//! Tests whether the ABI is MSVC.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMSVC() const noexcept { return _platformABI == PlatformABI::kMSVC; }
//! Tests whether the ABI is GNU.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isGNU() const noexcept { return _platformABI == PlatformABI::kGNU; }
//! Tests whether the ABI is Darwin.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isDarwin() const noexcept { return _platformABI == PlatformABI::kDarwin; }
//! Returns a calculated stack alignment for this environment.
[[nodiscard]]
ASMJIT_API uint32_t stackAlignment() const noexcept;
//! Returns a native register size of this architecture.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t registerSize() const noexcept { return registerSizeFromArch(_arch); }
//! Sets the architecture to `arch`.
ASMJIT_INLINE_NODEBUG void setArch(Arch arch) noexcept { _arch = arch; }
//! Sets the sub-architecture to `subArch`.
ASMJIT_INLINE_NODEBUG void setSubArch(SubArch subArch) noexcept { _subArch = subArch; }
//! Sets the vendor to `vendor`.
ASMJIT_INLINE_NODEBUG void setVendor(Vendor vendor) noexcept { _vendor = vendor; }
//! Sets the platform to `platform`.
ASMJIT_INLINE_NODEBUG void setPlatform(Platform platform) noexcept { _platform = platform; }
//! Sets the ABI to `platformABI`.
ASMJIT_INLINE_NODEBUG void setPlatformABI(PlatformABI platformABI) noexcept { _platformABI = platformABI; }
//! Sets the object format to `objectFormat`.
ASMJIT_INLINE_NODEBUG void setObjectFormat(ObjectFormat objectFormat) noexcept { _objectFormat = objectFormat; }
//! Sets floating point ABI to `floatABI`.
ASMJIT_INLINE_NODEBUG void setFloatABI(FloatABI floatABI) noexcept { _floatABI = floatABI; }
//! \}
//! \name Static Utilities
//! \{
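//! Tests whether the given architecture `arch` is within the range of defined values (including `Arch::kUnknown`).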
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isDefinedArch(Arch arch) noexcept {
return uint32_t(arch) <= uint32_t(Arch::kMaxValue);
}
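//! Tests whether the given architecture `arch` is valid, which means defined and not `Arch::kUnknown`.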
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isValidArch(Arch arch) noexcept {
return arch != Arch::kUnknown && uint32_t(arch) <= uint32_t(Arch::kMaxValue);
}
//! Tests whether the given architecture `arch` is 32-bit.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool is32Bit(Arch arch) noexcept {
return (uint32_t(arch) & uint32_t(Arch::k32BitMask)) == uint32_t(Arch::k32BitMask);
}
//! Tests whether the given architecture `arch` is 64-bit.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool is64Bit(Arch arch) noexcept {
return (uint32_t(arch) & uint32_t(Arch::k32BitMask)) == 0;
}
//! Tests whether the given architecture `arch` is little endian.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isLittleEndian(Arch arch) noexcept {
return uint32_t(arch) < uint32_t(Arch::kBigEndian);
}
//! Tests whether the given architecture `arch` is big endian.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isBigEndian(Arch arch) noexcept {
return uint32_t(arch) >= uint32_t(Arch::kBigEndian);
}
//! Tests whether the given architecture is Thumb or Thumb_BE.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isArchThumb(Arch arch) noexcept {
return arch == Arch::kThumb || arch == Arch::kThumb_BE;
}
//! Tests whether the given architecture is ARM or ARM_BE.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isArchARM(Arch arch) noexcept {
return arch == Arch::kARM || arch == Arch::kARM_BE;
}
//! Tests whether the given architecture is AArch64 or AArch64_BE.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isArchAArch64(Arch arch) noexcept {
return arch == Arch::kAArch64 || arch == Arch::kAArch64_BE;
}
//! Tests whether the given architecture is MIPS32_LE or MIPS32_BE.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isArchMIPS32(Arch arch) noexcept {
return arch == Arch::kMIPS32_LE || arch == Arch::kMIPS32_BE;
}
//! Tests whether the given architecture is MIPS64_LE or MIPS64_BE.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isArchMIPS64(Arch arch) noexcept {
return arch == Arch::kMIPS64_LE || arch == Arch::kMIPS64_BE;
}
//! Tests whether the given architecture family is X86 or X64.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isFamilyX86(Arch arch) noexcept {
return arch == Arch::kX86 || arch == Arch::kX64;
}
//! Tests whether the given architecture family is AArch32 (ARM or THUMB).
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isFamilyAArch32(Arch arch) noexcept {
return isArchARM(arch) || isArchThumb(arch);
}
//! Tests whether the given architecture family is AArch64.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isFamilyAArch64(Arch arch) noexcept {
return isArchAArch64(arch);
}
//! Tests whether the given architecture family is ARM, THUMB, or AArch64.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isFamilyARM(Arch arch) noexcept {
return isFamilyAArch32(arch) || isFamilyAArch64(arch);
}
//! Tests whether the given architecture family is MIPS or MIPS64.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isFamilyMIPS(Arch arch) noexcept {
return isArchMIPS32(arch) || isArchMIPS64(arch);
}
//! Tests whether the given architecture family is RISC-V (both 32-bit and 64-bit).
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isFamilyRISCV(Arch arch) noexcept {
return arch == Arch::kRISCV32 || arch == Arch::kRISCV64;
}
//! Returns a native general purpose register size from the given architecture.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG uint32_t registerSizeFromArch(Arch arch) noexcept {
return is32Bit(arch) ? 4u : 8u;
}
//! \}
};
static_assert(sizeof(Environment) == 8,
"Environment must occupy exactly 8 bytes.");
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_ENVIRONMENT_H_INCLUDED

View File

@@ -1,18 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/errorhandler.h"
ASMJIT_BEGIN_NAMESPACE
ErrorHandler::ErrorHandler() noexcept {}
ErrorHandler::~ErrorHandler() noexcept {}
void ErrorHandler::handleError(Error err, const char* message, BaseEmitter* origin) {
DebugUtils::unused(err, message, origin);
}
ASMJIT_END_NAMESPACE

View File

@@ -1,228 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_ERRORHANDLER_H_INCLUDED
#define ASMJIT_CORE_ERRORHANDLER_H_INCLUDED
#include "../core/globals.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_error_handling
//! \{
class BaseEmitter;
//! Error handler can be used to override the default behavior of error handling.
//!
//! It's available to all classes that inherit `BaseEmitter`. Override \ref ErrorHandler::handleError() to implement
//! your own error handler.
//!
//! The following use-cases are supported:
//!
//! - Record the error and continue code generation. This is the simplest approach that can be used to at least log
//! possible errors.
//! - Throw an exception. AsmJit doesn't use exceptions and is completely exception-safe, but it's perfectly legal
//! to throw an exception from the error handler.
//! - Use plain old C's `setjmp()` and `longjmp()`. AsmJit always puts Assembler, Builder and Compiler into
//! a consistent state before calling \ref handleError(), so `longjmp()` can be used without issues to cancel the
//! code generation if an error occurred. This method can be used if exception handling in your project is turned
//! off and you still want some comfort. In most cases it should be safe as AsmJit uses \ref Zone memory and the
//! ownership of memory it allocates always ends with the instance that allocated it. If using this approach,
//! never jump outside the lifetime of \ref CodeHolder and \ref BaseEmitter.
//!
//! \ref ErrorHandler can be attached to \ref CodeHolder or \ref BaseEmitter, which has a priority. The example below
//! uses error handler that just prints the error, but lets AsmJit continue:
//!
//! ```
//! // Error Handling #1 - Logging and returning Error.
//! #include <asmjit/x86.h>
//! #include <stdio.h>
//!
//! using namespace asmjit;
//!
//! // Error handler that just prints the error and lets AsmJit ignore it.
//! class SimpleErrorHandler : public ErrorHandler {
//! public:
//! Error err;
//!
//! inline SimpleErrorHandler() : err(kErrorOk) {}
//!
//! void handleError(Error err, const char* message, BaseEmitter* origin) override {
//! this->err = err;
//! fprintf(stderr, "ERROR: %s\n", message);
//! }
//! };
//!
//! int main() {
//! JitRuntime rt;
//! SimpleErrorHandler eh;
//!
//! CodeHolder code;
//! code.init(rt.environment(), rt.cpuFeatures());
//! code.setErrorHandler(&eh);
//!
//! // Try to emit instruction that doesn't exist.
//! x86::Assembler a(&code);
//! a.emit(x86::Inst::kIdMov, x86::xmm0, x86::xmm1);
//!
//! if (eh.err) {
//! // Assembler failed!
//! return 1;
//! }
//!
//! return 0;
//! }
//! ```
//!
//! If an error happens during instruction emitting / encoding, the assembler behaves transactionally - the output buffer
//! won't advance if encoding failed, thus either a fully encoded instruction or nothing is emitted. The error handling
//! shown above is useful, but it's still not the best way of dealing with errors in AsmJit. The following example
//! shows how to use exception handling to handle errors in a more C++ way:
//!
//! ```
//! // Error Handling #2 - Throwing an exception.
//! #include <asmjit/x86.h>
//! #include <exception>
//! #include <string>
//! #include <stdio.h>
//!
//! using namespace asmjit;
//!
//! // Error handler that throws a user-defined `AsmJitException`.
//! class AsmJitException : public std::exception {
//! public:
//! Error err;
//! std::string message;
//!
//! AsmJitException(Error err, const char* message) noexcept
//! : err(err),
//! message(message) {}
//!
//! const char* what() const noexcept override { return message.c_str(); }
//! };
//!
//! class ThrowableErrorHandler : public ErrorHandler {
//! public:
//! // Throwing is possible here, because functions that use ErrorHandler are never 'noexcept'.
//! void handleError(Error err, const char* message, BaseEmitter* origin) override {
//! throw AsmJitException(err, message);
//! }
//! };
//!
//! int main() {
//! JitRuntime rt;
//! ThrowableErrorHandler eh;
//!
//! CodeHolder code;
//! code.init(rt.environment(), rt.cpuFeatures());
//! code.setErrorHandler(&eh);
//!
//! x86::Assembler a(&code);
//!
//! // Try to emit instruction that doesn't exist.
//! try {
//! a.emit(x86::Inst::kIdMov, x86::xmm0, x86::xmm1);
//! }
//! catch (const AsmJitException& ex) {
//! printf("EXCEPTION THROWN: %s\n", ex.what());
//! return 1;
//! }
//!
//! return 0;
//! }
//! ```
//!
//! If C++ exceptions are not what you prefer, or your project turns them off completely, there is still a way of reducing
//! the error handling to a minimum by using a standard setjmp/longjmp approach. AsmJit is exception-safe and cleans
//! up everything before calling the ErrorHandler, so any approach is safe. You can simply jump from the error handler
//! without causing any side-effects or memory leaks. The following example demonstrates how it could be done:
//!
//! ```
//! // Error Handling #3 - Using setjmp/longjmp if exceptions are not allowed.
//! #include <asmjit/x86.h>
//! #include <setjmp.h>
//! #include <stdio.h>
//!
//! class LongJmpErrorHandler : public asmjit::ErrorHandler {
//! public:
//! inline LongJmpErrorHandler() : err(asmjit::kErrorOk) {}
//!
//! void handleError(asmjit::Error err, const char* message, asmjit::BaseEmitter* origin) override {
//! this->err = err;
//! longjmp(state, 1);
//! }
//!
//! jmp_buf state;
//! asmjit::Error err;
//! };
//!
//! int main(int argc, char* argv[]) {
//! using namespace asmjit;
//!
//! JitRuntime rt;
//! LongJmpErrorHandler eh;
//!
//! CodeHolder code;
//! code.init(rt.environment(), rt.cpuFeatures());
//! code.setErrorHandler(&eh);
//!
//! x86::Assembler a(&code);
//!
//! if (!setjmp(eh.state)) {
//! // Try to emit instruction that doesn't exist.
//! a.emit(x86::Inst::kIdMov, x86::xmm0, x86::xmm1);
//! }
//! else {
//! Error err = eh.err;
//! printf("ASMJIT ERROR: 0x%08X [%s]\n", err, DebugUtils::errorAsString(err));
//! }
//!
//! return 0;
//! }
//! ```
class ASMJIT_VIRTAPI ErrorHandler {
public:
ASMJIT_BASE_CLASS(ErrorHandler)
//! \name Construction & Destruction
//! \{
//! Creates a new `ErrorHandler` instance.
ASMJIT_API ErrorHandler() noexcept;
//! Destroys the `ErrorHandler` instance.
ASMJIT_API virtual ~ErrorHandler() noexcept;
//! \}
//! \name Interface
//! \{
//! Error handler (must be reimplemented).
//!
//! Error handler is called after an error happened and before it's propagated to the caller. There are multiple
//! ways how the error handler can be used:
//!
//! 1. User-based error handling without throwing an exception or using C's `longjmp()`. This is for users that
//! don't use exceptions and want customized error handling.
//!
//! 2. Throwing an exception. AsmJit doesn't use exceptions and is completely exception-safe, but you can throw
//! exception from your error handler if this way is the preferred way of handling errors in your project.
//!
//! 3. Using plain old C's `setjmp()` and `longjmp()`. AsmJit always puts `BaseEmitter` to a consistent state before
//! calling `handleError()` so `longjmp()` can be used without any issues to cancel the code generation if an
//! error occurred. There is no difference between exceptions and `longjmp()` from AsmJit's perspective, however,
//! never jump outside of `CodeHolder` and `BaseEmitter` scope as you would leak memory.
ASMJIT_API virtual void handleError(Error err, const char* message, BaseEmitter* origin);
//! \}
};
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_ERRORHANDLER_H_INCLUDED

View File

@@ -1,282 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_FIXUP_H_INCLUDED
#define ASMJIT_CORE_FIXUP_H_INCLUDED
#include "../core/globals.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_core
//! \{
//! Offset format type, used by \ref OffsetFormat.
enum class OffsetType : uint8_t {
// Common Offset Formats
// ---------------------
//! A signed value having `_immBitCount` bits, shifted left by `_immBitShift`.
//!
//! This offset type is sufficient for many targets that store the offset as a contiguous set of bits within an
//! instruction word / sequence of bytes.
kSignedOffset,
//! An unsigned value having `_immBitCount` bits and shifted by `_immBitShift`.
kUnsignedOffset,
// AArch64 Specific Offset Formats
// -------------------------------
//! AArch64 ADR format of `[.|immlo:2|.....|immhi:19|.....]`.
kAArch64_ADR,
//! AArch64 ADRP format of `[.|immlo:2|.....|immhi:19|.....]` (4kB pages).
kAArch64_ADRP,
// AArch32 Specific Offset Formats (T16 & T32)
// -------------------------------------------
//! AArch32 THUMBv2 immediate encoding of 'ADR' instruction (12-bit payload and sign bit):
//!
//! `|.....|imm:1|..N.N|......|imm:3|....|imm:8|`
//!
//! Where `N` is one if the offset is negative. A negative offset is encoded as its absolute value.
kThumb32_ADR,
//! AArch32 THUMBv2 immediate encoding of 'BLX' instruction (23-bit immediate payload, multiplied by 4):
//!
//! `|.....|imm[22]|imm[19:10]|..|ja|1|jb|imm[9:0]|0`
//!
//! Where:
//!
//! - `ja` is calculated as imm[22] ^ imm[21] ^ 1.
//! - `jb` is calculated as imm[22] ^ imm[20] ^ 1.
kThumb32_BLX,
//! AArch32 THUMBv2 immediate encoding of 'B' instruction without `<cond>` (24-bit immediate payload, multiplied by 2):
//!
//! `|.....|imm[23]|imm[20:11]|..|ja|1|jb|imm[10:0]`
//!
//! Where:
//!
//! - `ja` is calculated as imm[23] ^ imm[22] ^ 1.
//! - `jb` is calculated as imm[23] ^ imm[21] ^ 1.
kThumb32_B,
//! AArch32 THUMBv2 immediate encoding of 'B' instruction with `<cond>` (20-bit immediate payload, multiplied by 2).
//!
//! `|.....|imm[19]|....|imm[16:11]|..|ja|1|jb|imm[10:0]`
//!
//! Where:
//!
//! - `ja` is calculated as imm[19] ^ imm[18] ^ 1.
//! - `jb` is calculated as imm[19] ^ imm[17] ^ 1.
kThumb32_BCond,
// AArch32 Specific Offset Formats (A32)
// -------------------------------------
//! AArch32 ADR instruction, which uses a standard 12-bit immediate encoding that is used by other ARM instructions.
kAArch32_ADR,
//! AArch32 signed offset that is similar to `kSignedOffset`, however, it uses the absolute value of the offset
//! and its sign is encoded in the 23rd bit of the opcode.
//!
//! `|........|U.......|........|........|`
//!
kAArch32_U23_SignedOffset,
//! AArch32 offset format that encodes 8-bit offset as:
//!
//! `|........|U.......|....|imm[7:4]|....|imm[3:0]|`
//!
//! in a 32-bit word, where U is the sign of the displacement and the displacement itself is encoded as its
//! absolute value.
kAArch32_U23_0To3At0_4To7At8,
//! AArch32 offset format that encodes a signed 25-bit offset as:
//!
//! `|.......|imm[0]|imm[24:1]|`
//!
//! in a 32-bit word.
kAArch32_1To24At0_0At24,
//! Maximum value of `OffsetFormatType`.
kMaxValue = kAArch32_1To24At0_0At24
};
//! Provides information about formatting offsets, absolute addresses, or their parts. Offset format is used by both
//! \ref RelocEntry and \ref Fixup. The illustration below describes the relation of region size and offset size.
//! Region size is the size of the whole unit whereas offset size is the size of the unit that will be patched.
//!
//! ```
//! +-> Code buffer | The subject of the relocation (region) |
//! | | (Word-Offset) (Word-Size) |
//! |xxxxxxxxxxxxxxx|................|*PATCHED*|................|xxxxxxxxxxxx->
//! | |
//! [Word Offset points here]----+ +--- [WordOffset + WordSize]
//! ```
//!
//! Once the offset word has been located it can be patched like this:
//!
//! ```
//! |ImmDiscardLSB (discard LSB bits).
//! |..
//! [0000000000000iiiiiiiiiiiiiiiiiDD] - Offset value (32-bit)
//! [000000000000000iiiiiiiiiiiiiiiii] - Offset value after discard LSB.
//! [00000000000iiiiiiiiiiiiiiiii0000] - Offset value shifted by ImmBitShift.
//! [xxxxxxxxxxxiiiiiiiiiiiiiiiiixxxx] - Patched word (32-bit)
//! |...............|
//! (ImmBitCount) +- ImmBitShift
//! ```
struct OffsetFormat {
//! \name Members
//! \{
//! Type of the offset.
OffsetType _type;
//! Encoding flags.
uint8_t _flags;
//! Size of the region (in bytes) containing the offset value, if the offset value is part of an instruction;
//! otherwise it's the same as `_valueSize`.
uint8_t _regionSize;
//! Size of the offset value, in bytes (1, 2, 4, or 8).
uint8_t _valueSize;
//! Offset of the offset value, in bytes, relative to the start of the region or data. Value offset would be
//! zero if both region size and value size are equal.
uint8_t _valueOffset;
//! Size of the offset immediate value in bits.
uint8_t _immBitCount;
//! Shift of the offset immediate value in bits in the target word.
uint8_t _immBitShift;
//! Number of least significant bits to discard before writing the immediate to the destination. All discarded
//! bits must be zero otherwise the value is invalid.
uint8_t _immDiscardLsb;
//! \}
//! \name Accessors
//! \{
//! Returns the type of the offset.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG OffsetType type() const noexcept { return _type; }
//! Returns whether the offset is encoded as an absolute value with additional field(s) that represent its sign
//! (AArch32 U/N fields in the opcode).
//!
//! If true, the offset itself is always positive and a separate U/N field is used to indicate the sign of the offset
//! (usually `U==1` means ADD, but sometimes `N==1` means negative offset, which implies SUB).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasSignBit() const noexcept {
return _type == OffsetType::kThumb32_ADR ||
_type == OffsetType::kAArch32_ADR ||
_type == OffsetType::kAArch32_U23_SignedOffset ||
_type == OffsetType::kAArch32_U23_0To3At0_4To7At8;
}
//! Returns flags.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t flags() const noexcept { return _flags; }
//! Returns the size of the region/instruction where the offset is encoded.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t regionSize() const noexcept { return _regionSize; }
//! Returns the offset of the patched word relative to the start of the region.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t valueOffset() const noexcept { return _valueOffset; }
//! Returns the size of the data-type (word) that contains the offset, in bytes.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t valueSize() const noexcept { return _valueSize; }
//! Returns the count of bits of the offset value in the data it's stored in.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t immBitCount() const noexcept { return _immBitCount; }
//! Returns the bit-shift of the offset value in the data it's stored in.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t immBitShift() const noexcept { return _immBitShift; }
//! Returns the number of least significant bits of the offset value that must be zero and that are not part of
//! the encoded data.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t immDiscardLsb() const noexcept { return _immDiscardLsb; }
//! Resets this offset format to a simple data value of `valueSize` bytes.
//!
//! The region will be the same size as the data and the immediate bit count would correspond to `valueSize * 8`.
//! There will be no immediate bit shift or discarded bits.
inline void resetToSimpleValue(OffsetType type, size_t valueSize) noexcept {
ASMJIT_ASSERT(valueSize <= 8u);
_type = type;
_flags = uint8_t(0);
_regionSize = uint8_t(valueSize);
_valueSize = uint8_t(valueSize);
_valueOffset = uint8_t(0);
_immBitCount = uint8_t(valueSize * 8u);
_immBitShift = uint8_t(0);
_immDiscardLsb = uint8_t(0);
}
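//! Resets this offset format to an immediate value of `immBitCount` bits shifted left by `immBitShift` in the
//! target word, with `immDiscardLsb` least significant bits of the offset discarded before encoding.
//!
//! For example, `resetToImmValue(type, 4, 0, 26, 2)` would describe a 26-bit immediate stored at bit 0 of a
//! 32-bit word whose two lowest offset bits are implied zeros (an illustrative combination, not a prescribed one).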
inline void resetToImmValue(OffsetType type, size_t valueSize, uint32_t immBitShift, uint32_t immBitCount, uint32_t immDiscardLsb) noexcept {
ASMJIT_ASSERT(valueSize <= 8u);
ASMJIT_ASSERT(immBitShift < valueSize * 8u);
ASMJIT_ASSERT(immBitCount <= 64u);
ASMJIT_ASSERT(immDiscardLsb <= 64u);
_type = type;
_flags = uint8_t(0);
_regionSize = uint8_t(valueSize);
_valueSize = uint8_t(valueSize);
_valueOffset = uint8_t(0);
_immBitCount = uint8_t(immBitCount);
_immBitShift = uint8_t(immBitShift);
_immDiscardLsb = uint8_t(immDiscardLsb);
}
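//! Sets the size of the region that contains the patched value and the offset of the value within that region.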
inline void setRegion(size_t regionSize, size_t valueOffset) noexcept {
_regionSize = uint8_t(regionSize);
_valueOffset = uint8_t(valueOffset);
}
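//! Sets the region in terms of the number of bytes that precede (leading) and follow (trailing) the patched value.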
inline void setLeadingAndTrailingSize(size_t leadingSize, size_t trailingSize) noexcept {
_regionSize = uint8_t(leadingSize + trailingSize + _valueSize);
_valueOffset = uint8_t(leadingSize);
}
//! \}
};
//! Data structure used to mark where a fixup in code or data is necessary.
//!
//! Fixups are generally resolved during machine code generation. For example, if a branch instruction is used to
//! jump to a label that hasn't been bound yet, a fixup is created. However, when such a label is bound, the fixup
//! is processed and removed from the list of fixups.
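//!
//! Since fixups form a singly-linked list, a pending list can be walked like this (a sketch; `head` is an
//! assumed list head, not part of this structure):
//!
//! ```
//! for (Fixup* fixup = head; fixup != nullptr; fixup = fixup->next) {
//!   // Each fixup records its section, the offset of the patched word within it, and
//!   // an OffsetFormat that describes how the resolved offset must be encoded.
//! }
//! ```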
struct Fixup {
//! Next fixup in a single-linked list.
Fixup* next;
//! Section where the fixup comes from.
uint32_t sectionId;
//! Label id, relocation id, or \ref Globals::kInvalidId.
//!
//! \note Fixup that is used with a LabelEntry always uses relocation id here, however, when a fixup is turned
//! into unresolved and generally detached from LabelEntry, this field becomes a label identifier as unresolved
//! fixups won't reference a relocation. This is just a space optimization.
uint32_t labelOrRelocId;
//! Label offset relative to the start of the section where the unresolved link comes from.
size_t offset;
//! Inlined rel8/rel32.
intptr_t rel;
//! Offset format information.
OffsetFormat format;
};
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_FIXUP_H_INCLUDED

View File

@@ -1,641 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#ifndef ASMJIT_NO_LOGGING
#include "../core/archtraits.h"
#include "../core/builder.h"
#include "../core/codeholder.h"
#include "../core/compiler.h"
#include "../core/emitter.h"
#include "../core/formatter_p.h"
#include "../core/string.h"
#include "../core/support.h"
#include "../core/type.h"
#if !defined(ASMJIT_NO_X86)
#include "../x86/x86formatter_p.h"
#endif
#if !defined(ASMJIT_NO_AARCH64)
#include "../arm/a64formatter_p.h"
#endif
ASMJIT_BEGIN_NAMESPACE
#if defined(ASMJIT_NO_COMPILER)
class VirtReg;
#endif
namespace Formatter {
Error formatVirtRegName(String& sb, const VirtReg* vReg) noexcept {
if (vReg->nameSize()) {
return sb.append(vReg->name(), vReg->nameSize());
}
else {
return sb.appendFormat("%%%u", unsigned(Operand::virtIdToIndex(vReg->id())));
}
}
Error formatVirtRegNameWithPrefix(String& sb, const char* prefix, size_t prefixSize, const VirtReg* vReg) noexcept {
ASMJIT_PROPAGATE(sb.append(prefix, prefixSize));
if (vReg->nameSize()) {
return sb.append(vReg->name(), vReg->nameSize());
}
else {
return sb.appendFormat("%%%u", unsigned(Operand::virtIdToIndex(vReg->id())));
}
}
static const char wordNameTable[][8] = {
"db",
"dw",
"dd",
"dq",
"byte",
"half",
"word",
"hword",
"dword",
"qword",
"xword",
"short",
"long",
"quad"
};
Error formatTypeId(String& sb, TypeId typeId) noexcept {
if (typeId == TypeId::kVoid) {
return sb.append("void");
}
if (!TypeUtils::isValid(typeId)) {
return sb.append("unknown");
}
const char* typeName = nullptr;
uint32_t typeSize = TypeUtils::sizeOf(typeId);
TypeId scalarType = TypeUtils::scalarOf(typeId);
switch (scalarType) {
case TypeId::kIntPtr : typeName = "intptr" ; break;
case TypeId::kUIntPtr: typeName = "uintptr"; break;
case TypeId::kInt8 : typeName = "int8" ; break;
case TypeId::kUInt8 : typeName = "uint8" ; break;
case TypeId::kInt16 : typeName = "int16" ; break;
case TypeId::kUInt16 : typeName = "uint16" ; break;
case TypeId::kInt32 : typeName = "int32" ; break;
case TypeId::kUInt32 : typeName = "uint32" ; break;
case TypeId::kInt64 : typeName = "int64" ; break;
case TypeId::kUInt64 : typeName = "uint64" ; break;
case TypeId::kFloat32: typeName = "float32"; break;
case TypeId::kFloat64: typeName = "float64"; break;
case TypeId::kFloat80: typeName = "float80"; break;
case TypeId::kMask8 : typeName = "mask8" ; break;
case TypeId::kMask16 : typeName = "mask16" ; break;
case TypeId::kMask32 : typeName = "mask32" ; break;
case TypeId::kMask64 : typeName = "mask64" ; break;
case TypeId::kMmx32 : typeName = "mmx32" ; break;
case TypeId::kMmx64 : typeName = "mmx64" ; break;
default:
typeName = "unknown";
break;
}
uint32_t baseSize = TypeUtils::sizeOf(scalarType);
if (typeSize > baseSize) {
uint32_t count = typeSize / baseSize;
return sb.appendFormat("%sx%u", typeName, unsigned(count));
}
else {
return sb.append(typeName);
}
}
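// For example, TypeId::kInt32 formats as "int32", while a packed type such as TypeId::kInt32x4 (four 32-bit
// integers) formats as "int32x4".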
Error formatFeature(
String& sb,
Arch arch,
uint32_t featureId) noexcept {
#if !defined(ASMJIT_NO_X86)
if (Environment::isFamilyX86(arch)) {
return x86::FormatterInternal::formatFeature(sb, featureId);
}
#endif
#if !defined(ASMJIT_NO_AARCH64)
if (Environment::isFamilyARM(arch)) {
return arm::FormatterInternal::formatFeature(sb, featureId);
}
#endif
return kErrorInvalidArch;
}
Error formatLabel(
String& sb,
FormatFlags formatFlags,
const BaseEmitter* emitter,
uint32_t labelId) noexcept {
DebugUtils::unused(formatFlags);
if (emitter && emitter->code()) {
CodeHolder* code = emitter->code();
if (ASMJIT_UNLIKELY(!code->isLabelValid(labelId))) {
return sb.appendFormat("<InvalidLabel:%u>", labelId);
}
const LabelEntry& le = code->labelEntry(labelId);
if (le.hasName()) {
if (le.hasParent()) {
uint32_t parentId = le.parentId();
const LabelEntry& pe = code->labelEntry(parentId);
if (pe.hasName()) {
ASMJIT_PROPAGATE(sb.append(pe.name()));
}
else {
ASMJIT_PROPAGATE(sb.appendFormat("L%u", parentId));
}
ASMJIT_PROPAGATE(sb.append('.'));
}
if (le.labelType() == LabelType::kAnonymous) {
ASMJIT_PROPAGATE(sb.appendFormat("L%u@", labelId));
}
return sb.append(le.name());
}
}
return sb.appendFormat("L%u", labelId);
}
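// Examples of the formatting above: a named label with a named parent formats as "Parent.Name", a named
// anonymous label as "L<id>@Name", and an unnamed label simply as "L<id>".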
Error formatRegister(
String& sb,
FormatFlags formatFlags,
const BaseEmitter* emitter,
Arch arch,
RegType regType,
uint32_t regId) noexcept {
#if !defined(ASMJIT_NO_X86)
if (Environment::isFamilyX86(arch)) {
return x86::FormatterInternal::formatRegister(sb, formatFlags, emitter, arch, regType, regId);
}
#endif
#if !defined(ASMJIT_NO_AARCH64)
if (Environment::isFamilyARM(arch)) {
return arm::FormatterInternal::formatRegister(sb, formatFlags, emitter, arch, regType, regId);
}
#endif
return kErrorInvalidArch;
}
Error formatOperand(
String& sb,
FormatFlags formatFlags,
const BaseEmitter* emitter,
Arch arch,
const Operand_& op) noexcept {
#if !defined(ASMJIT_NO_X86)
if (Environment::isFamilyX86(arch)) {
return x86::FormatterInternal::formatOperand(sb, formatFlags, emitter, arch, op);
}
#endif
#if !defined(ASMJIT_NO_AARCH64)
if (Environment::isFamilyARM(arch)) {
return arm::FormatterInternal::formatOperand(sb, formatFlags, emitter, arch, op);
}
#endif
return kErrorInvalidArch;
}
ASMJIT_API Error formatDataType(
String& sb,
FormatFlags formatFlags,
Arch arch,
TypeId typeId) noexcept
{
DebugUtils::unused(formatFlags);
if (ASMJIT_UNLIKELY(uint32_t(arch) > uint32_t(Arch::kMaxValue))) {
return DebugUtils::errored(kErrorInvalidArch);
}
uint32_t typeSize = TypeUtils::sizeOf(typeId);
if (typeSize == 0 || typeSize > 8) {
return DebugUtils::errored(kErrorInvalidState);
}
uint32_t typeSizeLog2 = Support::ctz(typeSize);
return sb.append(wordNameTable[size_t(ArchTraits::byArch(arch).typeNameIdByIndex(typeSizeLog2))]);
}
static Error formatDataHelper(String& sb, const char* typeName, uint32_t typeSize, const uint8_t* data, size_t itemCount) noexcept {
ASMJIT_PROPAGATE(sb.append('.'));
ASMJIT_PROPAGATE(sb.append(typeName));
ASMJIT_PROPAGATE(sb.append(' '));
for (size_t i = 0; i < itemCount; i++) {
uint64_t v = 0;
if (i != 0) {
ASMJIT_PROPAGATE(sb.append(", ", 2));
}
switch (typeSize) {
case 1: v = data[0]; break;
case 2: v = Support::loadu_u16(data); break;
case 4: v = Support::loadu_u32(data); break;
case 8: v = Support::loadu_u64(data); break;
}
ASMJIT_PROPAGATE(sb.appendUInt(v, 16, typeSize * 2, StringFormatFlags::kAlternate));
data += typeSize;
}
return kErrorOk;
}
Error formatData(
String& sb,
FormatFlags formatFlags,
Arch arch,
TypeId typeId, const void* data, size_t itemCount, size_t repeatCount
) noexcept {
DebugUtils::unused(formatFlags);
if (ASMJIT_UNLIKELY(!Environment::isDefinedArch(arch))) {
return DebugUtils::errored(kErrorInvalidArch);
}
uint32_t typeSize = TypeUtils::sizeOf(typeId);
if (typeSize == 0) {
return DebugUtils::errored(kErrorInvalidState);
}
if (!Support::isPowerOf2(typeSize)) {
itemCount *= typeSize;
typeSize = 1;
}
while (typeSize > 8u) {
typeSize >>= 1;
itemCount <<= 1;
}
uint32_t typeSizeLog2 = Support::ctz(typeSize);
const char* wordName = wordNameTable[size_t(ArchTraits::byArch(arch).typeNameIdByIndex(typeSizeLog2))];
if (repeatCount > 1) {
ASMJIT_PROPAGATE(sb.appendFormat(".repeat %zu ", repeatCount));
}
return formatDataHelper(sb, wordName, typeSize, static_cast<const uint8_t*>(data), itemCount);
}
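// As an example, formatting two 32-bit items {1, 2} would produce output along the lines of
// ".dd 0x00000001, 0x00000002" (the word name depends on the target architecture's conventions).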
Error formatInstruction(
String& sb,
FormatFlags formatFlags,
const BaseEmitter* emitter,
Arch arch,
const BaseInst& inst, const Operand_* operands, size_t opCount) noexcept {
#if !defined(ASMJIT_NO_X86)
if (Environment::isFamilyX86(arch)) {
return x86::FormatterInternal::formatInstruction(sb, formatFlags, emitter, arch, inst, operands, opCount);
}
#endif
#if !defined(ASMJIT_NO_AARCH64)
if (Environment::isFamilyAArch64(arch)) {
return a64::FormatterInternal::formatInstruction(sb, formatFlags, emitter, arch, inst, operands, opCount);
}
#endif
return kErrorInvalidArch;
}
#ifndef ASMJIT_NO_BUILDER
#ifndef ASMJIT_NO_COMPILER
static Error formatFuncValue(String& sb, FormatFlags formatFlags, const BaseEmitter* emitter, FuncValue value) noexcept {
TypeId typeId = value.typeId();
ASMJIT_PROPAGATE(formatTypeId(sb, typeId));
if (value.isAssigned()) {
ASMJIT_PROPAGATE(sb.append('@'));
if (value.isIndirect()) {
ASMJIT_PROPAGATE(sb.append('['));
}
// NOTE: It should be either reg or stack, but never both. We use two IFs on
// purpose so that if the FuncValue is both, it would show up in the logs.
if (value.isReg()) {
ASMJIT_PROPAGATE(formatRegister(sb, formatFlags, emitter, emitter->arch(), value.regType(), value.regId()));
}
if (value.isStack()) {
ASMJIT_PROPAGATE(sb.appendFormat("[%d]", int(value.stackOffset())));
}
if (value.isIndirect()) {
ASMJIT_PROPAGATE(sb.append(']'));
}
}
return kErrorOk;
}
static Error formatFuncValuePack(
String& sb,
FormatFlags formatFlags,
const BaseCompiler* cc,
const FuncValuePack& pack,
const RegOnly* vRegs) noexcept {
size_t count = pack.count();
if (!count) {
return sb.append("void");
}
if (count > 1) {
ASMJIT_PROPAGATE(sb.append('['));
}
for (uint32_t valueIndex = 0; valueIndex < count; valueIndex++) {
const FuncValue& value = pack[valueIndex];
if (!value) {
break;
}
if (valueIndex) {
ASMJIT_PROPAGATE(sb.append(", "));
}
ASMJIT_PROPAGATE(formatFuncValue(sb, formatFlags, cc, value));
if (vRegs) {
const VirtReg* virtReg = nullptr;
static const char nullReg[] = "<none>";
if (vRegs[valueIndex].isReg() && cc->isVirtIdValid(vRegs[valueIndex].id())) {
virtReg = cc->virtRegById(vRegs[valueIndex].id());
}
ASMJIT_PROPAGATE(sb.append(' '));
if (virtReg) {
ASMJIT_PROPAGATE(Formatter::formatVirtRegName(sb, virtReg));
}
else {
ASMJIT_PROPAGATE(sb.append(nullReg, sizeof(nullReg) - 1));
}
}
}
if (count > 1) {
ASMJIT_PROPAGATE(sb.append(']'));
}
return kErrorOk;
}
static Error formatFuncRets(
String& sb,
FormatFlags formatFlags,
const BaseCompiler* cc,
const FuncDetail& fd) noexcept {
return formatFuncValuePack(sb, formatFlags, cc, fd.retPack(), nullptr);
}
static Error formatFuncArgs(
String& sb,
FormatFlags formatFlags,
const BaseCompiler* cc,
const FuncDetail& fd,
const FuncNode::ArgPack* argPacks) noexcept {
uint32_t argCount = fd.argCount();
if (!argCount) {
return sb.append("void");
}
for (uint32_t argIndex = 0; argIndex < argCount; argIndex++) {
if (argIndex) {
ASMJIT_PROPAGATE(sb.append(", "));
}
ASMJIT_PROPAGATE(formatFuncValuePack(sb, formatFlags, cc, fd.argPack(argIndex), argPacks[argIndex]._data));
}
return kErrorOk;
}
#endif
Error formatNode(
String& sb,
const FormatOptions& formatOptions,
const BaseBuilder* builder,
const BaseNode* node) noexcept {
if (node->hasPosition() && formatOptions.hasFlag(FormatFlags::kPositions)) {
ASMJIT_PROPAGATE(sb.appendFormat("<%05u> ", node->position()));
}
size_t startLineIndex = sb.size();
switch (node->type()) {
case NodeType::kInst:
case NodeType::kJump: {
const InstNode* instNode = node->as<InstNode>();
ASMJIT_PROPAGATE(builder->_funcs.formatInstruction(sb, formatOptions.flags(), builder,
builder->arch(),
instNode->baseInst(), instNode->operands(), instNode->opCount()));
break;
}
case NodeType::kSection: {
const SectionNode* sectionNode = node->as<SectionNode>();
if (builder->_code->isSectionValid(sectionNode->sectionId())) {
const Section* section = builder->_code->sectionById(sectionNode->sectionId());
ASMJIT_PROPAGATE(sb.appendFormat(".section %s", section->name()));
}
break;
}
case NodeType::kLabel: {
const LabelNode* labelNode = node->as<LabelNode>();
ASMJIT_PROPAGATE(formatLabel(sb, formatOptions.flags(), builder, labelNode->labelId()));
ASMJIT_PROPAGATE(sb.append(":"));
break;
}
case NodeType::kAlign: {
const AlignNode* alignNode = node->as<AlignNode>();
ASMJIT_PROPAGATE(sb.appendFormat(".align %u (%s)",
alignNode->alignment(),
alignNode->alignMode() == AlignMode::kCode ? "code" : "data"));
break;
}
case NodeType::kEmbedData: {
const EmbedDataNode* embedNode = node->as<EmbedDataNode>();
ASMJIT_PROPAGATE(sb.append('.'));
ASMJIT_PROPAGATE(formatDataType(sb, formatOptions.flags(), builder->arch(), embedNode->typeId()));
ASMJIT_PROPAGATE(sb.appendFormat(" {Count=%zu Repeat=%zu TotalSize=%zu}", embedNode->itemCount(), embedNode->repeatCount(), embedNode->dataSize()));
break;
}
case NodeType::kEmbedLabel: {
const EmbedLabelNode* embedNode = node->as<EmbedLabelNode>();
ASMJIT_PROPAGATE(sb.append(".label "));
ASMJIT_PROPAGATE(formatLabel(sb, formatOptions.flags(), builder, embedNode->labelId()));
break;
}
case NodeType::kEmbedLabelDelta: {
const EmbedLabelDeltaNode* embedNode = node->as<EmbedLabelDeltaNode>();
ASMJIT_PROPAGATE(sb.append(".label ("));
ASMJIT_PROPAGATE(formatLabel(sb, formatOptions.flags(), builder, embedNode->labelId()));
ASMJIT_PROPAGATE(sb.append(" - "));
ASMJIT_PROPAGATE(formatLabel(sb, formatOptions.flags(), builder, embedNode->baseLabelId()));
ASMJIT_PROPAGATE(sb.append(")"));
break;
}
case NodeType::kConstPool: {
const ConstPoolNode* constPoolNode = node->as<ConstPoolNode>();
ASMJIT_PROPAGATE(sb.appendFormat("[ConstPool Size=%zu Alignment=%zu]", constPoolNode->size(), constPoolNode->alignment()));
break;
}
case NodeType::kComment: {
const CommentNode* commentNode = node->as<CommentNode>();
return sb.appendFormat("; %s", commentNode->inlineComment());
}
case NodeType::kSentinel: {
const SentinelNode* sentinelNode = node->as<SentinelNode>();
const char* sentinelName = nullptr;
switch (sentinelNode->sentinelType()) {
case SentinelType::kFuncEnd:
sentinelName = "[FuncEnd]";
break;
default:
sentinelName = "[Sentinel]";
break;
}
ASMJIT_PROPAGATE(sb.append(sentinelName));
break;
}
#ifndef ASMJIT_NO_COMPILER
case NodeType::kFunc: {
const FuncNode* funcNode = node->as<FuncNode>();
if (builder->isCompiler()) {
ASMJIT_PROPAGATE(formatLabel(sb, formatOptions.flags(), builder, funcNode->labelId()));
ASMJIT_PROPAGATE(sb.append(": "));
ASMJIT_PROPAGATE(formatFuncRets(sb, formatOptions.flags(), static_cast<const BaseCompiler*>(builder), funcNode->detail()));
ASMJIT_PROPAGATE(sb.append(" Func("));
ASMJIT_PROPAGATE(formatFuncArgs(sb, formatOptions.flags(), static_cast<const BaseCompiler*>(builder), funcNode->detail(), funcNode->argPacks()));
ASMJIT_PROPAGATE(sb.append(")"));
}
break;
}
case NodeType::kFuncRet: {
const FuncRetNode* retNode = node->as<FuncRetNode>();
ASMJIT_PROPAGATE(sb.append("[FuncRet]"));
for (uint32_t i = 0; i < 2; i++) {
const Operand_& op = retNode->op(i);
if (!op.isNone()) {
ASMJIT_PROPAGATE(sb.append(i == 0 ? " " : ", "));
ASMJIT_PROPAGATE(formatOperand(sb, formatOptions.flags(), builder, builder->arch(), op));
}
}
break;
}
case NodeType::kInvoke: {
const InvokeNode* invokeNode = node->as<InvokeNode>();
ASMJIT_PROPAGATE(builder->_funcs.formatInstruction(sb, formatOptions.flags(), builder,
builder->arch(),
invokeNode->baseInst(), invokeNode->operands(), invokeNode->opCount()));
break;
}
#endif
default: {
ASMJIT_PROPAGATE(sb.appendFormat("[UserNode:%u]", node->type()));
break;
}
}
if (node->hasInlineComment()) {
size_t requiredPadding = paddingFromOptions(formatOptions, FormatPaddingGroup::kRegularLine);
size_t currentPadding = sb.size() - startLineIndex;
if (currentPadding < requiredPadding) {
ASMJIT_PROPAGATE(sb.appendChars(' ', requiredPadding - currentPadding));
}
ASMJIT_PROPAGATE(sb.append("; "));
ASMJIT_PROPAGATE(sb.append(node->inlineComment()));
}
return kErrorOk;
}
Error formatNodeList(
String& sb,
const FormatOptions& formatOptions,
const BaseBuilder* builder) noexcept {
return formatNodeList(sb, formatOptions, builder, builder->firstNode(), nullptr);
}
Error formatNodeList(
String& sb,
const FormatOptions& formatOptions,
const BaseBuilder* builder,
const BaseNode* begin,
const BaseNode* end) noexcept {
const BaseNode* node = begin;
while (node != end) {
ASMJIT_PROPAGATE(formatNode(sb, formatOptions, builder, node));
ASMJIT_PROPAGATE(sb.append('\n'));
node = node->next();
}
return kErrorOk;
}
#endif
} // {Formatter}
ASMJIT_END_NAMESPACE
#endif // !ASMJIT_NO_LOGGING

View File

@@ -1,265 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_FORMATTER_H_INCLUDED
#define ASMJIT_CORE_FORMATTER_H_INCLUDED
#include "../core/globals.h"
#include "../core/inst.h"
#include "../core/string.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_logging
//! \{
class BaseBuilder;
class BaseEmitter;
class BaseNode;
struct Operand_;
//! Format flags used by \ref Logger and \ref FormatOptions.
enum class FormatFlags : uint32_t {
//! No formatting flags.
kNone = 0u,
//! Also show the binary representation of each logged instruction (Assembler).
kMachineCode = 0x00000001u,
//! Show aliases of some instructions that have them.
//!
//! This option is now mostly for x86/x64 to show aliases of instructions such as `cmov<cc>`, `j<cc>`, `set<cc>`,
//! etc...
kShowAliases = 0x00000008u,
//! Show a text explanation of some immediate values.
kExplainImms = 0x00000010u,
//! Use hexadecimal notation of immediate values.
kHexImms = 0x00000020u,
//! Use hexadecimal notation of addresses and offsets in addresses.
kHexOffsets = 0x00000040u,
//! Show casts between virtual register types (Compiler).
kRegCasts = 0x00000100u,
//! Show positions associated with nodes (Compiler).
kPositions = 0x00000200u,
//! Always format a register type (Compiler).
kRegType = 0x00000400u
};
ASMJIT_DEFINE_ENUM_FLAGS(FormatFlags)
//! Format indentation group, used by \ref FormatOptions.
enum class FormatIndentationGroup : uint32_t {
//! Indentation used for instructions and directives.
kCode = 0u,
//! Indentation used for labels and function nodes.
kLabel = 1u,
//! Indentation used for comments (not inline comments).
kComment = 2u,
//! \cond INTERNAL
//! Reserved for future use.
kReserved = 3u,
//! \endcond
//! Maximum value of `FormatIndentationGroup`.
kMaxValue = kReserved
};
//! Format padding group, used by \ref FormatOptions.
enum class FormatPaddingGroup : uint32_t {
//! Describes padding of a regular line, which can represent instruction, data, or assembler directives.
kRegularLine = 0,
//! Describes padding of machine code dump that is visible next to the instruction, if enabled.
kMachineCode = 1,
//! Maximum value of `FormatPaddingGroup`.
kMaxValue = kMachineCode
};
//! Formatting options used by \ref Logger and \ref Formatter.
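//!
//! A typical configuration might look like this (a sketch, not from the original documentation):
//!
//! ```
//! FormatOptions options;
//! options.addFlags(FormatFlags::kMachineCode | FormatFlags::kExplainImms);
//! options.setIndentation(FormatIndentationGroup::kCode, 2);
//! ```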
class FormatOptions {
public:
//! \name Members
//! \{
//! Format flags.
FormatFlags _flags = FormatFlags::kNone;
//! Indentations for each indentation group.
Support::Array<uint8_t, uint32_t(FormatIndentationGroup::kMaxValue) + 1> _indentation {};
//! Paddings for each padding group.
Support::Array<uint16_t, uint32_t(FormatPaddingGroup::kMaxValue) + 1> _padding {};
//! \}
//! \name Reset
//! \{
//! Resets FormatOptions to its default initialized state.
ASMJIT_INLINE_NODEBUG void reset() noexcept {
_flags = FormatFlags::kNone;
_indentation.fill(uint8_t(0));
_padding.fill(uint16_t(0));
}
//! \}
//! \name Accessors
//! \{
//! Returns format flags.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG FormatFlags flags() const noexcept { return _flags; }
//! Tests whether the given `flag` is set in format flags.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasFlag(FormatFlags flag) const noexcept { return Support::test(_flags, flag); }
//! Resets all format flags to `flags`.
ASMJIT_INLINE_NODEBUG void setFlags(FormatFlags flags) noexcept { _flags = flags; }
//! Adds `flags` to format flags.
ASMJIT_INLINE_NODEBUG void addFlags(FormatFlags flags) noexcept { _flags |= flags; }
//! Removes `flags` from format flags.
ASMJIT_INLINE_NODEBUG void clearFlags(FormatFlags flags) noexcept { _flags &= ~flags; }
//! Returns indentation for the given indentation `group`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint8_t indentation(FormatIndentationGroup group) const noexcept { return _indentation[group]; }
//! Sets indentation for the given indentation `group`.
ASMJIT_INLINE_NODEBUG void setIndentation(FormatIndentationGroup group, uint32_t n) noexcept { _indentation[group] = uint8_t(n); }
//! Resets indentation for the given indentation `group` to zero.
ASMJIT_INLINE_NODEBUG void resetIndentation(FormatIndentationGroup group) noexcept { _indentation[group] = uint8_t(0); }
//! Returns padding for the given padding `group`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t padding(FormatPaddingGroup group) const noexcept { return _padding[group]; }
//! Sets padding for the given padding `group`.
ASMJIT_INLINE_NODEBUG void setPadding(FormatPaddingGroup group, size_t n) noexcept { _padding[group] = uint16_t(n); }
//! Resets padding for the given padding `group` to zero, which means that a default padding will be used
//! based on the target architecture properties.
ASMJIT_INLINE_NODEBUG void resetPadding(FormatPaddingGroup group) noexcept { _padding[group] = uint16_t(0); }
//! \}
};
//! Provides formatting functionality to format operands, instructions, and nodes.
namespace Formatter {
#ifndef ASMJIT_NO_LOGGING
//! Appends a formatted `typeId` to the output string `sb`.
ASMJIT_API Error formatTypeId(
String& sb,
TypeId typeId) noexcept;
//! Appends a formatted `featureId` to the output string `sb`.
//!
//! See \ref CpuFeatures.
ASMJIT_API Error formatFeature(
String& sb,
Arch arch,
uint32_t featureId) noexcept;
//! Appends a formatted register to the output string `sb`.
//!
//! \note Emitter is optional, but it's required to format virtual registers, which won't be formatted properly
//! if the `emitter` is not provided.
ASMJIT_API Error formatRegister(
String& sb,
FormatFlags formatFlags,
const BaseEmitter* emitter,
Arch arch,
RegType regType,
uint32_t regId) noexcept;
//! Appends a formatted label to the output string `sb`.
//!
//! \note Emitter is optional, but it's required to format named labels properly; otherwise the label is
//! formatted as if it were anonymous.
ASMJIT_API Error formatLabel(
String& sb,
FormatFlags formatFlags,
const BaseEmitter* emitter,
uint32_t labelId) noexcept;
//! Appends a formatted operand to the output string `sb`.
//!
//! \note Emitter is optional, but it's required to format named labels and virtual registers. See
//! \ref formatRegister() and \ref formatLabel() for more details.
ASMJIT_API Error formatOperand(
String& sb,
FormatFlags formatFlags,
const BaseEmitter* emitter,
Arch arch,
const Operand_& op) noexcept;
//! Appends a formatted data-type to the output string `sb`.
ASMJIT_API Error formatDataType(
String& sb,
FormatFlags formatFlags,
Arch arch,
TypeId typeId) noexcept;
//! Appends a formatted data to the output string `sb`.
ASMJIT_API Error formatData(
String& sb,
FormatFlags formatFlags,
Arch arch,
TypeId typeId, const void* data, size_t itemCount, size_t repeatCount = 1) noexcept;
//! Appends a formatted instruction to the output string `sb`.
//!
//! \note Emitter is optional, but it's required to format named labels and virtual registers. See
//! \ref formatRegister() and \ref formatLabel() for more details.
ASMJIT_API Error formatInstruction(
String& sb,
FormatFlags formatFlags,
const BaseEmitter* emitter,
Arch arch,
const BaseInst& inst, const Operand_* operands, size_t opCount) noexcept;
#ifndef ASMJIT_NO_BUILDER
//! Appends a formatted node to the output string `sb`.
//!
//! The `node` must belong to the provided `builder`.
ASMJIT_API Error formatNode(
String& sb,
const FormatOptions& formatOptions,
const BaseBuilder* builder,
const BaseNode* node) noexcept;
//! Appends formatted nodes to the output string `sb`.
//!
//! All nodes that are part of the given `builder` will be appended.
ASMJIT_API Error formatNodeList(
String& sb,
const FormatOptions& formatOptions,
const BaseBuilder* builder) noexcept;
//! Appends formatted nodes to the output string `sb`.
//!
//! This function works the same as \ref formatNode(), but appends more nodes to the output string,
//! separating each node with a newline '\n' character.
ASMJIT_API Error formatNodeList(
String& sb,
const FormatOptions& formatOptions,
const BaseBuilder* builder,
const BaseNode* begin,
const BaseNode* end) noexcept;
#endif
#endif
} // {Formatter}
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_FORMATTER_H_INCLUDED

View File

@@ -1,40 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_FORMATTER_P_H_INCLUDED
#define ASMJIT_CORE_FORMATTER_P_H_INCLUDED
#include "../core/compilerdefs.h"
#include "../core/formatter.h"
#include "../core/operand.h"
ASMJIT_BEGIN_NAMESPACE
//! \cond INTERNAL
//! \addtogroup asmjit_logging
//! \{
namespace Formatter {
[[maybe_unused]]
static ASMJIT_INLINE size_t paddingFromOptions(const FormatOptions& formatOptions, FormatPaddingGroup group) noexcept {
static constexpr uint16_t _defaultPaddingTable[uint32_t(FormatPaddingGroup::kMaxValue) + 1] = { 44, 26 };
static_assert(uint32_t(FormatPaddingGroup::kMaxValue) + 1 == 2, "If a new group is defined it must be added here");
size_t padding = formatOptions.padding(group);
return padding ? padding : size_t(_defaultPaddingTable[uint32_t(group)]);
}
Error formatVirtRegName(String& sb, const VirtReg* vReg) noexcept;
Error formatVirtRegNameWithPrefix(String& sb, const char* prefix, size_t prefixSize, const VirtReg* vReg) noexcept;
} // {Formatter}
//! \}
//! \endcond
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_FORMATTER_P_H_INCLUDED

View File

@@ -1,316 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/archtraits.h"
#include "../core/func.h"
#include "../core/operand.h"
#include "../core/type.h"
#include "../core/funcargscontext_p.h"
#if !defined(ASMJIT_NO_X86)
#include "../x86/x86func_p.h"
#endif
#if !defined(ASMJIT_NO_AARCH64)
#include "../arm/a64func_p.h"
#endif
ASMJIT_BEGIN_NAMESPACE
// CallConv - Initialization & Reset
// =================================
ASMJIT_FAVOR_SIZE Error CallConv::init(CallConvId ccId, const Environment& environment) noexcept {
reset();
#if !defined(ASMJIT_NO_X86)
if (environment.isFamilyX86()) {
return x86::FuncInternal::initCallConv(*this, ccId, environment);
}
#endif
#if !defined(ASMJIT_NO_AARCH64)
if (environment.isFamilyAArch64()) {
return a64::FuncInternal::initCallConv(*this, ccId, environment);
}
#endif
return DebugUtils::errored(kErrorInvalidArgument);
}
// FuncDetail - Init / Reset
// =========================
ASMJIT_FAVOR_SIZE Error FuncDetail::init(const FuncSignature& signature, const Environment& environment) noexcept {
CallConvId ccId = signature.callConvId();
uint32_t argCount = signature.argCount();
if (ASMJIT_UNLIKELY(argCount > Globals::kMaxFuncArgs)) {
return DebugUtils::errored(kErrorInvalidArgument);
}
CallConv& cc = _callConv;
ASMJIT_PROPAGATE(cc.init(ccId, environment));
uint32_t registerSize = Environment::registerSizeFromArch(cc.arch());
uint32_t deabstractDelta = TypeUtils::deabstractDeltaOfSize(registerSize);
const TypeId* signatureArgs = signature.args();
for (uint32_t argIndex = 0; argIndex < argCount; argIndex++) {
FuncValuePack& argPack = _args[argIndex];
argPack[0].initTypeId(TypeUtils::deabstract(signatureArgs[argIndex], deabstractDelta));
}
_argCount = uint8_t(argCount);
_vaIndex = uint8_t(signature.vaIndex());
TypeId ret = signature.ret();
if (ret != TypeId::kVoid) {
_rets[0].initTypeId(TypeUtils::deabstract(ret, deabstractDelta));
}
#if !defined(ASMJIT_NO_X86)
if (environment.isFamilyX86()) {
return x86::FuncInternal::initFuncDetail(*this, signature, registerSize);
}
#endif
#if !defined(ASMJIT_NO_AARCH64)
if (environment.isFamilyAArch64()) {
return a64::FuncInternal::initFuncDetail(*this, signature);
}
#endif
// We should never get here: if `cc.init()` succeeded then there has to be an implementation for the current
// architecture. However, stay safe.
return DebugUtils::errored(kErrorInvalidArgument);
}
// FuncFrame - Init
// ================
ASMJIT_FAVOR_SIZE Error FuncFrame::init(const FuncDetail& func) noexcept {
Arch arch = func.callConv().arch();
if (!Environment::isValidArch(arch)) {
return DebugUtils::errored(kErrorInvalidArch);
}
const ArchTraits& archTraits = ArchTraits::byArch(arch);
// Initializing FuncFrame means making a copy of some properties of `func`. Properties like `_localStackSize` will
// be set by the user before the frame is finalized.
reset();
_arch = arch;
_spRegId = uint8_t(archTraits.spRegId());
_saRegId = uint8_t(Reg::kIdBad);
uint32_t naturalStackAlignment = func.callConv().naturalStackAlignment();
uint32_t minDynamicAlignment = Support::max<uint32_t>(naturalStackAlignment, 16);
if (minDynamicAlignment == naturalStackAlignment) {
minDynamicAlignment <<= 1;
}
_naturalStackAlignment = uint8_t(naturalStackAlignment);
_minDynamicAlignment = uint8_t(minDynamicAlignment);
_redZoneSize = uint8_t(func.redZoneSize());
_spillZoneSize = uint8_t(func.spillZoneSize());
_finalStackAlignment = uint8_t(_naturalStackAlignment);
if (func.hasFlag(CallConvFlags::kCalleePopsStack)) {
_calleeStackCleanup = uint16_t(func.argStackSize());
}
// Initial masks of dirty and preserved registers.
for (RegGroup group : RegGroupVirtValues{}) {
_dirtyRegs[group] = func.usedRegs(group);
_preservedRegs[group] = func.preservedRegs(group);
}
// Exclude stack pointer - this register is never included in saved GP regs.
_preservedRegs[RegGroup::kGp] &= ~Support::bitMask(archTraits.spRegId());
// The size and alignment of save/restore area of registers for each virtual register group
_saveRestoreRegSize = func.callConv()._saveRestoreRegSize;
_saveRestoreAlignment = func.callConv()._saveRestoreAlignment;
return kErrorOk;
}
// FuncFrame - Finalize
// ====================
ASMJIT_FAVOR_SIZE Error FuncFrame::finalize() noexcept {
if (!Environment::isValidArch(arch())) {
return DebugUtils::errored(kErrorInvalidArch);
}
const ArchTraits& archTraits = ArchTraits::byArch(arch());
uint32_t registerSize = _saveRestoreRegSize[RegGroup::kGp];
uint32_t vectorSize = _saveRestoreRegSize[RegGroup::kVec];
uint32_t returnAddressSize = archTraits.hasLinkReg() ? 0u : registerSize;
// The final stack alignment must be updated accordingly to call and local stack alignments.
uint32_t stackAlignment = _finalStackAlignment;
ASMJIT_ASSERT(stackAlignment == Support::max(_naturalStackAlignment, _callStackAlignment, _localStackAlignment));
bool hasFP = hasPreservedFP();
bool hasDA = hasDynamicAlignment();
uint32_t kSp = archTraits.spRegId();
uint32_t kFp = archTraits.fpRegId();
uint32_t kLr = archTraits.linkRegId();
// Make frame pointer dirty if the function uses it.
if (hasFP) {
_dirtyRegs[RegGroup::kGp] |= Support::bitMask(kFp);
// Currently required by ARM; if this works differently across architectures, we would most likely have to
// generalize this in CallConv.
if (kLr != Reg::kIdBad) {
_dirtyRegs[RegGroup::kGp] |= Support::bitMask(kLr);
}
}
// These two are identical if the function doesn't align its stack dynamically.
uint32_t saRegId = _saRegId;
if (saRegId == Reg::kIdBad) {
saRegId = kSp;
}
// Fix stack arguments base-register from SP to FP in case it was not picked before and the function performs
// dynamic stack alignment.
if (hasDA && saRegId == kSp) {
saRegId = kFp;
}
// Mark as dirty any register but SP if used as SA pointer.
if (saRegId != kSp) {
_dirtyRegs[RegGroup::kGp] |= Support::bitMask(saRegId);
}
_spRegId = uint8_t(kSp);
_saRegId = uint8_t(saRegId);
// Setup stack size used to save preserved registers.
uint32_t saveRestoreSizes[2] {};
for (RegGroup group : RegGroupVirtValues{}) {
saveRestoreSizes[size_t(!archTraits.hasInstPushPop(group))]
+= Support::alignUp(Support::popcnt(savedRegs(group)) * saveRestoreRegSize(group), saveRestoreAlignment(group));
}
_pushPopSaveSize = uint16_t(saveRestoreSizes[0]);
_extraRegSaveSize = uint16_t(saveRestoreSizes[1]);
uint32_t v = 0; // The beginning of the stack frame relative to SP after prolog.
v += callStackSize(); // Count 'callStackSize' <- This is used to call functions.
v = Support::alignUp(v, stackAlignment); // Align to function's stack alignment.
_localStackOffset = v; // Store 'localStackOffset' <- Function's local stack starts here.
v += localStackSize(); // Count 'localStackSize' <- Function's local stack ends here.
// If the function's stack must be aligned, calculate the alignment necessary to store vector registers, and set
// `FuncAttributes::kAlignedVecSR` to inform PEI that it can use instructions that perform aligned stores/loads.
if (stackAlignment >= vectorSize && _extraRegSaveSize) {
addAttributes(FuncAttributes::kAlignedVecSR);
v = Support::alignUp(v, vectorSize); // Align 'extraRegSaveOffset'.
}
_extraRegSaveOffset = v; // Store 'extraRegSaveOffset' <- Non-GP save/restore starts here.
v += _extraRegSaveSize; // Count 'extraRegSaveSize' <- Non-GP save/restore ends here.
// Calculate if dynamic alignment (DA) slot (stored as offset relative to SP) is required and its offset.
if (hasDA && !hasFP) {
_daOffset = v; // Store 'daOffset' <- DA pointer would be stored here.
v += registerSize; // Count 'daOffset'.
}
else {
_daOffset = FuncFrame::kTagInvalidOffset;
}
// Link Register
// -------------
//
// The stack is aligned after the function call as the return address is stored in a link register. Some
// architectures may require the stack to always be aligned after a PUSH/POP operation, which is represented
// by ArchTraits::stackAlignmentConstraint().
//
// No Link Register (X86/X64)
// --------------------------
//
// The return address should be stored after GP save/restore regs. It has the same size as `registerSize`
// (basically the native register/pointer size). We don't adjust it now as `v` now contains the exact size
// that the function requires to adjust (call frame + stack frame, vec stack size). The stack (if we consider
// this size) is misaligned now, as it's always aligned before the function call - when `call()` is executed
// it pushes the current EIP|RIP onto the stack, which misaligns it by 12 or 8 bytes (depending on the
// architecture). So count the number of bytes needed to align it up to the function's CallFrame (the beginning).
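// For example, on x64 (16-byte alignment, no link register): with `v == 24` and 8 bytes of push/pop save
// area, v + pushPopSaveSize + returnAddressSize == 40, so 8 padding bytes are added to reach 48, which is
// a multiple of 16 (an illustrative case, not an exhaustive one).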
if (v || hasFuncCalls() || !returnAddressSize) {
v += Support::alignUpDiff(v + pushPopSaveSize() + returnAddressSize, stackAlignment);
}
_pushPopSaveOffset = v; // Store 'pushPopSaveOffset' <- Function's push/pop save/restore starts here.
_stackAdjustment = v; // Store 'stackAdjustment' <- SA used by 'add SP, SA' and 'sub SP, SA'.
v += _pushPopSaveSize; // Count 'pushPopSaveSize' <- Function's push/pop save/restore ends here.
_finalStackSize = v; // Store 'finalStackSize' <- Final stack used by the function.
if (!archTraits.hasLinkReg()) {
v += registerSize; // Count 'ReturnAddress' <- As CALL pushes onto stack.
}
// If the function performs dynamic stack alignment then the stack-adjustment must be aligned.
if (hasDA) {
_stackAdjustment = Support::alignUp(_stackAdjustment, stackAlignment);
}
// Calculate where the function arguments start relative to SP.
_saOffsetFromSP = hasDA ? FuncFrame::kTagInvalidOffset : v;
// Calculate where the function arguments start relative to FP or user-provided register.
_saOffsetFromSA = hasFP ? returnAddressSize + registerSize // Return address + frame pointer.
: returnAddressSize + _pushPopSaveSize; // Return address + all push/pop regs.
return kErrorOk;
}
// FuncArgsAssignment - UpdateFuncFrame
// ====================================
ASMJIT_FAVOR_SIZE Error FuncArgsAssignment::updateFuncFrame(FuncFrame& frame) const noexcept {
Arch arch = frame.arch();
const FuncDetail* func = funcDetail();
if (!func) {
return DebugUtils::errored(kErrorInvalidState);
}
RAConstraints constraints;
ASMJIT_PROPAGATE(constraints.init(arch));
FuncArgsContext ctx;
ASMJIT_PROPAGATE(ctx.initWorkData(frame, *this, &constraints));
ASMJIT_PROPAGATE(ctx.markDstRegsDirty(frame));
ASMJIT_PROPAGATE(ctx.markScratchRegs(frame));
ASMJIT_PROPAGATE(ctx.markStackArgsReg(frame));
return kErrorOk;
}
// Func API - Tests
// ================
#if defined(ASMJIT_TEST)
UNIT(func_signature) {
FuncSignature signature;
signature.setRetT<int8_t>();
signature.addArgT<int16_t>();
signature.addArg(TypeId::kInt32);
EXPECT_EQ(signature, FuncSignature::build<int8_t, int16_t, int32_t>());
}
#endif
ASMJIT_END_NAMESPACE

File diff suppressed because it is too large

View File

@@ -1,338 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/funcargscontext_p.h"
ASMJIT_BEGIN_NAMESPACE
//! \cond INTERNAL
//! \addtogroup asmjit_core
//! \{
FuncArgsContext::FuncArgsContext() noexcept {
for (RegGroup group : RegGroupVirtValues{}) {
_workData[size_t(group)].reset();
}
}
ASMJIT_FAVOR_SIZE Error FuncArgsContext::initWorkData(const FuncFrame& frame, const FuncArgsAssignment& args, const RAConstraints* constraints) noexcept {
Arch arch = frame.arch();
const FuncDetail& func = *args.funcDetail();
_archTraits = &ArchTraits::byArch(arch);
_constraints = constraints;
_arch = arch;
// Initialize `_archRegs`.
for (RegGroup group : RegGroupVirtValues{}) {
_workData[group]._archRegs = _constraints->availableRegs(group);
}
if (frame.hasPreservedFP()) {
_workData[size_t(RegGroup::kGp)]._archRegs &= ~Support::bitMask(archTraits().fpRegId());
}
uint32_t reassignmentFlagMask = 0;
// Extract information from all function arguments/assignments and build Var[] array.
uint32_t varId = 0;
for (uint32_t argIndex = 0; argIndex < Globals::kMaxFuncArgs; argIndex++) {
for (uint32_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++) {
const FuncValue& dst_ = args.arg(argIndex, valueIndex);
if (!dst_.isAssigned()) {
continue;
}
const FuncValue& src_ = func.arg(argIndex, valueIndex);
if (ASMJIT_UNLIKELY(!src_.isAssigned())) {
return DebugUtils::errored(kErrorInvalidState);
}
Var& var = _vars[varId];
var.init(src_, dst_);
FuncValue& src = var.cur;
FuncValue& dst = var.out;
RegGroup dstGroup = RegGroup::kMaxValue;
uint32_t dstId = Reg::kIdBad;
WorkData* dstWd = nullptr;
// Not supported.
if (src.isIndirect()) {
return DebugUtils::errored(kErrorInvalidAssignment);
}
if (dst.isReg()) {
RegType dstType = dst.regType();
if (ASMJIT_UNLIKELY(!archTraits().hasRegType(dstType))) {
return DebugUtils::errored(kErrorInvalidRegType);
}
// Copy TypeId from source if the destination doesn't have it. The RA used by BaseCompiler would never
// leave TypeId undefined, but users of FuncAPI can just assign phys regs without specifying their types.
if (!dst.hasTypeId()) {
dst.setTypeId(RegUtils::typeIdOf(dst.regType()));
}
dstGroup = RegUtils::groupOf(dstType);
if (ASMJIT_UNLIKELY(dstGroup > RegGroup::kMaxVirt)) {
return DebugUtils::errored(kErrorInvalidRegGroup);
}
dstWd = &_workData[dstGroup];
dstId = dst.regId();
if (ASMJIT_UNLIKELY(dstId >= 32 || !Support::bitTest(dstWd->archRegs(), dstId))) {
return DebugUtils::errored(kErrorInvalidPhysId);
}
if (ASMJIT_UNLIKELY(Support::bitTest(dstWd->dstRegs(), dstId))) {
return DebugUtils::errored(kErrorOverlappedRegs);
}
dstWd->_dstRegs |= Support::bitMask(dstId);
dstWd->_dstShuf |= Support::bitMask(dstId);
dstWd->_usedRegs |= Support::bitMask(dstId);
}
else {
if (!dst.hasTypeId()) {
dst.setTypeId(src.typeId());
}
OperandSignature signature = getSuitableRegForMemToMemMove(arch, dst.typeId(), src.typeId());
if (ASMJIT_UNLIKELY(!signature.isValid())) {
return DebugUtils::errored(kErrorInvalidState);
}
_stackDstMask = uint8_t(_stackDstMask | Support::bitMask(signature.regGroup()));
}
if (src.isReg()) {
uint32_t srcId = src.regId();
RegGroup srcGroup = RegUtils::groupOf(src.regType());
if (dstGroup == srcGroup) {
ASMJIT_ASSERT(dstWd != nullptr);
dstWd->assign(varId, srcId);
reassignmentFlagMask |= uint32_t(dstId != srcId) << uint32_t(dstGroup);
if (dstId == srcId) {
// The best case: the register is allocated where it is expected to be. However, we should
// not mark this as done if both registers are GP and a sign or zero extension is required.
if (dstGroup != RegGroup::kGp) {
var.markDone();
}
else {
TypeId dt = dst.typeId();
TypeId st = src.typeId();
uint32_t dstSize = TypeUtils::sizeOf(dt);
uint32_t srcSize = TypeUtils::sizeOf(st);
if (dt == TypeId::kVoid || st == TypeId::kVoid || dstSize <= srcSize) {
var.markDone();
}
}
}
}
else {
if (ASMJIT_UNLIKELY(srcGroup > RegGroup::kMaxVirt)) {
return DebugUtils::errored(kErrorInvalidState);
}
WorkData& srcData = _workData[size_t(srcGroup)];
srcData.assign(varId, srcId);
reassignmentFlagMask |= 1u << uint32_t(dstGroup);
}
}
else {
if (dstWd) {
  dstWd->_numStackArgs++;
}
_hasStackSrc = true;
}
varId++;
}
}
// Initialize WorkData::workRegs.
for (RegGroup group : RegGroupVirtValues{}) {
_workData[group]._workRegs =
(_workData[group].archRegs() & (frame.dirtyRegs(group) | ~frame.preservedRegs(group))) | _workData[group].dstRegs() | _workData[group].assignedRegs();
_workData[group]._needsScratch = (reassignmentFlagMask >> uint32_t(group)) & 1u;
}
// Create a variable that represents `SARegId` if necessary.
bool saRegRequired = _hasStackSrc && frame.hasDynamicAlignment() && !frame.hasPreservedFP();
WorkData& gpRegs = _workData[RegGroup::kGp];
uint32_t saCurRegId = frame.saRegId();
uint32_t saOutRegId = args.saRegId();
if (saCurRegId != Reg::kIdBad) {
// Check if the provided `SARegId` doesn't collide with input registers.
if (ASMJIT_UNLIKELY(gpRegs.isAssigned(saCurRegId))) {
return DebugUtils::errored(kErrorOverlappedRegs);
}
}
if (saOutRegId != Reg::kIdBad) {
// Check if the provided `SARegId` doesn't collide with argument assignments.
if (ASMJIT_UNLIKELY(Support::bitTest(gpRegs.dstRegs(), saOutRegId))) {
return DebugUtils::errored(kErrorOverlappedRegs);
}
saRegRequired = true;
}
if (saRegRequired) {
TypeId ptrTypeId = Environment::is32Bit(arch) ? TypeId::kUInt32 : TypeId::kUInt64;
RegType ptrRegType = Environment::is32Bit(arch) ? RegType::kGp32 : RegType::kGp64;
_saVarId = uint8_t(varId);
_hasPreservedFP = frame.hasPreservedFP();
Var& var = _vars[varId];
var.reset();
if (saCurRegId == Reg::kIdBad) {
if (saOutRegId != Reg::kIdBad && !gpRegs.isAssigned(saOutRegId)) {
saCurRegId = saOutRegId;
}
else {
RegMask availableRegs = gpRegs.availableRegs();
if (!availableRegs) {
availableRegs = gpRegs.archRegs() & ~gpRegs.workRegs();
}
if (ASMJIT_UNLIKELY(!availableRegs)) {
return DebugUtils::errored(kErrorNoMorePhysRegs);
}
saCurRegId = Support::ctz(availableRegs);
}
}
var.cur.initReg(ptrRegType, saCurRegId, ptrTypeId);
gpRegs.assign(varId, saCurRegId);
gpRegs._workRegs |= Support::bitMask(saCurRegId);
if (saOutRegId != Reg::kIdBad) {
var.out.initReg(ptrRegType, saOutRegId, ptrTypeId);
gpRegs._dstRegs |= Support::bitMask(saOutRegId);
gpRegs._workRegs |= Support::bitMask(saOutRegId);
}
else {
var.markDone();
}
varId++;
}
_varCount = varId;
// Detect register swaps.
for (varId = 0; varId < _varCount; varId++) {
Var& var = _vars[varId];
if (var.cur.isReg() && var.out.isReg()) {
uint32_t srcId = var.cur.regId();
uint32_t dstId = var.out.regId();
RegGroup group = RegUtils::groupOf(var.cur.regType());
if (group != RegUtils::groupOf(var.out.regType())) {
continue;
}
WorkData& wd = _workData[group];
if (wd.isAssigned(dstId)) {
Var& other = _vars[wd._physToVarId[dstId]];
if (RegUtils::groupOf(other.out.regType()) == group && other.out.regId() == srcId) {
wd._numSwaps++;
_regSwapsMask = uint8_t(_regSwapsMask | Support::bitMask(group));
}
}
}
}
return kErrorOk;
}
ASMJIT_FAVOR_SIZE Error FuncArgsContext::markDstRegsDirty(FuncFrame& frame) noexcept {
for (RegGroup group : RegGroupVirtValues{}) {
WorkData& wd = _workData[group];
uint32_t regs = wd.usedRegs() | wd._dstShuf;
wd._workRegs |= regs;
frame.addDirtyRegs(group, regs);
}
return kErrorOk;
}
ASMJIT_FAVOR_SIZE Error FuncArgsContext::markScratchRegs(FuncFrame& frame) noexcept {
uint32_t groupMask = 0;
// Handle stack to stack moves.
groupMask |= _stackDstMask;
// Handle register swaps.
groupMask |= _regSwapsMask & ~Support::bitMask(RegGroup::kGp);
if (!groupMask) {
  return kErrorOk;
}
// Selects one dirty register per affected group that can be used as a scratch register.
for (RegGroup group : RegGroupVirtValues{}) {
if (Support::bitTest(groupMask, group)) {
WorkData& wd = _workData[group];
if (wd._needsScratch) {
// Initially, pick some clobbered or dirty register.
RegMask workRegs = wd.workRegs();
RegMask regs = workRegs & ~(wd.usedRegs() | wd._dstShuf);
// If that didn't work out pick some register which is not in 'used'.
if (!regs) {
regs = workRegs & ~wd.usedRegs();
}
// If that didn't work out pick any other register that is allocable.
// This last resort case will, however, result in marking one more
// register dirty.
if (!regs) {
regs = wd.archRegs() & ~workRegs;
}
// If that didn't work out we will have to use XORs instead of MOVs.
if (!regs) {
continue;
}
RegMask regMask = Support::blsi(regs);
wd._workRegs |= regMask;
frame.addDirtyRegs(group, regMask);
}
}
}
return kErrorOk;
}
ASMJIT_FAVOR_SIZE Error FuncArgsContext::markStackArgsReg(FuncFrame& frame) noexcept {
if (_saVarId != kVarIdNone) {
const Var& var = _vars[_saVarId];
frame.setSARegId(var.cur.regId());
}
else if (frame.hasPreservedFP()) {
frame.setSARegId(archTraits().fpRegId());
}
return kErrorOk;
}
//! \}
//! \endcond
ASMJIT_END_NAMESPACE

View File

@@ -1,226 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_FUNCARGSCONTEXT_P_H_INCLUDED
#define ASMJIT_CORE_FUNCARGSCONTEXT_P_H_INCLUDED
#include "../core/archtraits.h"
#include "../core/environment.h"
#include "../core/func.h"
#include "../core/operand.h"
#include "../core/radefs_p.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
//! \cond INTERNAL
//! \addtogroup asmjit_core
//! \{
static inline OperandSignature getSuitableRegForMemToMemMove(Arch arch, TypeId dstTypeId, TypeId srcTypeId) noexcept {
const ArchTraits& archTraits = ArchTraits::byArch(arch);
uint32_t signature = 0u;
uint32_t dstSize = TypeUtils::sizeOf(dstTypeId);
uint32_t srcSize = TypeUtils::sizeOf(srcTypeId);
uint32_t maxSize = Support::max<uint32_t>(dstSize, srcSize);
uint32_t regSize = Environment::registerSizeFromArch(arch);
if (maxSize <= regSize || (TypeUtils::isInt(dstTypeId) && TypeUtils::isInt(srcTypeId))) {
signature = maxSize <= 4 ? RegTraits<RegType::kGp32>::kSignature
: RegTraits<RegType::kGp64>::kSignature;
}
else if (maxSize <= 8 && archTraits.hasRegType(RegType::kVec64)) {
signature = RegTraits<RegType::kVec64>::kSignature;
}
else if (maxSize <= 16 && archTraits.hasRegType(RegType::kVec128)) {
signature = RegTraits<RegType::kVec128>::kSignature;
}
else if (maxSize <= 32 && archTraits.hasRegType(RegType::kVec256)) {
signature = RegTraits<RegType::kVec256>::kSignature;
}
else if (maxSize <= 64 && archTraits.hasRegType(RegType::kVec512)) {
signature = RegTraits<RegType::kVec512>::kSignature;
}
return OperandSignature{signature};
}
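// For example, a `float64` move on a 32-bit target (maxSize == 8 > regSize == 4, not an integer pair) would
// select kVec64 when the architecture provides it, whereas the same move on a 64-bit target fits a GP register.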
class FuncArgsContext {
public:
static inline constexpr uint32_t kVarIdNone = 0xFF;
//! Contains information about a single argument or SA register that may need shuffling.
struct Var {
FuncValue cur;
FuncValue out;
inline void init(const FuncValue& cur_, const FuncValue& out_) noexcept {
cur = cur_;
out = out_;
}
//! Resets the value to its unassigned state.
inline void reset() noexcept {
cur.reset();
out.reset();
}
ASMJIT_INLINE_NODEBUG bool isDone() const noexcept { return cur.isDone(); }
ASMJIT_INLINE_NODEBUG void markDone() noexcept { cur.addFlags(FuncValue::kFlagIsDone); }
};
struct WorkData {
//! All allocable registers provided by the architecture.
RegMask _archRegs;
//! All registers that can be used by the shuffler.
RegMask _workRegs;
//! Registers used by the shuffler (all).
RegMask _usedRegs;
//! Assigned registers.
RegMask _assignedRegs;
//! Destination registers assigned to arguments or SA.
RegMask _dstRegs;
//! Destination registers that require shuffling.
RegMask _dstShuf;
//! Number of register swaps.
uint8_t _numSwaps;
//! Number of stack loads.
uint8_t _numStackArgs;
//! Whether this work data would need reassignment.
uint8_t _needsScratch;
//! Reserved (only used as padding).
uint8_t _reserved[5];
//! Physical ID to variable ID mapping.
uint8_t _physToVarId[32];
inline void reset() noexcept {
_archRegs = 0;
_workRegs = 0;
_usedRegs = 0;
_assignedRegs = 0;
_dstRegs = 0;
_dstShuf = 0;
_numSwaps = 0;
_numStackArgs = 0;
_needsScratch = 0;
memset(_reserved, 0, sizeof(_reserved));
memset(_physToVarId, kVarIdNone, 32);
}
[[nodiscard]]
inline bool isAssigned(uint32_t regId) const noexcept {
ASMJIT_ASSERT(regId < 32);
return Support::bitTest(_assignedRegs, regId);
}
inline void assign(uint32_t varId, uint32_t regId) noexcept {
ASMJIT_ASSERT(!isAssigned(regId));
ASMJIT_ASSERT(_physToVarId[regId] == kVarIdNone);
_physToVarId[regId] = uint8_t(varId);
_assignedRegs ^= Support::bitMask(regId);
}
inline void reassign(uint32_t varId, uint32_t newId, uint32_t oldId) noexcept {
ASMJIT_ASSERT( isAssigned(oldId));
ASMJIT_ASSERT(!isAssigned(newId));
ASMJIT_ASSERT(_physToVarId[oldId] == varId);
ASMJIT_ASSERT(_physToVarId[newId] == kVarIdNone);
_physToVarId[oldId] = uint8_t(kVarIdNone);
_physToVarId[newId] = uint8_t(varId);
_assignedRegs ^= Support::bitMask(newId) ^ Support::bitMask(oldId);
}
inline void swap(uint32_t aVarId, uint32_t aRegId, uint32_t bVarId, uint32_t bRegId) noexcept {
ASMJIT_ASSERT(isAssigned(aRegId));
ASMJIT_ASSERT(isAssigned(bRegId));
ASMJIT_ASSERT(_physToVarId[aRegId] == aVarId);
ASMJIT_ASSERT(_physToVarId[bRegId] == bVarId);
_physToVarId[aRegId] = uint8_t(bVarId);
_physToVarId[bRegId] = uint8_t(aVarId);
}
inline void unassign(uint32_t varId, uint32_t regId) noexcept {
ASMJIT_ASSERT(isAssigned(regId));
ASMJIT_ASSERT(_physToVarId[regId] == varId);
DebugUtils::unused(varId);
_physToVarId[regId] = uint8_t(kVarIdNone);
_assignedRegs ^= Support::bitMask(regId);
}
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegMask archRegs() const noexcept { return _archRegs; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegMask workRegs() const noexcept { return _workRegs; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegMask usedRegs() const noexcept { return _usedRegs; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegMask assignedRegs() const noexcept { return _assignedRegs; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegMask dstRegs() const noexcept { return _dstRegs; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegMask availableRegs() const noexcept { return _workRegs & ~_assignedRegs; }
};
//! Architecture traits.
const ArchTraits* _archTraits = nullptr;
//! Architecture constraints.
const RAConstraints* _constraints = nullptr;
//! Target architecture.
Arch _arch = Arch::kUnknown;
//! Has arguments passed via stack (SRC).
bool _hasStackSrc = false;
//! Has preserved frame-pointer (FP).
bool _hasPreservedFP = false;
//! Has arguments assigned to stack (DST).
uint8_t _stackDstMask = 0;
//! Register swap groups (bit-mask).
uint8_t _regSwapsMask = 0;
uint8_t _saVarId = kVarIdNone;
uint32_t _varCount = 0;
Support::Array<WorkData, Globals::kNumVirtGroups> _workData;
Var _vars[Globals::kMaxFuncArgs * Globals::kMaxValuePack + 1];
FuncArgsContext() noexcept;
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const ArchTraits& archTraits() const noexcept { return *_archTraits; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Arch arch() const noexcept { return _arch; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t varCount() const noexcept { return _varCount; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t indexOf(const Var* var) const noexcept { return (size_t)(var - _vars); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Var& var(size_t varId) noexcept { return _vars[varId]; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const Var& var(size_t varId) const noexcept { return _vars[varId]; }
Error initWorkData(const FuncFrame& frame, const FuncArgsAssignment& args, const RAConstraints* constraints) noexcept;
Error markScratchRegs(FuncFrame& frame) noexcept;
Error markDstRegsDirty(FuncFrame& frame) noexcept;
Error markStackArgsReg(FuncFrame& frame) noexcept;
};
//! \}
//! \endcond
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_FUNCARGSCONTEXT_P_H_INCLUDED

View File

@@ -1,135 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/globals.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
// DebugUtils - Error As String
// ============================
ASMJIT_FAVOR_SIZE const char* DebugUtils::errorAsString(Error err) noexcept {
#ifndef ASMJIT_NO_TEXT
// @EnumStringBegin{"enum": "ErrorCode", "output": "sError", "strip": "kError"}@
static const char sErrorString[] =
"Ok\0"
"OutOfMemory\0"
"InvalidArgument\0"
"InvalidState\0"
"InvalidArch\0"
"NotInitialized\0"
"AlreadyInitialized\0"
"FeatureNotEnabled\0"
"TooManyHandles\0"
"TooLarge\0"
"NoCodeGenerated\0"
"InvalidDirective\0"
"InvalidLabel\0"
"TooManyLabels\0"
"LabelAlreadyBound\0"
"LabelAlreadyDefined\0"
"LabelNameTooLong\0"
"InvalidLabelName\0"
"InvalidParentLabel\0"
"InvalidSection\0"
"TooManySections\0"
"InvalidSectionName\0"
"TooManyRelocations\0"
"InvalidRelocEntry\0"
"RelocOffsetOutOfRange\0"
"InvalidAssignment\0"
"InvalidInstruction\0"
"InvalidRegType\0"
"InvalidRegGroup\0"
"InvalidPhysId\0"
"InvalidVirtId\0"
"InvalidElementIndex\0"
"InvalidPrefixCombination\0"
"InvalidLockPrefix\0"
"InvalidXAcquirePrefix\0"
"InvalidXReleasePrefix\0"
"InvalidRepPrefix\0"
"InvalidRexPrefix\0"
"InvalidExtraReg\0"
"InvalidKMaskUse\0"
"InvalidKZeroUse\0"
"InvalidBroadcast\0"
"InvalidEROrSAE\0"
"InvalidAddress\0"
"InvalidAddressIndex\0"
"InvalidAddressScale\0"
"InvalidAddress64Bit\0"
"InvalidAddress64BitZeroExtension\0"
"InvalidDisplacement\0"
"InvalidSegment\0"
"InvalidImmediate\0"
"InvalidOperandSize\0"
"AmbiguousOperandSize\0"
"OperandSizeMismatch\0"
"InvalidOption\0"
"OptionAlreadyDefined\0"
"InvalidTypeId\0"
"InvalidUseOfGpbHi\0"
"InvalidUseOfGpq\0"
"InvalidUseOfF80\0"
"NotConsecutiveRegs\0"
"ConsecutiveRegsAllocation\0"
"IllegalVirtReg\0"
"TooManyVirtRegs\0"
"NoMorePhysRegs\0"
"OverlappedRegs\0"
"OverlappingStackRegWithRegArg\0"
"ExpressionLabelNotBound\0"
"ExpressionOverflow\0"
"FailedToOpenAnonymousMemory\0"
"FailedToOpenFile\0"
"ProtectionFailure\0"
"<Unknown>\0";
static const uint16_t sErrorIndex[] = {
0, 3, 15, 31, 44, 56, 71, 90, 108, 123, 132, 148, 165, 178, 192, 210, 230,
247, 264, 283, 298, 314, 333, 352, 370, 392, 410, 429, 444, 460, 474, 488,
508, 533, 551, 573, 595, 612, 629, 645, 661, 677, 694, 709, 724, 744, 764,
784, 817, 837, 852, 869, 888, 909, 929, 943, 964, 978, 996, 1012, 1028, 1047,
1073, 1088, 1104, 1119, 1134, 1164, 1188, 1207, 1235, 1252, 1270
};
// @EnumStringEnd@
return sErrorString + sErrorIndex[Support::min<Error>(err, kErrorCount)];
#else
DebugUtils::unused(err);
static const char noMessage[] = "";
return noMessage;
#endif
}
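// Sketch of the lookup scheme above (illustrative, not shipped code):
// `sErrorString` is one buffer of NUL-terminated names and `sErrorIndex[i]`
// is the byte offset of the i-th name, so a lookup is a single addition:
//
//   const char* name = DebugUtils::errorAsString(kErrorOutOfMemory);
//   // name == "OutOfMemory" (sErrorString + sErrorIndex[1], i.e. offset 3)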
// DebugUtils - Debug Output
// =========================
ASMJIT_FAVOR_SIZE void DebugUtils::debugOutput(const char* str) noexcept {
#if defined(_WIN32)
::OutputDebugStringA(str);
#else
::fputs(str, stderr);
#endif
}
// DebugUtils - Fatal Errors
// =========================
ASMJIT_FAVOR_SIZE void DebugUtils::assertionFailed(const char* file, int line, const char* msg) noexcept {
char str[1024];
snprintf(str, 1024,
"[asmjit] Assertion failed at %s (line %d):\n"
"[asmjit] %s\n", file, line, msg);
debugOutput(str);
::abort();
}
ASMJIT_END_NAMESPACE

View File

@@ -1,446 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_GLOBALS_H_INCLUDED
#define ASMJIT_CORE_GLOBALS_H_INCLUDED
#include "../core/api-config.h"
ASMJIT_BEGIN_NAMESPACE
//! \cond INTERNAL
//! \addtogroup asmjit_utilities
//! \{
namespace Support {
//! Cast designed to cast between function and void* pointers.
template<typename Dst, typename Src>
static inline Dst ptr_cast_impl(Src p) noexcept { return (Dst)p; }
//! Helper to implement placement new/delete without relying on `<new>` header.
struct PlacementNew { void* ptr; };
} // {Support}
#if defined(ASMJIT_NO_STDCXX)
namespace Support {
ASMJIT_INLINE void* operatorNew(size_t n) noexcept { return malloc(n); }
ASMJIT_INLINE void operatorDelete(void* p) noexcept { if (p) free(p); }
} // {Support}
#define ASMJIT_BASE_CLASS(TYPE) \
ASMJIT_INLINE void* operator new(size_t n) noexcept { return Support::operatorNew(n); } \
ASMJIT_INLINE void operator delete(void* ptr) noexcept { Support::operatorDelete(ptr); } \
\
ASMJIT_INLINE void* operator new(size_t, void* ptr) noexcept { return ptr; } \
ASMJIT_INLINE void operator delete(void*, void*) noexcept {} \
\
ASMJIT_INLINE void* operator new(size_t, Support::PlacementNew ptr) noexcept { return ptr.ptr; } \
ASMJIT_INLINE void operator delete(void*, Support::PlacementNew) noexcept {}
#else
#define ASMJIT_BASE_CLASS(TYPE)
#endif
//! \}
//! \endcond
//! \addtogroup asmjit_core
//! \{
//! Byte order.
enum class ByteOrder {
//! Little endian.
kLE = 0,
//! Big endian.
kBE = 1,
//! Native byte order of the target architecture.
kNative = ASMJIT_ARCH_LE ? kLE : kBE,
//! Swapped byte order of the target architecture.
kSwapped = ASMJIT_ARCH_LE ? kBE : kLE
};
//! A policy that can be used with some `reset()` member functions.
enum class ResetPolicy : uint32_t {
//! Soft reset, doesn't deallocate memory (default).
kSoft = 0,
//! Hard reset, releases all memory used, if any.
kHard = 1
};
//! Contains constants and variables used globally across AsmJit.
namespace Globals {
//! Host memory allocator overhead.
static constexpr uint32_t kAllocOverhead = uint32_t(sizeof(intptr_t) * 4u);
//! Host memory allocator alignment.
static constexpr uint32_t kAllocAlignment = 8u;
//! Aggressive growing strategy threshold.
static constexpr uint32_t kGrowThreshold = 1024u * 1024u * 16u;
//! Default alignment of allocation requests to use when using Zone.
static constexpr uint32_t kZoneAlignment = 8u;
//! Maximum depth of RB-Tree is:
//!
//! `2 * log2(n + 1)`
//!
//! Size of RB node is at least two pointers (without data), so a theoretical architecture limit would be:
//!
//! `2 * log2(addressableMemorySize / sizeof(Node) + 1)`
//!
//! Which yields 30 on 32-bit arch and 61 on 64-bit arch. The final value was adjusted by +1 for safety reasons.
static constexpr uint32_t kMaxTreeHeight = (ASMJIT_ARCH_BITS == 32 ? 30 : 61) + 1;
//! Maximum number of operands per a single instruction.
static constexpr uint32_t kMaxOpCount = 6;
//! Maximum arguments of a function supported by the Compiler / Function API.
static constexpr uint32_t kMaxFuncArgs = 32;
//! The number of values that can be assigned to a single function argument or return value.
static constexpr uint32_t kMaxValuePack = 4;
//! Maximum number of physical registers AsmJit can use per register group.
static constexpr uint32_t kMaxPhysRegs = 32;
//! Maximum alignment.
static constexpr uint32_t kMaxAlignment = 64;
//! Maximum label or symbol size in bytes.
static constexpr uint32_t kMaxLabelNameSize = 2048;
//! Maximum section name size.
static constexpr uint32_t kMaxSectionNameSize = 35;
//! Maximum size of comment.
static constexpr uint32_t kMaxCommentSize = 1024;
//! Invalid identifier.
static constexpr uint32_t kInvalidId = 0xFFFFFFFFu;
//! Returned by `indexOf()` and similar when working with containers that use 32-bit index/size.
static constexpr uint32_t kNotFound = 0xFFFFFFFFu;
//! Invalid base address.
static constexpr uint64_t kNoBaseAddress = ~uint64_t(0);
//! Number of virtual register groups.
static constexpr uint32_t kNumVirtGroups = 4;
struct Init_ {};
struct NoInit_ {};
//! A decorator used to initialize.
static const constexpr Init_ Init {};
//! A decorator used to not initialize.
static const constexpr NoInit_ NoInit {};
} // {Globals}
//! Casts a `void*` pointer `func` to a function pointer `Func`.
template<typename Func>
static ASMJIT_INLINE_NODEBUG Func ptr_as_func(void* func) noexcept { return Support::ptr_cast_impl<Func, void*>(func); }
//! Casts a function pointer `func` to a void pointer `void*`.
template<typename Func>
static ASMJIT_INLINE_NODEBUG void* func_as_ptr(Func func) noexcept { return Support::ptr_cast_impl<void*, Func>(func); }
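// Typical round-trip through the two casts above when materializing JIT-compiled
// code (sketch; the source of `p` is assumed, e.g. a JIT runtime):
//
//   using Fn = int (*)(int);
//   void* p = /* address of generated code */ nullptr;
//   Fn fn = ptr_as_func<Fn>(p);    // void* -> function pointer
//   void* back = func_as_ptr(fn);  // function pointer -> void*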
//! \}
//! \addtogroup asmjit_error_handling
//! \{
//! AsmJit error type (uint32_t).
using Error = uint32_t;
//! AsmJit error codes.
enum ErrorCode : uint32_t {
// @EnumValuesBegin{"enum": "ErrorCode"}@
//! No error (success).
kErrorOk = 0,
//! Out of memory.
kErrorOutOfMemory,
//! Invalid argument.
kErrorInvalidArgument,
//! Invalid state.
//!
//! If this error is returned it means that either you are doing something wrong or AsmJit caught itself by
//! doing something wrong. This error should never be ignored.
kErrorInvalidState,
//! Invalid or incompatible architecture.
kErrorInvalidArch,
//! The object is not initialized.
kErrorNotInitialized,
//! The object is already initialized.
kErrorAlreadyInitialized,
//! Either a built-in feature was disabled at compile time and it's not available or the feature is not
//! available on the target platform.
//!
//! For example trying to allocate large pages on unsupported platform would return this error.
kErrorFeatureNotEnabled,
//! Too many handles (Windows) or file descriptors (Unix/Posix).
kErrorTooManyHandles,
//! Code generated is larger than allowed.
kErrorTooLarge,
//! No code generated.
//!
//! Returned by runtime if the \ref CodeHolder contains no code.
kErrorNoCodeGenerated,
//! Invalid directive.
kErrorInvalidDirective,
//! Attempt to use uninitialized label.
kErrorInvalidLabel,
//! Label index overflow - a single \ref BaseAssembler instance can hold almost 2^32 (4 billion) labels. If
  //! there is an attempt to create more labels, this error is returned.
kErrorTooManyLabels,
//! Label is already bound.
kErrorLabelAlreadyBound,
//! Label is already defined (named labels).
kErrorLabelAlreadyDefined,
//! Label name is too long.
kErrorLabelNameTooLong,
//! Label must always be local if it's anonymous (without a name).
kErrorInvalidLabelName,
//! Parent id passed to \ref CodeHolder::newNamedLabelId() was either invalid or parent is not supported by
//! the requested `LabelType`.
kErrorInvalidParentLabel,
//! Invalid section.
kErrorInvalidSection,
//! Too many sections (section index overflow).
kErrorTooManySections,
//! Invalid section name (most probably too long).
kErrorInvalidSectionName,
//! Relocation index overflow (too many relocations).
kErrorTooManyRelocations,
//! Invalid relocation entry.
kErrorInvalidRelocEntry,
//! Reloc entry contains address that is out of range (unencodable).
kErrorRelocOffsetOutOfRange,
//! Invalid assignment to a register, function argument, or function return value.
kErrorInvalidAssignment,
//! Invalid instruction.
kErrorInvalidInstruction,
//! Invalid register type.
kErrorInvalidRegType,
//! Invalid register group.
kErrorInvalidRegGroup,
//! Invalid physical register id.
kErrorInvalidPhysId,
//! Invalid virtual register id.
kErrorInvalidVirtId,
//! Invalid element index (ARM).
kErrorInvalidElementIndex,
//! Invalid prefix combination (X86|X64).
kErrorInvalidPrefixCombination,
//! Invalid LOCK prefix (X86|X64).
kErrorInvalidLockPrefix,
//! Invalid XACQUIRE prefix (X86|X64).
kErrorInvalidXAcquirePrefix,
//! Invalid XRELEASE prefix (X86|X64).
kErrorInvalidXReleasePrefix,
//! Invalid REP prefix (X86|X64).
kErrorInvalidRepPrefix,
//! Invalid REX prefix (X86|X64).
kErrorInvalidRexPrefix,
//! Invalid {...} register (X86|X64).
kErrorInvalidExtraReg,
//! Invalid {k} use (not supported by the instruction) (X86|X64).
kErrorInvalidKMaskUse,
//! Invalid {k}{z} use (not supported by the instruction) (X86|X64).
kErrorInvalidKZeroUse,
//! Invalid broadcast - Currently only related to invalid use of AVX-512 {1tox} (X86|X64).
kErrorInvalidBroadcast,
//! Invalid 'embedded-rounding' {er} or 'suppress-all-exceptions' {sae} (AVX-512) (X86|X64).
kErrorInvalidEROrSAE,
//! Invalid address used (not encodable).
kErrorInvalidAddress,
//! Invalid index register used in memory address (not encodable).
kErrorInvalidAddressIndex,
//! Invalid address scale (not encodable).
kErrorInvalidAddressScale,
//! Invalid use of 64-bit address.
kErrorInvalidAddress64Bit,
//! Invalid use of 64-bit address that require 32-bit zero-extension (X64).
kErrorInvalidAddress64BitZeroExtension,
//! Invalid displacement (not encodable).
kErrorInvalidDisplacement,
//! Invalid segment (X86|X86_64).
kErrorInvalidSegment,
//! Invalid immediate (out of bounds on X86 and invalid pattern on ARM).
kErrorInvalidImmediate,
//! Invalid operand size.
kErrorInvalidOperandSize,
  //! Ambiguous operand size (memory has zero size while it's required to determine the operation type).
kErrorAmbiguousOperandSize,
//! Mismatching operand size (size of multiple operands doesn't match the operation size).
kErrorOperandSizeMismatch,
//! Invalid option.
kErrorInvalidOption,
//! Option already defined.
kErrorOptionAlreadyDefined,
//! Invalid TypeId.
kErrorInvalidTypeId,
//! Invalid use of a 8-bit GPB-HIGH register.
kErrorInvalidUseOfGpbHi,
//! Invalid use of a 64-bit GPQ register in 32-bit mode.
kErrorInvalidUseOfGpq,
//! Invalid use of an 80-bit float (\ref TypeId::kFloat80).
kErrorInvalidUseOfF80,
  //! Instruction requires the use of consecutive registers, but the registers in its operands aren't consecutive (AVX512, ASIMD load/store, etc...).
kErrorNotConsecutiveRegs,
  //! Failed to allocate consecutive registers - allocable registers are either too restricted or there is a bug in RW info.
kErrorConsecutiveRegsAllocation,
//! Illegal virtual register - reported by instruction validation.
kErrorIllegalVirtReg,
//! AsmJit cannot create more virtual registers.
kErrorTooManyVirtRegs,
//! AsmJit requires a physical register, but no one is available.
kErrorNoMorePhysRegs,
//! A variable has been assigned more than once to a function argument (BaseCompiler).
kErrorOverlappedRegs,
//! Invalid register to hold stack arguments offset.
kErrorOverlappingStackRegWithRegArg,
//! Unbound label cannot be evaluated by expression.
kErrorExpressionLabelNotBound,
//! Arithmetic overflow during expression evaluation.
kErrorExpressionOverflow,
//! Failed to open anonymous memory handle or file descriptor.
kErrorFailedToOpenAnonymousMemory,
//! Failed to open a file.
//!
//! \note This is a generic error that is used by internal filesystem API.
kErrorFailedToOpenFile,
//! Protection failure can be returned from a virtual memory allocator or when trying to change memory access
//! permissions.
kErrorProtectionFailure,
// @EnumValuesEnd@
//! Count of AsmJit error codes.
kErrorCount
};
//! Debugging utilities.
namespace DebugUtils {
//! \cond INTERNAL
//! Used to silence warnings about unused arguments or variables.
template<typename... Args>
static ASMJIT_INLINE_NODEBUG void unused(Args&&...) noexcept {}
//! \endcond
//! Returns the error `err` passed.
//!
//! Provided for debugging purposes. Putting a breakpoint inside `errored` can help with tracing the origin of any
//! error reported / returned by AsmJit.
[[nodiscard]]
static constexpr Error errored(Error err) noexcept { return err; }
//! Returns a printable version of `asmjit::Error` code.
[[nodiscard]]
ASMJIT_API const char* errorAsString(Error err) noexcept;
//! Called to output debugging message(s).
ASMJIT_API void debugOutput(const char* str) noexcept;
//! Called on assertion failure.
//!
//! \param file Source file name where it happened.
//! \param line Line in the source file.
//! \param msg Message to display.
//!
//! If you have problems with assertion failures a breakpoint can be put at \ref assertionFailed() function
//! (asmjit/core/globals.cpp). A call stack will be available when such assertion failure is triggered. AsmJit
//! always returns errors on failures, assertions are a last resort and usually mean unrecoverable state due to out
//! of range array access or totally invalid arguments like nullptr where a valid pointer should be provided, etc...
[[noreturn]]
ASMJIT_API void assertionFailed(const char* file, int line, const char* msg) noexcept;
} // {DebugUtils}
//! \def ASMJIT_ASSERT(...)
//!
//! AsmJit's own assert macro used in AsmJit code-base.
#if defined(ASMJIT_BUILD_DEBUG)
#define ASMJIT_ASSERT(...) \
do { \
if (ASMJIT_UNLIKELY(!(__VA_ARGS__))) { \
::asmjit::DebugUtils::assertionFailed(__FILE__, __LINE__, #__VA_ARGS__); \
} \
} while (0)
#else
#define ASMJIT_ASSERT(...) ((void)0)
#endif
#define ASMJIT_RUNTIME_ASSERT(...) \
do { \
if (ASMJIT_UNLIKELY(!(__VA_ARGS__))) { \
::asmjit::DebugUtils::assertionFailed(__FILE__, __LINE__, #__VA_ARGS__); \
} \
} while (0)
//! \def ASMJIT_NOT_REACHED()
//!
//! Run-time assertion used in code that should never be reached.
#if defined(ASMJIT_BUILD_DEBUG)
#define ASMJIT_NOT_REACHED() ::asmjit::DebugUtils::assertionFailed(__FILE__, __LINE__, "ASMJIT_NOT_REACHED()")
#elif defined(__GNUC__)
#define ASMJIT_NOT_REACHED() __builtin_unreachable()
#else
#define ASMJIT_NOT_REACHED() ASMJIT_ASSUME(0)
#endif
//! \def ASMJIT_PROPAGATE(...)
//!
//! Propagates a possible `Error` produced by `...` to the caller by returning the error immediately. Used by AsmJit
//! internally, but kept public for users that want to use the same technique to propagate errors to the caller.
#define ASMJIT_PROPAGATE(...) \
do { \
::asmjit::Error _err_ = __VA_ARGS__; \
if (ASMJIT_UNLIKELY(_err_)) { \
return _err_; \
} \
} while (0)
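// Caller-side sketch of ASMJIT_PROPAGATE (`stepOne` and `stepTwo` are
// hypothetical functions returning asmjit::Error):
//
//   asmjit::Error doBoth() noexcept {
//     ASMJIT_PROPAGATE(stepOne());  // returns stepOne()'s error immediately
//     ASMJIT_PROPAGATE(stepTwo());  // only evaluated if stepOne() succeeded
//     return asmjit::kErrorOk;
//   }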
//! \}
ASMJIT_END_NAMESPACE
//! Implementation of a placement new so we don't have to depend on `<new>`.
ASMJIT_INLINE_NODEBUG void* operator new(size_t, const asmjit::Support::PlacementNew& p) noexcept {
#if defined(_MSC_VER) && !defined(__clang__)
__assume(p.ptr != nullptr); // Otherwise MSVC would emit a nullptr check.
#endif
return p.ptr;
}
ASMJIT_INLINE_NODEBUG void operator delete(void*, const asmjit::Support::PlacementNew&) noexcept {}
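// Illustrative use of the PlacementNew hook above (SomeType and the storage
// buffer are assumptions, not part of the library):
//
//   alignas(SomeType) char storage[sizeof(SomeType)];
//   SomeType* obj = new (asmjit::Support::PlacementNew{storage}) SomeType();
//   obj->~SomeType();  // placement-constructed objects are destroyed manually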
#endif // ASMJIT_CORE_GLOBALS_H_INCLUDED

View File

@@ -1,129 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/archtraits.h"
#include "../core/inst.h"
#if !defined(ASMJIT_NO_X86)
#include "../x86/x86instapi_p.h"
#endif
#if !defined(ASMJIT_NO_AARCH64)
#include "../arm/a64instapi_p.h"
#endif
ASMJIT_BEGIN_NAMESPACE
// InstAPI - InstId <-> String
// ===========================
#ifndef ASMJIT_NO_TEXT
Error InstAPI::instIdToString(Arch arch, InstId instId, InstStringifyOptions options, String& output) noexcept {
#if !defined(ASMJIT_NO_X86)
if (Environment::isFamilyX86(arch)) {
return x86::InstInternal::instIdToString(instId, options, output);
}
#endif
#if !defined(ASMJIT_NO_AARCH64)
if (Environment::isFamilyAArch64(arch)) {
return a64::InstInternal::instIdToString(instId, options, output);
}
#endif
return DebugUtils::errored(kErrorInvalidArch);
}
InstId InstAPI::stringToInstId(Arch arch, const char* s, size_t len) noexcept {
#if !defined(ASMJIT_NO_X86)
if (Environment::isFamilyX86(arch)) {
return x86::InstInternal::stringToInstId(s, len);
}
#endif
#if !defined(ASMJIT_NO_AARCH64)
if (Environment::isFamilyAArch64(arch)) {
return a64::InstInternal::stringToInstId(s, len);
}
#endif
return 0;
}
#endif // !ASMJIT_NO_TEXT
// InstAPI - Validate
// ==================
#ifndef ASMJIT_NO_VALIDATION
Error InstAPI::validate(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, ValidationFlags validationFlags) noexcept {
#if !defined(ASMJIT_NO_X86)
if (Environment::isFamilyX86(arch)) {
if (arch == Arch::kX86) {
return x86::InstInternal::validateX86(inst, operands, opCount, validationFlags);
}
else {
return x86::InstInternal::validateX64(inst, operands, opCount, validationFlags);
}
}
#endif
#if !defined(ASMJIT_NO_AARCH64)
if (Environment::isFamilyAArch64(arch)) {
return a64::InstInternal::validate(inst, operands, opCount, validationFlags);
}
#endif
return DebugUtils::errored(kErrorInvalidArch);
}
#endif // !ASMJIT_NO_VALIDATION
// InstAPI - QueryRWInfo
// =====================
#ifndef ASMJIT_NO_INTROSPECTION
Error InstAPI::queryRWInfo(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, InstRWInfo* out) noexcept {
if (ASMJIT_UNLIKELY(opCount > Globals::kMaxOpCount)) {
return DebugUtils::errored(kErrorInvalidArgument);
}
#if !defined(ASMJIT_NO_X86)
if (Environment::isFamilyX86(arch)) {
return x86::InstInternal::queryRWInfo(arch, inst, operands, opCount, out);
}
#endif
#if !defined(ASMJIT_NO_AARCH64)
if (Environment::isFamilyAArch64(arch)) {
return a64::InstInternal::queryRWInfo(inst, operands, opCount, out);
}
#endif
return DebugUtils::errored(kErrorInvalidArch);
}
#endif // !ASMJIT_NO_INTROSPECTION
// InstAPI - QueryFeatures
// =======================
#ifndef ASMJIT_NO_INTROSPECTION
Error InstAPI::queryFeatures(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, CpuFeatures* out) noexcept {
#if !defined(ASMJIT_NO_X86)
if (Environment::isFamilyX86(arch)) {
return x86::InstInternal::queryFeatures(arch, inst, operands, opCount, out);
}
#endif
#if !defined(ASMJIT_NO_AARCH64)
if (Environment::isFamilyAArch64(arch)) {
return a64::InstInternal::queryFeatures(inst, operands, opCount, out);
}
#endif
return DebugUtils::errored(kErrorInvalidArch);
}
#endif // !ASMJIT_NO_INTROSPECTION
ASMJIT_END_NAMESPACE

View File

@@ -1,932 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_INST_H_INCLUDED
#define ASMJIT_CORE_INST_H_INCLUDED
#include "../core/cpuinfo.h"
#include "../core/operand.h"
#include "../core/string.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_instruction_db
//! \{
//! Describes an instruction id and modifiers used together with the id.
//!
//! Each architecture has a set of valid instructions indexed from 0. Instruction with 0 id is, however, a special
//! instruction that describes a "no instruction" or "invalid instruction". Different architectures can assign a
//! different instruction to the same id; each architecture typically has its own instructions indexed from 1.
//!
//! Instruction identifiers listed by architecture:
//!
//! - \ref x86::Inst (X86 and X86_64)
//! - \ref a64::Inst (AArch64)
using InstId = uint32_t;
//! Instruction id parts.
//!
//! A mask that specifies a bit-layout of \ref InstId.
enum class InstIdParts : uint32_t {
// Common Masks
// ------------
//! Real id without any modifiers (always 16 least significant bits).
kRealId = 0x0000FFFFu,
//! Instruction is abstract (or virtual, IR, etc...).
kAbstract = 0x80000000u,
// ARM Specific
// ------------
//! AArch32 first data type, used by ASIMD instructions (`inst.dt.dt2`).
kA32_DT = 0x000F0000u,
//! AArch32 second data type, used by ASIMD instructions (`inst.dt.dt2`).
kA32_DT2 = 0x00F00000u,
//! AArch32/AArch64 condition code.
kARM_Cond = 0x78000000u
};
//! Instruction options.
//!
//! Instruction options complement instruction identifier and attributes.
enum class InstOptions : uint32_t {
//! No options.
kNone = 0,
//! Used internally by emitters for handling errors and rare cases.
kReserved = 0x00000001u,
//! Prevents following a jump during compilation (Compiler).
kUnfollow = 0x00000002u,
//! Overwrite the destination operand(s) (Compiler).
//!
//! Hint that is important for register liveness analysis. It tells the compiler that the destination operand will
//! be overwritten now or by adjacent instructions. Compiler knows when a register is completely overwritten by a
//! single instruction, for example you don't have to mark "movaps" or "pxor x, x", however, if a pair of
//! instructions is used and the first of them doesn't completely overwrite the content of the destination,
//! Compiler fails to mark that register as dead.
//!
//! X86 Specific
//! ------------
//!
//! - All instructions that always overwrite at least the size of the register the virtual-register uses, for
//! example "mov", "movq", "movaps" don't need the overwrite option to be used - conversion, shuffle, and
//! other miscellaneous instructions included.
//!
//! - All instructions that clear the destination register if all operands are the same, for example "xor x, x",
//! "pcmpeqb x x", etc...
//!
//! - Consecutive instructions that partially overwrite the variable until there is no old content require
  //!   `BaseCompiler::overwrite()` to be used. Some examples (not always the best use cases, though):
//!
//! - `movlps xmm0, ?` followed by `movhps xmm0, ?` and vice versa
//! - `movlpd xmm0, ?` followed by `movhpd xmm0, ?` and vice versa
//! - `mov al, ?` followed by `and ax, 0xFF`
//! - `mov al, ?` followed by `mov ah, al`
//! - `pinsrq xmm0, ?, 0` followed by `pinsrq xmm0, ?, 1`
//!
//! - If the allocated virtual register is used temporarily for scalar operations. For example if you allocate a
//! full vector like `x86::Compiler::newXmm()` and then use that vector for scalar operations you should use
//! `overwrite()` directive:
//!
//! - `sqrtss x, y` - only LO element of `x` is changed, if you don't
//! use HI elements, use `compiler.overwrite().sqrtss(x, y)`.
kOverwrite = 0x00000004u,
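  // Hedged sketch of the scalar-use case described above (`cc` is assumed to
  // be an x86::Compiler; mirrors the `compiler.overwrite().sqrtss(x, y)` hint):
  //
  //   auto x = cc.newXmm();
  //   auto y = cc.newXmm();
  //   cc.overwrite().sqrtss(x, y);  // HI elements of `x` are treated as dead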
//! Emit short-form of the instruction.
kShortForm = 0x00000010u,
//! Emit long-form of the instruction.
kLongForm = 0x00000020u,
//! Conditional jump is likely to be taken.
kTaken = 0x00000040u,
//! Conditional jump is unlikely to be taken.
kNotTaken = 0x00000080u,
// X86 & X64 Options
// -----------------
//! Use ModMR instead of ModRM if applicable.
kX86_ModMR = 0x00000100u,
//! Use ModRM instead of ModMR if applicable.
kX86_ModRM = 0x00000200u,
//! Use 3-byte VEX prefix if possible (AVX) (must be 0x00000400).
kX86_Vex3 = 0x00000400u,
//! Use VEX prefix when both VEX|EVEX prefixes are available (HINT: AVX_VNNI).
kX86_Vex = 0x00000800u,
//! Use 4-byte EVEX prefix if possible (AVX-512) (must be 0x00001000).
kX86_Evex = 0x00001000u,
//! LOCK prefix (lock-enabled instructions only).
kX86_Lock = 0x00002000u,
//! REP prefix (string instructions only).
kX86_Rep = 0x00004000u,
//! REPNE prefix (string instructions only).
kX86_Repne = 0x00008000u,
//! XACQUIRE prefix (only allowed instructions).
kX86_XAcquire = 0x00010000u,
//! XRELEASE prefix (only allowed instructions).
kX86_XRelease = 0x00020000u,
//! AVX-512: embedded-rounding {er} and implicit {sae}.
kX86_ER = 0x00040000u,
//! AVX-512: suppress-all-exceptions {sae}.
kX86_SAE = 0x00080000u,
//! AVX-512: round-to-nearest (even) {rn-sae} (bits 00).
kX86_RN_SAE = 0x00000000u,
//! AVX-512: round-down (toward -inf) {rd-sae} (bits 01).
kX86_RD_SAE = 0x00200000u,
//! AVX-512: round-up (toward +inf) {ru-sae} (bits 10).
kX86_RU_SAE = 0x00400000u,
//! AVX-512: round-toward-zero (truncate) {rz-sae} (bits 11).
kX86_RZ_SAE = 0x00600000u,
//! AVX-512: Use zeroing {k}{z} instead of merging {k}.
kX86_ZMask = 0x00800000u,
//! AVX-512: Mask to get embedded rounding bits (2 bits).
kX86_ERMask = kX86_RZ_SAE,
//! AVX-512: Mask of all possible AVX-512 options except EVEX prefix flag.
kX86_AVX512Mask = 0x00FC0000u,
//! Force REX.B and/or VEX.B field (X64 only).
kX86_OpCodeB = 0x01000000u,
//! Force REX.X and/or VEX.X field (X64 only).
kX86_OpCodeX = 0x02000000u,
//! Force REX.R and/or VEX.R field (X64 only).
kX86_OpCodeR = 0x04000000u,
//! Force REX.W and/or VEX.W field (X64 only).
kX86_OpCodeW = 0x08000000u,
//! Force REX prefix (X64 only).
kX86_Rex = 0x40000000u,
//! Invalid REX prefix (set by X86 or when AH|BH|CH|DH regs are used on X64).
kX86_InvalidRex = 0x80000000u
};
ASMJIT_DEFINE_ENUM_FLAGS(InstOptions)
//! Instruction control flow.
enum class InstControlFlow : uint32_t {
//! Regular instruction.
kRegular = 0u,
//! Unconditional jump.
kJump = 1u,
//! Conditional jump (branch).
kBranch = 2u,
//! Function call.
kCall = 3u,
//! Function return.
kReturn = 4u,
  //! Maximum value of `InstControlFlow`.
kMaxValue = kReturn
};
//! Hint that is used when both input operands to the instruction are the same.
//!
//! Provides hints to the instruction RW query regarding special cases in which two or more operands are the same
//! registers. This is required by instructions such as XOR, AND, OR, SUB, etc... These hints will influence the
//! RW operations query.
enum class InstSameRegHint : uint8_t {
//! No special handling.
kNone = 0,
//! Operands become read-only, the operation doesn't change the content - `X & X` and similar.
kRO = 1,
//! Operands become write-only, the content of the input(s) don't matter - `X ^ X`, `X - X`, and similar.
kWO = 2
};
//! Options that can be used when converting instruction IDs to strings.
enum class InstStringifyOptions : uint32_t {
//! No options.
kNone = 0x00000000u,
//! Stringify a full instruction name with known aliases.
//!
//! This option is designed for architectures where instruction aliases are common, for example X86, and where
  //! multiple aliases can be used in assembly code to distinguish intent - for example instructions
  //! such as JZ and JE are the same, but the first is used in a context of equality to zero, and the second is
  //! used when two values are equal (for example JE next to CMP).
kAliases = 0x00000001u
};
ASMJIT_DEFINE_ENUM_FLAGS(InstStringifyOptions)
//! Instruction id, options, and extraReg in a single structure. This structure exists mainly to simplify analysis
//! and validation API that requires `BaseInst` and `Operand[]` array.
class BaseInst {
public:
//! \name Members
//! \{
//! Instruction id with modifiers.
InstId _id;
//! Instruction options.
InstOptions _options;
//! Extra register used by the instruction (either REP register or AVX-512 selector).
RegOnly _extraReg;
enum Id : uint32_t {
//! Invalid or uninitialized instruction id.
kIdNone = 0x00000000u,
//! Abstract instruction (BaseBuilder and BaseCompiler).
kIdAbstract = 0x80000000u
};
//! \}
//! \name Construction & Destruction
//! \{
//! Creates a new BaseInst instance with `id` and `options` set.
//!
  //! Default values of `id` and `options` are zero, which means a 'none' instruction. Such an instruction is guaranteed
//! to never exist for any architecture supported by AsmJit.
ASMJIT_INLINE_NODEBUG explicit BaseInst(InstId instId = 0, InstOptions options = InstOptions::kNone) noexcept
: _id(instId),
_options(options),
_extraReg() {}
ASMJIT_INLINE_NODEBUG BaseInst(InstId instId, InstOptions options, const RegOnly& extraReg) noexcept
: _id(instId),
_options(options),
_extraReg(extraReg) {}
ASMJIT_INLINE_NODEBUG BaseInst(InstId instId, InstOptions options, const Reg& extraReg) noexcept
: _id(instId),
_options(options),
_extraReg { extraReg.signature(), extraReg.id() } {}
//! \}
//! \name Instruction id and modifiers
//! \{
//! Returns the instruction id with modifiers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG InstId id() const noexcept { return _id; }
  //! Sets the instruction id and modifiers from `id`.
ASMJIT_INLINE_NODEBUG void setId(InstId id) noexcept { _id = id; }
//! Resets the instruction id and modifiers to zero, see \ref kIdNone.
ASMJIT_INLINE_NODEBUG void resetId() noexcept { _id = 0; }
//! Returns a real instruction id that doesn't contain any modifiers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG InstId realId() const noexcept { return _id & uint32_t(InstIdParts::kRealId); }
template<InstIdParts kPart>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t getInstIdPart() const noexcept {
return (uint32_t(_id) & uint32_t(kPart)) >> Support::ConstCTZ<uint32_t(kPart)>::value;
}
template<InstIdParts kPart>
ASMJIT_INLINE_NODEBUG void setInstIdPart(uint32_t value) noexcept {
_id = (_id & ~uint32_t(kPart)) | (value << Support::ConstCTZ<uint32_t(kPart)>::value);
}
//! \}
//! \name Instruction Options
//! \{
//! Returns instruction options associated with this instruction.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG InstOptions options() const noexcept { return _options; }
//! Tests whether the given instruction `option` is enabled.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasOption(InstOptions option) const noexcept { return Support::test(_options, option); }
//! Replaces all instruction options by the given `options`.
ASMJIT_INLINE_NODEBUG void setOptions(InstOptions options) noexcept { _options = options; }
//! Adds instruction options provided by `options`.
ASMJIT_INLINE_NODEBUG void addOptions(InstOptions options) noexcept { _options |= options; }
//! Clears instruction options provided by `options`.
ASMJIT_INLINE_NODEBUG void clearOptions(InstOptions options) noexcept { _options &= ~options; }
//! Resets all instruction options to `InstOptions::kNone` (there will be no instruction options active after reset).
ASMJIT_INLINE_NODEBUG void resetOptions() noexcept { _options = InstOptions::kNone; }
//! \}
//! \name Extra Register
//! \{
  //! Tests whether the instruction has an associated extra register.
//!
//! \note Extra registers are currently only used on X86 by AVX-512 masking such as `{k}` and `{k}{z}` and by repeated
//! instructions to explicitly assign a virtual register that would be ECX/RCX.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasExtraReg() const noexcept { return _extraReg.isReg(); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegOnly& extraReg() noexcept { return _extraReg; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const RegOnly& extraReg() const noexcept { return _extraReg; }
ASMJIT_INLINE_NODEBUG void setExtraReg(const Reg& reg) noexcept { _extraReg.init(reg); }
ASMJIT_INLINE_NODEBUG void setExtraReg(const RegOnly& reg) noexcept { _extraReg.init(reg); }
ASMJIT_INLINE_NODEBUG void resetExtraReg() noexcept { _extraReg.reset(); }
//! \}
//! \name ARM Specific
//! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG arm::CondCode armCondCode() const noexcept { return (arm::CondCode)getInstIdPart<InstIdParts::kARM_Cond>(); }
ASMJIT_INLINE_NODEBUG void setArmCondCode(arm::CondCode cc) noexcept { setInstIdPart<InstIdParts::kARM_Cond>(uint32_t(cc)); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG a32::DataType armDt() const noexcept { return (a32::DataType)getInstIdPart<InstIdParts::kA32_DT>(); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG a32::DataType armDt2() const noexcept { return (a32::DataType)getInstIdPart<InstIdParts::kA32_DT2>(); }
//! \}
//! \name Statics
//! \{
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR InstId composeARMInstId(uint32_t id, arm::CondCode cc) noexcept {
return id | (uint32_t(cc) << Support::ConstCTZ<uint32_t(InstIdParts::kARM_Cond)>::value);
}
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR InstId composeARMInstId(uint32_t id, a32::DataType dt, arm::CondCode cc = arm::CondCode::kAL) noexcept {
return id | (uint32_t(dt) << Support::ConstCTZ<uint32_t(InstIdParts::kA32_DT)>::value)
| (uint32_t(cc) << Support::ConstCTZ<uint32_t(InstIdParts::kARM_Cond)>::value);
}
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR InstId composeARMInstId(uint32_t id, a32::DataType dt, a32::DataType dt2, arm::CondCode cc = arm::CondCode::kAL) noexcept {
return id | (uint32_t(dt) << Support::ConstCTZ<uint32_t(InstIdParts::kA32_DT)>::value)
| (uint32_t(dt2) << Support::ConstCTZ<uint32_t(InstIdParts::kA32_DT2)>::value)
| (uint32_t(cc) << Support::ConstCTZ<uint32_t(InstIdParts::kARM_Cond)>::value);
}
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR InstId extractRealId(uint32_t id) noexcept {
return id & uint32_t(InstIdParts::kRealId);
}
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR arm::CondCode extractARMCondCode(uint32_t id) noexcept {
return (arm::CondCode)((uint32_t(id) & uint32_t(InstIdParts::kARM_Cond)) >> Support::ConstCTZ<uint32_t(InstIdParts::kARM_Cond)>::value);
}
//! \}
};
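// Round-trip sketch for the static helpers above (the round trip holds for any
// 4-bit condition code given the kARM_Cond mask):
//
//   InstId id = BaseInst::composeARMInstId(42u, arm::CondCode::kEQ);
//   InstId real = BaseInst::extractRealId(id);            // == 42
//   arm::CondCode cc = BaseInst::extractARMCondCode(id);  // == kEQ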
//! CPU read/write flags used by \ref InstRWInfo.
//!
//! These flags can be used to get a basic overview about CPU specifics flags used by instructions.
enum class CpuRWFlags : uint32_t {
//! No flags.
kNone = 0x00000000u,
// Common RW Flags (0x000000FF)
// ----------------------------
//! Signed overflow flag.
kOF = 0x00000001u,
//! Carry flag.
kCF = 0x00000002u,
//! Zero and/or equality flag (1 if zero/equal).
kZF = 0x00000004u,
//! Sign flag (negative/sign, if set).
kSF = 0x00000008u,
// X86 Specific RW Flags
// ----------------------------------
//! Carry flag (X86|X86_64).
kX86_CF = kCF,
//! Overflow flag (X86|X86_64).
kX86_OF = kOF,
//! Sign flag (X86|X86_64).
kX86_SF = kSF,
//! Zero flag (X86|X86_64).
kX86_ZF = kZF,
//! Adjust flag (X86|X86_64).
kX86_AF = 0x00000100u,
//! Parity flag (X86|X86_64).
kX86_PF = 0x00000200u,
//! Direction flag (X86|X86_64).
kX86_DF = 0x00000400u,
//! Interrupt enable flag (X86|X86_64).
kX86_IF = 0x00000800u,
//! Alignment check flag (X86|X86_64).
kX86_AC = 0x00001000u,
//! FPU C0 status flag (X86|X86_64).
kX86_C0 = 0x00010000u,
//! FPU C1 status flag (X86|X86_64).
kX86_C1 = 0x00020000u,
//! FPU C2 status flag (X86|X86_64).
kX86_C2 = 0x00040000u,
//! FPU C3 status flag (X86|X86_64).
kX86_C3 = 0x00080000u,
// ARM Specific RW Flags
// ----------------------------------
kARM_V = kOF,
kARM_C = kCF,
kARM_Z = kZF,
kARM_N = kSF,
kARM_Q = 0x00000100u,
kARM_GE = 0x00000200u
};
ASMJIT_DEFINE_ENUM_FLAGS(CpuRWFlags)
//! Operand read/write flags describe how the operand is accessed and some additional features.
enum class OpRWFlags : uint32_t {
//! No flags.
kNone = 0,
//! Operand is read.
kRead = 0x00000001u,
//! Operand is written.
kWrite = 0x00000002u,
//! Operand is both read and written.
kRW = 0x00000003u,
//! Register operand can be replaced by a memory operand.
kRegMem = 0x00000004u,
//! The register must be allocated to the index of the previous register + 1.
//!
//! This flag is used by all architectures to describe instructions that use consecutive registers, where only the
//! first one is encoded in the instruction, and the others are just a sequence that starts with the first one. On
//! X86|X86_64 architecture this is used by instructions such as VP2INTERSECTD and VP2INTERSECTQ. On ARM/AArch64
//! this is used by vector load and store instructions that can load or store multiple registers at once.
kConsecutive = 0x00000008u,
//! The `extendByteMask()` represents a zero extension.
kZExt = 0x00000010u,
//! The register must have assigned a unique physical ID, which cannot be assigned to any other register.
kUnique = 0x00000080u,
//! Register operand must use \ref OpRWInfo::physId().
kRegPhysId = 0x00000100u,
//! Base register of a memory operand must use \ref OpRWInfo::physId().
kMemPhysId = 0x00000200u,
//! This memory operand is only used to encode registers and doesn't access memory.
//!
//! X86 Specific
//! ------------
//!
//! Instructions that use such feature include BNDLDX, BNDSTX, and LEA.
  kMemFake = 0x00000400u,
//! Base register of the memory operand will be read.
kMemBaseRead = 0x00001000u,
//! Base register of the memory operand will be written.
kMemBaseWrite = 0x00002000u,
//! Base register of the memory operand will be read & written.
kMemBaseRW = 0x00003000u,
//! Index register of the memory operand will be read.
kMemIndexRead = 0x00004000u,
//! Index register of the memory operand will be written.
kMemIndexWrite = 0x00008000u,
//! Index register of the memory operand will be read & written.
kMemIndexRW = 0x0000C000u,
//! Base register of the memory operand will be modified before the operation.
kMemBasePreModify = 0x00010000u,
//! Base register of the memory operand will be modified after the operation.
kMemBasePostModify = 0x00020000u
};
ASMJIT_DEFINE_ENUM_FLAGS(OpRWFlags)
// Don't remove these asserts. Read/Write flags are used extensively
// by Compiler and they must always be compatible with constants below.
static_assert(uint32_t(OpRWFlags::kRead) == 0x1, "OpRWFlags::kRead flag must be 0x1");
static_assert(uint32_t(OpRWFlags::kWrite) == 0x2, "OpRWFlags::kWrite flag must be 0x2");
static_assert(uint32_t(OpRWFlags::kRegMem) == 0x4, "OpRWFlags::kRegMem flag must be 0x4");
//! Read/Write information related to a single operand, used by \ref InstRWInfo.
struct OpRWInfo {
//! \name Members
//! \{
//! Read/Write flags.
OpRWFlags _opFlags;
//! Physical register index, if required.
uint8_t _physId;
//! Size of a possible memory operand that can replace a register operand.
uint8_t _rmSize;
//! If non-zero, then this is a consecutive lead register, and the value describes how many registers follow.
uint8_t _consecutiveLeadCount;
//! Reserved for future use.
uint8_t _reserved[1];
//! Read bit-mask where each bit represents one byte read from Reg/Mem.
uint64_t _readByteMask;
//! Write bit-mask where each bit represents one byte written to Reg/Mem.
uint64_t _writeByteMask;
//! Zero/Sign extend bit-mask where each bit represents one byte written to Reg/Mem.
uint64_t _extendByteMask;
//! \}
//! \name Reset
//! \{
//! Resets this operand information to all zeros.
ASMJIT_INLINE_NODEBUG void reset() noexcept { *this = OpRWInfo{}; }
//! Resets this operand info (resets all members) and set common information
//! to the given `opFlags`, `regSize`, and possibly `physId`.
inline void reset(OpRWFlags opFlags, uint32_t regSize, uint32_t physId = Reg::kIdBad) noexcept {
_opFlags = opFlags;
_physId = uint8_t(physId);
_rmSize = Support::test(opFlags, OpRWFlags::kRegMem) ? uint8_t(regSize) : uint8_t(0);
_consecutiveLeadCount = 0;
_resetReserved();
uint64_t mask = Support::lsbMask<uint64_t>(Support::min<uint32_t>(regSize, 64));
_readByteMask = Support::test(opFlags, OpRWFlags::kRead) ? mask : uint64_t(0);
_writeByteMask = Support::test(opFlags, OpRWFlags::kWrite) ? mask : uint64_t(0);
_extendByteMask = 0;
}
ASMJIT_INLINE_NODEBUG void _resetReserved() noexcept {
_reserved[0] = 0;
}
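  // Sketch of what reset() above produces for a 16-byte register operand that
  // is read, written, and replaceable by memory (values follow from the code):
  //
  //   OpRWInfo rw;
  //   rw.reset(OpRWFlags::kRW | OpRWFlags::kRegMem, 16);
  //   // rw.rmSize()        == 16
  //   // rw.readByteMask()  == 0xFFFF  (lsbMask of the low 16 bytes)
  //   // rw.writeByteMask() == 0xFFFF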
//! \}
//! \name Operand Flags
//! \{
//! Returns operand flags.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG OpRWFlags opFlags() const noexcept { return _opFlags; }
//! Tests whether operand flags contain the given `flag`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasOpFlag(OpRWFlags flag) const noexcept { return Support::test(_opFlags, flag); }
//! Adds the given `flags` to operand flags.
ASMJIT_INLINE_NODEBUG void addOpFlags(OpRWFlags flags) noexcept { _opFlags |= flags; }
//! Removes the given `flags` from operand flags.
ASMJIT_INLINE_NODEBUG void clearOpFlags(OpRWFlags flags) noexcept { _opFlags &= ~flags; }
//! Tests whether this operand is read from.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isRead() const noexcept { return hasOpFlag(OpRWFlags::kRead); }
//! Tests whether this operand is written to.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isWrite() const noexcept { return hasOpFlag(OpRWFlags::kWrite); }
//! Tests whether this operand is both read and write.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isReadWrite() const noexcept { return (_opFlags & OpRWFlags::kRW) == OpRWFlags::kRW; }
//! Tests whether this operand is read only.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isReadOnly() const noexcept { return (_opFlags & OpRWFlags::kRW) == OpRWFlags::kRead; }
//! Tests whether this operand is write only.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isWriteOnly() const noexcept { return (_opFlags & OpRWFlags::kRW) == OpRWFlags::kWrite; }
  //! Returns how many consecutive registers follow this operand when it's a consecutive lead register (zero otherwise).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t consecutiveLeadCount() const noexcept { return _consecutiveLeadCount; }
//! Tests whether this operand is Reg/Mem
//!
//! Reg/Mem operands can use either register or memory.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isRm() const noexcept { return hasOpFlag(OpRWFlags::kRegMem); }
//! Tests whether the operand will be zero extended.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isZExt() const noexcept { return hasOpFlag(OpRWFlags::kZExt); }
//! Tests whether the operand must have allocated a unique physical id that cannot be shared with other register
//! operands.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isUnique() const noexcept { return hasOpFlag(OpRWFlags::kUnique); }
//! \}
//! \name Memory Flags
//! \{
  //! Tests whether this is a fake memory operand, which is only used because of encoding. Fake memory operands do
  //! not access any memory; they are only used to encode registers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemFake() const noexcept { return hasOpFlag(OpRWFlags::kMemFake); }
//! Tests whether the instruction's memory BASE register is used.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemBaseUsed() const noexcept { return hasOpFlag(OpRWFlags::kMemBaseRW); }
//! Tests whether the instruction reads from its BASE registers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemBaseRead() const noexcept { return hasOpFlag(OpRWFlags::kMemBaseRead); }
//! Tests whether the instruction writes to its BASE registers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemBaseWrite() const noexcept { return hasOpFlag(OpRWFlags::kMemBaseWrite); }
//! Tests whether the instruction reads and writes from/to its BASE registers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemBaseReadWrite() const noexcept { return (_opFlags & OpRWFlags::kMemBaseRW) == OpRWFlags::kMemBaseRW; }
//! Tests whether the instruction only reads from its BASE registers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemBaseReadOnly() const noexcept { return (_opFlags & OpRWFlags::kMemBaseRW) == OpRWFlags::kMemBaseRead; }
//! Tests whether the instruction only writes to its BASE registers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemBaseWriteOnly() const noexcept { return (_opFlags & OpRWFlags::kMemBaseRW) == OpRWFlags::kMemBaseWrite; }
//! Tests whether the instruction modifies the BASE register before it uses it to calculate the target address.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemBasePreModify() const noexcept { return hasOpFlag(OpRWFlags::kMemBasePreModify); }
//! Tests whether the instruction modifies the BASE register after it uses it to calculate the target address.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemBasePostModify() const noexcept { return hasOpFlag(OpRWFlags::kMemBasePostModify); }
//! Tests whether the instruction's memory INDEX register is used.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemIndexUsed() const noexcept { return hasOpFlag(OpRWFlags::kMemIndexRW); }
//! Tests whether the instruction reads the INDEX registers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemIndexRead() const noexcept { return hasOpFlag(OpRWFlags::kMemIndexRead); }
//! Tests whether the instruction writes to its INDEX registers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemIndexWrite() const noexcept { return hasOpFlag(OpRWFlags::kMemIndexWrite); }
//! Tests whether the instruction reads and writes from/to its INDEX registers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemIndexReadWrite() const noexcept { return (_opFlags & OpRWFlags::kMemIndexRW) == OpRWFlags::kMemIndexRW; }
//! Tests whether the instruction only reads from its INDEX registers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemIndexReadOnly() const noexcept { return (_opFlags & OpRWFlags::kMemIndexRW) == OpRWFlags::kMemIndexRead; }
//! Tests whether the instruction only writes to its INDEX registers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemIndexWriteOnly() const noexcept { return (_opFlags & OpRWFlags::kMemIndexRW) == OpRWFlags::kMemIndexWrite; }
//! \}
//! \name Physical Register ID
//! \{
//! Returns a physical id of the register that is fixed for this operand.
//!
//! Returns \ref Reg::kIdBad if any register can be used.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t physId() const noexcept { return _physId; }
//! Tests whether \ref physId() would return a valid physical register id.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasPhysId() const noexcept { return _physId != Reg::kIdBad; }
//! Sets physical register id, which would be fixed for this operand.
ASMJIT_INLINE_NODEBUG void setPhysId(uint32_t physId) noexcept { _physId = uint8_t(physId); }
//! \}
//! \name Reg/Mem Information
//! \{
//! Returns Reg/Mem size of the operand.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t rmSize() const noexcept { return _rmSize; }
//! Sets Reg/Mem size of the operand.
ASMJIT_INLINE_NODEBUG void setRmSize(uint32_t rmSize) noexcept { _rmSize = uint8_t(rmSize); }
//! \}
//! \name Read & Write Masks
//! \{
//! Returns read mask.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint64_t readByteMask() const noexcept { return _readByteMask; }
//! Sets read mask.
ASMJIT_INLINE_NODEBUG void setReadByteMask(uint64_t mask) noexcept { _readByteMask = mask; }
//! Returns write mask.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint64_t writeByteMask() const noexcept { return _writeByteMask; }
//! Sets write mask.
ASMJIT_INLINE_NODEBUG void setWriteByteMask(uint64_t mask) noexcept { _writeByteMask = mask; }
//! Returns extend mask.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint64_t extendByteMask() const noexcept { return _extendByteMask; }
//! Sets extend mask.
ASMJIT_INLINE_NODEBUG void setExtendByteMask(uint64_t mask) noexcept { _extendByteMask = mask; }
//! \}
};
//! Flags used by \ref InstRWInfo.
enum class InstRWFlags : uint32_t {
//! No flags.
kNone = 0x00000000u,
//! Describes a move operation.
//!
//! This flag is used by RA to eliminate moves that are guaranteed to be moves only.
kMovOp = 0x00000001u
};
ASMJIT_DEFINE_ENUM_FLAGS(InstRWFlags)
//! Read/Write information of an instruction.
struct InstRWInfo {
//! \name Members
//! \{
//! Instruction flags (there are no flags at the moment, this field is reserved).
InstRWFlags _instFlags;
//! CPU flags read.
CpuRWFlags _readFlags;
//! CPU flags written.
CpuRWFlags _writeFlags;
//! Count of operands.
uint8_t _opCount;
//! CPU feature required for replacing register operand with memory operand.
uint8_t _rmFeature;
//! Reserved for future use.
uint8_t _reserved[18];
//! Read/Write info of extra register (rep{} or kz{}).
OpRWInfo _extraReg;
//! Read/Write info of instruction operands.
OpRWInfo _operands[Globals::kMaxOpCount];
//! \}
//! \name Commons
//! \{
//! Resets this RW information to all zeros.
ASMJIT_INLINE_NODEBUG void reset() noexcept { *this = InstRWInfo{}; }
//! \}
//! \name Instruction Flags
//! \{
//! Returns flags associated with the instruction, see \ref InstRWFlags.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG InstRWFlags instFlags() const noexcept { return _instFlags; }
//! Tests whether the instruction flags contain `flag`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasInstFlag(InstRWFlags flag) const noexcept { return Support::test(_instFlags, flag); }
//! Tests whether the instruction flags contain \ref InstRWFlags::kMovOp.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMovOp() const noexcept { return hasInstFlag(InstRWFlags::kMovOp); }
//! \}
//! \name CPU Flags Information
//! \{
//! Returns a mask of CPU flags read.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG CpuRWFlags readFlags() const noexcept { return _readFlags; }
//! Returns a mask of CPU flags written.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG CpuRWFlags writeFlags() const noexcept { return _writeFlags; }
//! \}
//! \name Reg/Mem Information
//! \{
  //! Returns the CPU feature required to replace a register operand with a memory operand. If the returned feature
  //! is zero (none), then this instruction either doesn't provide a register/memory operand combination or no extra
  //! CPU feature is required.
//!
//! X86 Specific
//! ------------
//!
//! Some AVX+ instructions may require extra features for replacing registers with memory operands, for example
//! VPSLLDQ instruction only supports `vpslldq reg, reg, imm` combination on AVX/AVX2 capable CPUs and requires
//! AVX-512 for `vpslldq reg, mem, imm` combination.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t rmFeature() const noexcept { return _rmFeature; }
//! \}
//! \name Operand Read/Write Information
//! \{
//! Returns RW information of extra register operand (extraReg).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const OpRWInfo& extraReg() const noexcept { return _extraReg; }
//! Returns RW information of all instruction's operands.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const OpRWInfo* operands() const noexcept { return _operands; }
//! Returns RW information of the operand at the given `index`.
[[nodiscard]]
inline const OpRWInfo& operand(size_t index) const noexcept {
ASMJIT_ASSERT(index < Globals::kMaxOpCount);
return _operands[index];
}
//! Returns the number of operands this instruction has.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t opCount() const noexcept { return _opCount; }
//! \}
};
//! Validation flags that can be used with \ref InstAPI::validate().
enum class ValidationFlags : uint8_t {
//! No flags.
kNone = 0,
//! Allow virtual registers in the instruction.
kEnableVirtRegs = 0x01u
};
ASMJIT_DEFINE_ENUM_FLAGS(ValidationFlags)
//! Instruction API.
namespace InstAPI {
#ifndef ASMJIT_NO_TEXT
//! Appends the name of the instruction specified by `instId` and `options` into the `output` string.
//!
//! \note Instruction options would only affect instruction prefix & suffix, other options would be ignored.
//! If `options` is zero then only the raw instruction name (without any additional text) will be appended.
ASMJIT_API Error instIdToString(Arch arch, InstId instId, InstStringifyOptions options, String& output) noexcept;
[[deprecated("Use `instIdToString()` with `InstStringifyOptions` parameter")]]
static inline Error instIdToString(Arch arch, InstId instId, String& output) noexcept {
return instIdToString(arch, instId, InstStringifyOptions::kNone, output);
}
//! Parses an instruction name in the given string `s`. Length is specified by `len` argument, which can be
//! `SIZE_MAX` if `s` is known to be null terminated.
//!
//! Returns the parsed instruction id or \ref BaseInst::kIdNone if no such instruction exists.
[[nodiscard]]
ASMJIT_API InstId stringToInstId(Arch arch, const char* s, size_t len) noexcept;
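// Round-trip sketch (assumes the X86 backend is compiled in):
//
//   String name;
//   InstAPI::instIdToString(Arch::kX64, x86::Inst::kIdAdd,
//                           InstStringifyOptions::kNone, name);  // "add"
//   InstId id = InstAPI::stringToInstId(Arch::kX64, "add", SIZE_MAX);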
#endif // !ASMJIT_NO_TEXT
#ifndef ASMJIT_NO_VALIDATION
//! Validates the given instruction considering the given `validationFlags`.
[[nodiscard]]
ASMJIT_API Error validate(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, ValidationFlags validationFlags = ValidationFlags::kNone) noexcept;
#endif // !ASMJIT_NO_VALIDATION
#ifndef ASMJIT_NO_INTROSPECTION
//! Gets Read/Write information of the given instruction.
ASMJIT_API Error queryRWInfo(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, InstRWInfo* out) noexcept;
//! Gets CPU features required by the given instruction.
ASMJIT_API Error queryFeatures(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, CpuFeatures* out) noexcept;
#endif // !ASMJIT_NO_INTROSPECTION
} // {InstAPI}
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_INST_H_INCLUDED

View File

@@ -1,142 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/instdb_p.h"
ASMJIT_BEGIN_NAMESPACE
namespace InstNameUtils {
static constexpr uint32_t kBufferSize = 32;
static ASMJIT_INLINE_CONSTEXPR char decode5BitChar(uint32_t c) noexcept {
uint32_t base = c <= 26 ? uint32_t('a') - 1u : uint32_t('0') - 27u;
return char(base + c);
}
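// The 5-bit code above maps 1..26 to 'a'..'z' and 27..31 to '0'..'4', so for
// example decode5BitChar(1) == 'a' and decode5BitChar(27) == '0' (the value 0
// terminates the small string and is never passed here).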
static ASMJIT_INLINE size_t decodeToBuffer(char nameOut[kBufferSize], uint32_t nameValue, InstStringifyOptions options, const char* stringTable) noexcept {
size_t i;
if (nameValue & 0x80000000u) {
// Small string of 5-bit characters.
//
    // NOTE: Small string optimization never provides additional
    // alias formatting, so we don't have to consider `options`.
for (i = 0; i < 6; i++, nameValue >>= 5) {
uint32_t c = nameValue & 0x1F;
if (c == 0)
break;
nameOut[i] = decode5BitChar(c);
}
return i;
}
else {
size_t prefixBase = nameValue & 0xFFFu;
size_t prefixSize = (nameValue >> 12) & 0xFu;
size_t suffixBase = (nameValue >> 16) & 0xFFFu;
size_t suffixSize = (nameValue >> 28) & 0x7u;
if (Support::test(options, InstStringifyOptions::kAliases) && suffixBase == 0xFFFu) {
// Alias formatting immediately follows the instruction name in string table.
// The first character specifies the length and then string data follows.
prefixBase += prefixSize;
prefixSize = uint8_t(stringTable[prefixBase]);
ASMJIT_ASSERT(prefixSize <= kBufferSize);
prefixBase += 1; // Skip the byte that specifies the length of a formatted alias.
}
for (i = 0; i < prefixSize; i++) {
nameOut[i] = stringTable[prefixBase + i];
}
char* suffixOut = nameOut + prefixSize;
for (i = 0; i < suffixSize; i++) {
suffixOut[i] = stringTable[suffixBase + i];
}
return prefixSize + suffixSize;
}
}
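// Bit layout consumed by decodeToBuffer() above (derived from the field
// extraction in the code):
//
//   bit  31     : 1 -> small string of up to six 5-bit characters
//   bits 28..30 : suffixSize (0..7)
//   bits 16..27 : suffixBase (offset into stringTable)
//   bits 12..15 : prefixSize (0..15)
//   bits  0..11 : prefixBase (offset into stringTable)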
Error decode(uint32_t nameValue, InstStringifyOptions options, const char* stringTable, String& output) noexcept {
char nameData[kBufferSize];
size_t nameSize = decodeToBuffer(nameData, nameValue, options, stringTable);
return output.append(nameData, nameSize);
}
InstId findInstruction(const char* s, size_t len, const uint32_t* nameTable, const char* stringTable, const InstNameIndex& nameIndex) noexcept {
ASMJIT_ASSERT(s != nullptr);
ASMJIT_ASSERT(len > 0u);
uint32_t prefix = uint32_t(s[0]) - uint32_t('a');
if (ASMJIT_UNLIKELY(prefix > uint32_t('z') - uint32_t('a'))) {
return BaseInst::kIdNone;
}
size_t base = nameIndex.data[prefix].start;
size_t end = nameIndex.data[prefix].end;
if (ASMJIT_UNLIKELY(!base)) {
return BaseInst::kIdNone;
}
char nameData[kBufferSize];
for (size_t lim = end - base; lim != 0; lim >>= 1) {
size_t instId = base + (lim >> 1);
size_t nameSize = decodeToBuffer(nameData, nameTable[instId], InstStringifyOptions::kNone, stringTable);
int result = Support::compareStringViews(s, len, nameData, nameSize);
if (result < 0) {
continue;
}
if (result > 0) {
base = instId + 1;
lim--;
continue;
}
return InstId(instId);
}
return BaseInst::kIdNone;
}
uint32_t findAlias(const char* s, size_t len, const uint32_t* nameTable, const char* stringTable, uint32_t aliasNameCount) noexcept {
ASMJIT_ASSERT(s != nullptr);
ASMJIT_ASSERT(len > 0u);
size_t base = 0;
char nameData[kBufferSize];
for (size_t lim = size_t(aliasNameCount) - base; lim != 0; lim >>= 1) {
size_t index = base + (lim >> 1);
size_t nameSize = decodeToBuffer(nameData, nameTable[index], InstStringifyOptions::kNone, stringTable);
int result = Support::compareStringViews(s, len, nameData, nameSize);
if (result < 0) {
continue;
}
if (result > 0) {
base = index + 1;
lim--;
continue;
}
return uint32_t(index);
}
return Globals::kInvalidId;
}
} // {InstNameUtils}
ASMJIT_END_NAMESPACE

View File

@@ -1,41 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_INSTDB_P_H_INCLUDED
#define ASMJIT_CORE_INSTDB_P_H_INCLUDED
#include "../core/inst.h"
#include "../core/string.h"
ASMJIT_BEGIN_NAMESPACE
//! \cond INTERNAL
//! \addtogroup asmjit_instruction_db
//! \{
struct InstNameIndex {
struct Span {
uint16_t start;
uint16_t end;
};
Span data[26];
uint16_t maxNameLength;
};
namespace InstNameUtils {
Error decode(uint32_t nameValue, InstStringifyOptions options, const char* stringTable, String& output) noexcept;
InstId findInstruction(const char* s, size_t len, const uint32_t* nameTable, const char* stringTable, const InstNameIndex& nameIndex) noexcept;
uint32_t findAlias(const char* s, size_t len, const uint32_t* nameTable, const char* stringTable, uint32_t aliasNameCount) noexcept;
} // {InstNameUtils}
//! \}
//! \endcond
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_INSTDB_P_H_INCLUDED

File diff suppressed because it is too large

View File

@@ -1,576 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_JITALLOCATOR_H_INCLUDED
#define ASMJIT_CORE_JITALLOCATOR_H_INCLUDED
#include "../core/api-config.h"
#ifndef ASMJIT_NO_JIT
#include "../core/globals.h"
#include "../core/support.h"
#include "../core/virtmem.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_virtual_memory
//! \{
//! Options used by \ref JitAllocator.
enum class JitAllocatorOptions : uint32_t {
//! No options.
kNone = 0,
//! Enables the use of anonymous memory-mapped memory that is mapped into two buffers, each having a different pointer.
//! The first buffer has read and execute permissions and the second buffer has read+write permissions.
//!
//! See \ref VirtMem::allocDualMapping() for more details about this feature.
//!
//! \remarks Dual mapping would be automatically turned on by \ref JitAllocator in case of a hardened runtime that
//! enforces the `W^X` policy, so specifying this flag essentially forces the use of dual mapped pages even when RWX
//! pages can be allocated and dual mapping is not necessary.
kUseDualMapping = 0x00000001u,
//! Enables the use of multiple pools with increasing granularity instead of a single pool. This flag would enable
//! 3 internal pools in total having 64, 128, and 256 bytes granularity.
//!
//! This feature is only recommended for users that generate a lot of code and would like to minimize the overhead
//! of `JitAllocator` itself by having blocks of different allocation granularities. Using this feature for only
//! a few allocations won't pay off, as the allocator may need to create more blocks initially before it can take
//! advantage of variable block granularity.
kUseMultiplePools = 0x00000002u,
//! Always fill reserved memory by a fill-pattern.
//!
//! Causes a new block to be cleared by the fill pattern and freshly released memory to be cleared before making
//! it ready for another use.
kFillUnusedMemory = 0x00000004u,
//! When this flag is set the allocator would immediately release unused blocks during `release()` or `reset()`.
//! When this flag is not set the allocator would keep one empty block in each pool to prevent excessive virtual
//! memory allocations and deallocations in border cases, which involve constantly allocating and deallocating a
//! single block caused by repetitively calling `alloc()` and `release()` when the allocator has either no blocks
//! or has all blocks fully occupied.
kImmediateRelease = 0x00000008u,
//! This flag enables placing functions (or allocating memory) at the very beginning of each memory mapped region.
//!
//! Initially, this was the default behavior. However, LLVM developers working on undefined behavior sanitizer
//! (UBSAN) decided that they want to store metadata before each function and to access such metadata before an
//! indirect function call. This means that the instrumented code always reads from `[fnPtr - 8]` to decode whether
//! the function has its metadata present. However, reading 8 bytes below a function means that if a function is
//! placed at the very beginning of a memory mapped region, it could try to read bytes that are inaccessible. And
//! since AsmJit can be compiled as a shared library and used by applications instrumented by UBSAN, it's not
//! possible to conditionally compile the support only when necessary.
//!
//! \remarks This flag controls a workaround to make it possible to use LLVM UBSAN with AsmJit's \ref JitAllocator.
//! There is no undefined behavior even when `kDisableInitialPadding` is used, however, that doesn't really matter
//! as LLVM's UBSAN introduces one, and according to LLVM developers it's a "trade-off". This flag is safe to use
//! when the code is not instrumented with LLVM's UBSAN.
kDisableInitialPadding = 0x00000010u,
//! Enables the use of large pages, if they are supported and the process can actually allocate them.
//!
//! \remarks This flag is a hint - if large pages can be allocated, JitAllocator would try to allocate them.
//! However, if the allocation fails, it will still fall back to regular pages, as \ref JitAllocator
//! is designed to minimize allocation failures, so a regular page is better than no page at all. Also, if a
//! block \ref JitAllocator wants to allocate is too small to consume a whole large page, regular page(s) will
//! be allocated as well.
kUseLargePages = 0x00000020u,
//! Forces \ref JitAllocator to always align block size to be at least as big as a large page, if large pages are
//! enabled. This option does nothing if large pages are disabled.
//!
//! \remarks If the \ref kUseLargePages option is used, the allocator would prefer large pages only when allocating
//! a block that has a sufficient size. Usually the allocator first allocates a smaller block, and when more requests
//! come it starts increasing the block size of subsequent allocations. This option makes sure that even the first
//! allocated block is at least as big as a minimum large page when large pages are enabled and can be allocated.
kAlignBlockSizeToLargePage = 0x00000040u,
//! Use a custom fill pattern, must be combined with \ref kFillUnusedMemory.
kCustomFillPattern = 0x10000000u
};
ASMJIT_DEFINE_ENUM_FLAGS(JitAllocatorOptions)
//! A simple implementation of a memory manager that uses `asmjit::VirtMem`
//! functions to manage virtual memory for JIT compiled code.
//!
//! Implementation notes:
//!
//! - Granularity of allocated blocks is different from the granularity of a typical C malloc. In addition, the
//! allocator can use several memory pools having a different granularity to minimize the maintenance overhead.
//! The multiple pools feature requires the \ref JitAllocatorOptions::kUseMultiplePools flag to be set.
//!
//! - The allocator doesn't store any information in executable memory; instead, the implementation uses two
//! bit-vectors to manage allocated memory of each allocator-block. The first bit-vector, called 'used', is used to
//! track used memory (where each bit represents a memory size defined by granularity) and the second bit-vector,
//! called 'stop', is used as a sentinel to mark where the allocated area ends.
//!
//! - Internally, the allocator also uses RB tree to keep track of all blocks across all pools. Each inserted block is
//! added to the tree so it can be matched fast during `release()` and `shrink()`.
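//!
//! A minimal usage sketch (error handling elided; `code` and `codeSize` are assumed to hold machine code
//! produced elsewhere):
//!
//! ```
//! JitAllocator allocator;
//!
//! // Allocate a span large enough for the code.
//! JitAllocator::Span span;
//! allocator.alloc(span, codeSize);
//!
//! // Copy the code in - write() takes care of permissions and instruction cache flushing.
//! allocator.write(span, [&](JitAllocator::Span& span) noexcept -> Error {
//!   memcpy(span.rw(), code, codeSize);
//!   return kErrorOk;
//! });
//!
//! // The code is now executable via span.rx(); release it when no longer needed.
//! allocator.release(span.rx());
//! ```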
class JitAllocator {
public:
ASMJIT_NONCOPYABLE(JitAllocator)
//! Visible \ref JitAllocator implementation data.
struct Impl {
//! Allocator options.
JitAllocatorOptions options;
//! Base block size (0 if the allocator is not initialized).
uint32_t blockSize;
//! Base granularity (0 if the allocator is not initialized).
uint32_t granularity;
//! A pattern that is used to fill unused memory if \ref JitAllocatorOptions::kFillUnusedMemory is enabled.
uint32_t fillPattern;
};
//! \name Members
//! \{
//! Allocator implementation (private).
Impl* _impl;
//! \}
//! \name Construction & Destruction
//! \{
//! Parameters that can be passed to `JitAllocator` constructor.
//!
//! Use it like this:
//!
//! ```
//! // Zero initialize (zero means the default value) and change what you need.
//! JitAllocator::CreateParams params {};
//! params.blockSize = 1024 * 1024;
//!
//! // Create the allocator.
//! JitAllocator allocator(&params);
//! ```
struct CreateParams {
//! Allocator options.
//!
//! No options are used by default.
JitAllocatorOptions options = JitAllocatorOptions::kNone;
//! Base size of a single block in bytes (default 64kB).
//!
//! \remarks Block size must be equal to or greater than page size and must be a power of 2. If the input is not
//! valid then the default block size will be used instead.
uint32_t blockSize = 0;
//! Base granularity (and also natural alignment) of allocations in bytes (default 64).
//!
//! Since the `JitAllocator` uses bit-arrays to mark used memory, the granularity also specifies how many bytes
//! correspond to a single bit in such a bit-array. Higher granularity means more waste of virtual memory (as it
//! increases the natural alignment), but smaller bit-arrays, as fewer bits would be required per block.
uint32_t granularity = 0;
//! Pattern to use to fill unused memory.
//!
//! Only used if \ref JitAllocatorOptions::kCustomFillPattern is set.
uint32_t fillPattern = 0;
//! Resets the content of `CreateParams`.
ASMJIT_INLINE_NODEBUG void reset() noexcept { *this = CreateParams{}; }
};
//! Creates a `JitAllocator` instance.
ASMJIT_API explicit JitAllocator(const CreateParams* params = nullptr) noexcept;
//! Destroys the `JitAllocator` instance and releases all blocks held.
ASMJIT_API ~JitAllocator() noexcept;
//! Tests whether the allocator is initialized (a zero block size means the allocator is not initialized).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isInitialized() const noexcept { return _impl->blockSize != 0; }
//! Free all allocated memory - makes all pointers returned by `alloc()` invalid.
//!
//! \remarks This function is not thread-safe as it's designed to be used only when nobody else is using the
//! allocator. The reason is that there is no point in calling `reset()` when the allocator is still in use.
ASMJIT_API void reset(ResetPolicy resetPolicy = ResetPolicy::kSoft) noexcept;
//! \}
//! \name Accessors
//! \{
//! Returns allocator options, see \ref JitAllocatorOptions.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG JitAllocatorOptions options() const noexcept { return _impl->options; }
//! Tests whether the allocator has the given `option` set.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasOption(JitAllocatorOptions option) const noexcept { return uint32_t(_impl->options & option) != 0; }
//! Returns a base block size (a minimum size of block that the allocator would allocate).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t blockSize() const noexcept { return _impl->blockSize; }
//! Returns granularity of the allocator.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t granularity() const noexcept { return _impl->granularity; }
//! Returns the pattern that is used to fill unused memory if \ref kFillUnusedMemory is set.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t fillPattern() const noexcept { return _impl->fillPattern; }
//! \}
//! \name Alloc & Release
//! \{
//! A memory reference returned by \ref JitAllocator::alloc().
//!
//! Span contains everything needed to actually write new code to the memory chunk it references.
class Span {
public:
//! \name Constants
//! \{
//! Span flags
enum class Flags : uint32_t {
//! No flags.
kNone = 0u,
//! The process has never executed the region of the span.
//!
//! If this flag is set on a \ref Span it means that the allocator can avoid flushing the
//! instruction cache after code has been written to it.
kInstructionCacheClean = 0x00000001u
};
//! \}
//! \name Members
//! \{
//! Address of memory that has Read and Execute permissions.
void* _rx = nullptr;
//! Address of memory that has Read and Write permissions.
void* _rw = nullptr;
//! Size of the span in bytes (rounded up to the allocation granularity).
size_t _size = 0;
//! Pointer that references a memory block maintained by \ref JitAllocator.
//!
//! This pointer is considered private and should never be used nor inspected outside of AsmJit.
void* _block = nullptr;
//! Span flags.
Flags _flags = Flags::kNone;
//! Reserved for future use.
uint32_t _reserved = 0;
//! \}
//! \name Accessors
//! \{
//! Returns a pointer having Read & Execute permissions (references executable memory).
//!
//! This pointer is never NULL if the allocation succeeded; it points to executable memory.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG void* rx() const noexcept { return _rx; }
//! Returns a pointer having Read & Write permissions (references writable memory).
//!
//! Depending on the type of the allocation strategy this could either be:
//!
//! - the same address as returned by `rx()` if the allocator uses RWX mapping (pages have all of Read, Write,
//! and Execute permissions) or MAP_JIT, which requires either \ref VirtMem::ProtectJitReadWriteScope or to
//! call \ref VirtMem::protectJitMemory() manually.
//! - a valid pointer, but not the same as `rx` - this would be valid if dual mapping is used.
//! - NULL pointer, in case that the allocation strategy doesn't use RWX, MAP_JIT, or dual mapping. In this
//! case only \ref JitAllocator can copy new code into the executable memory referenced by \ref Span.
//!
//! \note If `rw()` returns a non-null pointer it's important to use either VirtMem::protectJitMemory() or
//! \ref VirtMem::ProtectJitReadWriteScope to guard the write, because in case of `MAP_JIT` it would temporarily
//! switch the permissions of the pointer to RW (that's per thread permissions).
//!
//! If \ref VirtMem::ProtectJitReadWriteScope is not used it's important to clear the instruction cache via
//! \ref VirtMem::flushInstructionCache() after the write is done.
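//!
//! A minimal sketch of a guarded write (assuming `span.isDirectlyWritable()` returned true and that
//! `code` / `codeSize` hold the machine code to copy):
//!
//! ```
//! {
//!   // Temporarily makes the memory writable (relevant for MAP_JIT) and
//!   // flushes the instruction cache when the scope ends.
//!   VirtMem::ProtectJitReadWriteScope scope(span.rx(), span.size());
//!   memcpy(span.rw(), code, codeSize);
//! }
//! ```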
[[nodiscard]]
ASMJIT_INLINE_NODEBUG void* rw() const noexcept { return _rw; }
//! Returns size of this span, aligned to the allocator granularity.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t size() const noexcept { return _size; }
//! Returns span flags.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Flags flags() const noexcept { return _flags; }
//! Shrinks this span to `newSize`.
//!
//! \note This is the only function that is able to change the size of a span, and its only use case is to
//! shrink the span size during \ref JitAllocator::write(). When the writer detects that the span size shrunk,
//! it will automatically shrink the memory used by the span, and propagate the new aligned size to the caller.
ASMJIT_INLINE_NODEBUG void shrink(size_t newSize) noexcept { _size = Support::min(_size, newSize); }
//! Returns whether \ref rw() returns a non-null pointer.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isDirectlyWritable() const noexcept { return _rw != nullptr; }
//! \}
};
//! Allocates a new memory span of the requested `size`.
[[nodiscard]]
ASMJIT_API Error alloc(Span& out, size_t size) noexcept;
//! Releases a memory block returned by `alloc()`.
//!
//! \remarks This function is thread-safe.
ASMJIT_API Error release(void* rx) noexcept;
//! Frees extra memory by shrinking the given `span` to `newSize`.
//!
//! \remarks This function is thread-safe.
ASMJIT_API Error shrink(Span& span, size_t newSize) noexcept;
//! Queries information about an allocated memory block that contains the given `rx`, and writes it to `out`.
//!
//! If the pointer is matched, the function returns `kErrorOk` and fills `out` with the corresponding span.
[[nodiscard]]
ASMJIT_API Error query(Span& out, void* rx) const noexcept;
//! \}
//! \name Write Operations
//! \{
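//! A callback used by \ref JitAllocator::write() and \ref WriteScope::write() - it receives a \ref Span that
//! references the memory to write to, together with the opaque `userData` supplied by the caller.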
using WriteFunc = Error (ASMJIT_CDECL*)(Span& span, void* userData) noexcept;
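//! Copies `size` bytes from `src` into the memory referenced by `span` at the given `offset`.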
ASMJIT_API Error write(
Span& span,
size_t offset,
const void* src,
size_t size,
VirtMem::CachePolicy policy = VirtMem::CachePolicy::kDefault) noexcept;
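//! Calls `writeFunc` with `span` and `userData` so the callback can write into the memory referenced by `span`.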
ASMJIT_API Error write(
Span& span,
WriteFunc writeFunc,
void* userData,
VirtMem::CachePolicy policy = VirtMem::CachePolicy::kDefault) noexcept;
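//! Calls `lambdaFunc` with `span` so the lambda can write into the memory referenced by `span`.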
template<class Lambda>
ASMJIT_INLINE Error write(
Span& span,
Lambda&& lambdaFunc,
VirtMem::CachePolicy policy = VirtMem::CachePolicy::kDefault) noexcept {
WriteFunc wrapperFunc = [](Span& span, void* userData) noexcept -> Error {
Lambda& lambdaFunc = *static_cast<Lambda*>(userData);
return lambdaFunc(span);
};
return write(span, wrapperFunc, (void*)(&lambdaFunc), policy);
}
//! \}
//! \name Write Operations with Scope
//! \{
//! \cond INTERNAL
//! Write scope data.
//!
//! This is mostly for internal purposes, please use \ref WriteScope instead.
struct WriteScopeData {
//! \name Members
//! \{
//! Link to the allocator.
JitAllocator* _allocator;
//! Cache policy passed to \ref JitAllocator::beginWriteScope().
VirtMem::CachePolicy _policy;
//! Internal flags used by the implementation.
uint32_t _flags;
//! Internal data used by the implementation.
size_t _data[64];
//! \}
};
//! Begins a write `scope`.
//!
//! This is mostly for internal purposes, please use \ref WriteScope constructor instead.
ASMJIT_API Error beginWriteScope(WriteScopeData& scope, VirtMem::CachePolicy policy = VirtMem::CachePolicy::kDefault) noexcept;
//! Ends a write `scope`.
//!
//! This is mostly for internal purposes, please use \ref WriteScope destructor instead.
ASMJIT_API Error endWriteScope(WriteScopeData& scope) noexcept;
//! Flushes accumulated changes in a write `scope`.
//!
//! This is mostly for internal purposes, please use \ref WriteScope destructor or \ref WriteScope::flush() instead.
ASMJIT_API Error flushWriteScope(WriteScopeData& scope) noexcept;
//! Alternative to `JitAllocator::write(span, offset, src, size)`, but under a write `scope`.
//!
//! This is mostly for internal purposes, please use \ref WriteScope::write() instead.
ASMJIT_API Error scopedWrite(WriteScopeData& scope, Span& span, size_t offset, const void* src, size_t size) noexcept;
//! Alternative to `JitAllocator::write(span, writeFunc, userData)`, but under a write `scope`.
//!
//! This is mostly for internal purposes, please use \ref WriteScope::write() instead.
ASMJIT_API Error scopedWrite(WriteScopeData& scope, Span& span, WriteFunc writeFunc, void* userData) noexcept;
//! Alternative to `JitAllocator::write(span, [lambda])`, but under a write `scope`.
//!
//! This is mostly for internal purposes, please use \ref WriteScope::write() instead.
template<class Lambda>
inline Error scopedWrite(WriteScopeData& scope, Span& span, Lambda&& lambdaFunc) noexcept {
WriteFunc wrapperFunc = [](Span& span, void* userData) noexcept -> Error {
Lambda& lambdaFunc = *static_cast<Lambda*>(userData);
return lambdaFunc(span);
};
return scopedWrite(scope, span, wrapperFunc, (void*)(&lambdaFunc));
}
//! \endcond
//! Write scope can be used to create a single scope that is optimized for writing multiple spans.
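//!
//! A minimal sketch (assuming `allocator` exists and `span` was returned by \ref JitAllocator::alloc();
//! `code` and `codeSize` are placeholders):
//!
//! ```
//! JitAllocator::WriteScope scope(&allocator);
//! scope.write(span, 0, code, codeSize);
//! // Accumulated changes are flushed by the destructor (or call scope.flush() explicitly).
//! ```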
class WriteScope : public WriteScopeData {
public:
ASMJIT_NONCOPYABLE(WriteScope)
//! \name Construction & Destruction
//! \{
//! Begins a write scope.
inline explicit WriteScope(JitAllocator* allocator, VirtMem::CachePolicy policy = VirtMem::CachePolicy::kDefault) noexcept {
allocator->beginWriteScope(*this, policy);
}
//! Ends a write scope.
inline ~WriteScope() noexcept {
if (_allocator)
_allocator->endWriteScope(*this);
}
//! \}
//! \name Accessors
//! \{
//! Returns \ref JitAllocator associated with this write scope.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG JitAllocator* allocator() const noexcept { return _allocator; }
//! Returns cache policy this write scope is using.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG VirtMem::CachePolicy policy() const noexcept { return _policy; }
//! \}
//! \name Operations
//! \{
//! Similar to `JitAllocator::write(span, offset, src, size)`, but under a write scope.
ASMJIT_INLINE_NODEBUG Error write(Span& span, size_t offset, const void* src, size_t size) noexcept {
return _allocator->scopedWrite(*this, span, offset, src, size);
}
//! Similar to `JitAllocator::write(span, writeFunc, userData)`, but under a write scope.
ASMJIT_INLINE_NODEBUG Error write(Span& span, WriteFunc writeFunc, void* userData) noexcept {
return _allocator->scopedWrite(*this, span, writeFunc, userData);
}
//! Similar to `JitAllocator::write(span, <lambda>)`, but under a write scope.
template<class Lambda>
ASMJIT_INLINE_NODEBUG Error write(Span& span, Lambda&& lambdaFunc) noexcept {
return _allocator->scopedWrite(*this, span, lambdaFunc);
}
//! Flushes accumulated changes in this write scope.
ASMJIT_INLINE_NODEBUG Error flush() noexcept {
return _allocator->flushWriteScope(*this);
}
//! \}
};
//! \}
//! \name Statistics
//! \{
//! Statistics about `JitAllocator`.
struct Statistics {
//! Number of blocks `JitAllocator` maintains.
size_t _blockCount;
//! Number of active allocations.
size_t _allocationCount;
//! How many bytes are currently used / allocated.
size_t _usedSize;
//! How many bytes are currently reserved by the allocator.
size_t _reservedSize;
//! Allocation overhead (in bytes) required to maintain all blocks.
size_t _overheadSize;
//! Resets the statistics to all zeros.
ASMJIT_INLINE_NODEBUG void reset() noexcept { *this = Statistics{}; }
//! Returns count of blocks managed by `JitAllocator` at the moment.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t blockCount() const noexcept { return _blockCount; }
//! Returns the number of active allocations.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t allocationCount() const noexcept { return _allocationCount; }
//! Returns how many bytes are currently used.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t usedSize() const noexcept { return _usedSize; }
//! Returns the number of bytes unused by the allocator at the moment.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t unusedSize() const noexcept { return _reservedSize - _usedSize; }
//! Returns the total number of bytes reserved by the allocator (sum of sizes of all blocks).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t reservedSize() const noexcept { return _reservedSize; }
//! Returns the number of bytes the allocator needs to manage the allocated memory.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t overheadSize() const noexcept { return _overheadSize; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG double usedSizeAsPercent() const noexcept {
return (double(usedSize()) / (double(reservedSize()) + 1e-16)) * 100.0;
}
[[nodiscard]]
ASMJIT_INLINE_NODEBUG double unusedSizeAsPercent() const noexcept {
return (double(unusedSize()) / (double(reservedSize()) + 1e-16)) * 100.0;
}
[[nodiscard]]
ASMJIT_INLINE_NODEBUG double overheadSizeAsPercent() const noexcept {
return (double(overheadSize()) / (double(reservedSize()) + 1e-16)) * 100.0;
}
};
//! Returns JIT allocator statistics.
//!
//! \remarks This function is thread-safe.
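//!
//! A usage sketch:
//!
//! ```
//! JitAllocator::Statistics stats = allocator.statistics();
//! printf("Used %zu of %zu reserved bytes (%.1f%%)\n",
//!        stats.usedSize(), stats.reservedSize(), stats.usedSizeAsPercent());
//! ```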
[[nodiscard]]
ASMJIT_API Statistics statistics() const noexcept;
//! \}
};
//! \}
ASMJIT_END_NAMESPACE
#endif // !ASMJIT_NO_JIT
#endif // ASMJIT_CORE_JITALLOCATOR_H_INCLUDED

View File

@@ -1,83 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#ifndef ASMJIT_NO_JIT
#include "../core/cpuinfo.h"
#include "../core/jitruntime.h"
ASMJIT_BEGIN_NAMESPACE
JitRuntime::JitRuntime(const JitAllocator::CreateParams* params) noexcept
: _allocator(params) {
_environment = Environment::host();
_environment.setObjectFormat(ObjectFormat::kJIT);
_cpuFeatures = CpuInfo::host().features();
}
JitRuntime::~JitRuntime() noexcept {}
Error JitRuntime::_add(void** dst, CodeHolder* code) noexcept {
*dst = nullptr;
ASMJIT_PROPAGATE(code->flatten());
ASMJIT_PROPAGATE(code->resolveCrossSectionFixups());
size_t estimatedCodeSize = code->codeSize();
if (ASMJIT_UNLIKELY(estimatedCodeSize == 0)) {
return DebugUtils::errored(kErrorNoCodeGenerated);
}
JitAllocator::Span span;
ASMJIT_PROPAGATE(_allocator.alloc(span, estimatedCodeSize));
// Relocate the code.
CodeHolder::RelocationSummary relocationSummary;
Error err = code->relocateToBase(uintptr_t(span.rx()), &relocationSummary);
if (ASMJIT_UNLIKELY(err)) {
_allocator.release(span.rx());
return err;
}
// Recalculate the final code size and shrink the memory we allocated for it
// in case some relocations didn't require records in an address table.
size_t codeSize = estimatedCodeSize - relocationSummary.codeSizeReduction;
// If this is not true, it means that `relocateToBase()` filled in wrong information in `relocationSummary`.
ASMJIT_ASSERT(codeSize == code->codeSize());
Error writeErr = _allocator.write(span, [&](JitAllocator::Span& span) noexcept -> Error {
uint8_t* rw = static_cast<uint8_t*>(span.rw());
for (Section* section : code->_sections) {
size_t offset = size_t(section->offset());
size_t bufferSize = size_t(section->bufferSize());
size_t virtualSize = size_t(section->virtualSize());
ASMJIT_ASSERT(offset + bufferSize <= span.size());
memcpy(rw + offset, section->data(), bufferSize);
if (virtualSize > bufferSize) {
ASMJIT_ASSERT(offset + virtualSize <= span.size());
memset(rw + offset + bufferSize, 0, virtualSize - bufferSize);
}
}
span.shrink(codeSize);
return kErrorOk;
});
// Propagate a failed write and release the span so the memory is not leaked.
if (ASMJIT_UNLIKELY(writeErr)) {
_allocator.release(span.rx());
return writeErr;
}
*dst = span.rx();
return kErrorOk;
}
Error JitRuntime::_release(void* p) noexcept {
return _allocator.release(p);
}
ASMJIT_END_NAMESPACE
#endif // !ASMJIT_NO_JIT

View File

@@ -1,107 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_JITRUNTIME_H_INCLUDED
#define ASMJIT_CORE_JITRUNTIME_H_INCLUDED
#include "../core/api-config.h"
#ifndef ASMJIT_NO_JIT
#include "../core/codeholder.h"
#include "../core/jitallocator.h"
#include "../core/target.h"
ASMJIT_BEGIN_NAMESPACE
class CodeHolder;
//! \addtogroup asmjit_virtual_memory
//! \{
//! JIT execution runtime is a special `Target` that is designed to store and execute generated code.
//!
//! JIT runtime is the easiest way of using AsmJit as it abstracts allocation and deallocation of virtual memory
//! where executable code can be placed and from which it can be executed as well.
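//!
//! A minimal sketch of the typical workflow (x86 shown for brevity; error handling elided):
//!
//! ```
//! JitRuntime rt;
//!
//! CodeHolder code;
//! code.init(rt.environment(), rt.cpuFeatures());
//!
//! x86::Assembler a(&code);
//! a.mov(x86::eax, 1);
//! a.ret();
//!
//! using Func = int (*)(void);
//! Func fn;
//! rt.add(&fn, &code);    // Copies and relocates the code into executable memory.
//!
//! int result = fn();     // Executes the generated code.
//! rt.release(fn);        // Releases the memory when no longer needed.
//! ```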
class ASMJIT_VIRTAPI JitRuntime : public Target {
public:
ASMJIT_NONCOPYABLE(JitRuntime)
//! Virtual memory allocator.
JitAllocator _allocator;
//! \name Construction & Destruction
//! \{
//! Creates a `JitRuntime` instance.
ASMJIT_API explicit JitRuntime(const JitAllocator::CreateParams* params = nullptr) noexcept;
//! Creates a `JitRuntime` instance.
ASMJIT_INLINE explicit JitRuntime(const JitAllocator::CreateParams& params) noexcept
: JitRuntime(&params) {}
//! Destroys the `JitRuntime` instance.
ASMJIT_API ~JitRuntime() noexcept override;
//! \}
//! \name Accessors
//! \{
//! Resets the \ref JitRuntime, freeing everything that was allocated by it.
//!
//! Depending on `resetPolicy` the currently held memory is either freed entirely (\ref ResetPolicy::kHard) or
//! partially kept for future allocations (\ref ResetPolicy::kSoft, the default behavior).
ASMJIT_INLINE_NODEBUG void reset(ResetPolicy resetPolicy = ResetPolicy::kSoft) noexcept {
_allocator.reset(resetPolicy);
}
//! Returns the associated `JitAllocator`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG JitAllocator* allocator() const noexcept { return const_cast<JitAllocator*>(&_allocator); }
//! \}
//! \name Utilities
//! \{
// NOTE: To allow passing function pointers to `add()` and `release()` the
// virtual methods are prefixed with `_` and called from templates instead.
//! Allocates memory needed for the code stored in `CodeHolder` and relocates the code to the allocated pointer.
//!
//! The beginning of the memory allocated for the function is returned in `dst`. On failure an `Error` code is
//! returned and `dst` is explicitly set to `nullptr` (this means that you don't have to set it to null before
//! calling `add()`).
template<typename Func>
ASMJIT_INLINE_NODEBUG Error add(Func* dst, CodeHolder* code) noexcept {
return _add(Support::ptr_cast_impl<void**, Func*>(dst), code);
}
//! Releases `p` which was obtained by calling `add()`.
template<typename Func>
ASMJIT_INLINE_NODEBUG Error release(Func p) noexcept {
return _release(Support::ptr_cast_impl<void*, Func>(p));
}
//! Type-unsafe version of `add()`.
ASMJIT_API virtual Error _add(void** dst, CodeHolder* code) noexcept;
//! Type-unsafe version of `release()`.
ASMJIT_API virtual Error _release(void* p) noexcept;
//! \}
};
//! \}
ASMJIT_END_NAMESPACE
#endif // !ASMJIT_NO_JIT
#endif // ASMJIT_CORE_JITRUNTIME_H_INCLUDED

View File

@@ -1,79 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#ifndef ASMJIT_NO_LOGGING
#include "../core/logger.h"
#include "../core/string.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
// Logger - Implementation
// =======================
Logger::Logger() noexcept
: _options() {}
Logger::~Logger() noexcept {}
// [[pure virtual]]
Error Logger::_log(const char* data, size_t size) noexcept {
DebugUtils::unused(data, size);
// Do not error in this case - the logger would just sink to /dev/null.
return kErrorOk;
}
Error Logger::logf(const char* fmt, ...) noexcept {
Error err;
va_list ap;
va_start(ap, fmt);
err = logv(fmt, ap);
va_end(ap);
return err;
}
Error Logger::logv(const char* fmt, va_list ap) noexcept {
StringTmp<2048> sb;
ASMJIT_PROPAGATE(sb.appendVFormat(fmt, ap));
return log(sb);
}
// FileLogger - Implementation
// ===========================
FileLogger::FileLogger(FILE* file) noexcept
: _file(file) {}
FileLogger::~FileLogger() noexcept {}
Error FileLogger::_log(const char* data, size_t size) noexcept {
if (!_file) {
return kErrorOk;
}
if (size == SIZE_MAX) {
size = strlen(data);
}
fwrite(data, 1, size, _file);
return kErrorOk;
}
// StringLogger - Implementation
// =============================
StringLogger::StringLogger() noexcept {}
StringLogger::~StringLogger() noexcept {}
Error StringLogger::_log(const char* data, size_t size) noexcept {
return _content.append(data, size);
}
ASMJIT_END_NAMESPACE
#endif // !ASMJIT_NO_LOGGING

View File

@@ -1,222 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_LOGGING_H_INCLUDED
#define ASMJIT_CORE_LOGGING_H_INCLUDED
#include "../core/inst.h"
#include "../core/string.h"
#include "../core/formatter.h"
#ifndef ASMJIT_NO_LOGGING
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_logging
//! \{
//! Logging interface.
//!
//! This class can be inherited and reimplemented to fit into your own logging needs. When reimplementing a logger,
//! override the \ref Logger::_log() method to customize the output.
//!
//! There are two `Logger` implementations offered by AsmJit:
//! - \ref FileLogger - logs into a `FILE*`.
//! - \ref StringLogger - concatenates all logs into a \ref String.
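//!
//! A minimal sketch that logs generated assembly to `stdout` (assuming a `CodeHolder code` exists):
//!
//! ```
//! FileLogger logger(stdout);
//! code.setLogger(&logger);
//! ```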
class ASMJIT_VIRTAPI Logger {
public:
ASMJIT_BASE_CLASS(Logger)
ASMJIT_NONCOPYABLE(Logger)
//! Format options.
FormatOptions _options;
//! \name Construction & Destruction
//! \{
//! Creates a `Logger` instance.
ASMJIT_API Logger() noexcept;
//! Destroys the `Logger` instance.
ASMJIT_API virtual ~Logger() noexcept;
//! \}
//! \name Format Options
//! \{
//! Returns \ref FormatOptions of this logger.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG FormatOptions& options() noexcept { return _options; }
//! \overload
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const FormatOptions& options() const noexcept { return _options; }
//! Sets formatting options of this Logger to `options`.
ASMJIT_INLINE_NODEBUG void setOptions(const FormatOptions& options) noexcept { _options = options; }
//! Resets formatting options of this Logger to defaults.
ASMJIT_INLINE_NODEBUG void resetOptions() noexcept { _options.reset(); }
//! Returns formatting flags.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG FormatFlags flags() const noexcept { return _options.flags(); }
//! Tests whether the logger has the given `flag` enabled.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasFlag(FormatFlags flag) const noexcept { return _options.hasFlag(flag); }
//! Sets formatting flags to `flags`.
ASMJIT_INLINE_NODEBUG void setFlags(FormatFlags flags) noexcept { _options.setFlags(flags); }
//! Enables the given formatting `flags`.
ASMJIT_INLINE_NODEBUG void addFlags(FormatFlags flags) noexcept { _options.addFlags(flags); }
//! Disables the given formatting `flags`.
ASMJIT_INLINE_NODEBUG void clearFlags(FormatFlags flags) noexcept { _options.clearFlags(flags); }
//! Returns indentation of a given indentation `group`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t indentation(FormatIndentationGroup type) const noexcept { return _options.indentation(type); }
//! Sets indentation of the given indentation `group` to `n` spaces.
ASMJIT_INLINE_NODEBUG void setIndentation(FormatIndentationGroup type, uint32_t n) noexcept { _options.setIndentation(type, n); }
//! Resets indentation of the given indentation `group` to 0 spaces.
ASMJIT_INLINE_NODEBUG void resetIndentation(FormatIndentationGroup type) noexcept { _options.resetIndentation(type); }
//! Returns padding of a given padding `group`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t padding(FormatPaddingGroup type) const noexcept { return _options.padding(type); }
//! Sets padding of a given padding `group` to `n`.
ASMJIT_INLINE_NODEBUG void setPadding(FormatPaddingGroup type, uint32_t n) noexcept { _options.setPadding(type, n); }
//! Resets padding of a given padding `group` to 0, which means that a default will be used.
ASMJIT_INLINE_NODEBUG void resetPadding(FormatPaddingGroup type) noexcept { _options.resetPadding(type); }
//! \}
//! \name Logging Interface
//! \{
//! Logs `data` - must be reimplemented.
//!
//! The function can accept either a null terminated string if `size` is `SIZE_MAX` or a non-null terminated
//! string of the given `size`. The function cannot assume that the data is null terminated and must handle
//! non-null terminated inputs.
ASMJIT_API virtual Error _log(const char* data, size_t size) noexcept;
//! Logs string `str`, which is either null terminated (when `size` is `SIZE_MAX`) or has the given `size`.
ASMJIT_INLINE_NODEBUG Error log(const char* data, size_t size = SIZE_MAX) noexcept { return _log(data, size); }
//! Logs content of a string `str`.
ASMJIT_INLINE_NODEBUG Error log(const String& str) noexcept { return _log(str.data(), str.size()); }
//! Formats the message by using `snprintf()` and then passes the formatted string to \ref _log().
ASMJIT_API Error logf(const char* fmt, ...) noexcept;
//! Formats the message by using `vsnprintf()` and then passes the formatted string to \ref _log().
ASMJIT_API Error logv(const char* fmt, va_list ap) noexcept;
//! \}
};
//! Logger that can log to a `FILE*`.
class ASMJIT_VIRTAPI FileLogger : public Logger {
public:
ASMJIT_NONCOPYABLE(FileLogger)
FILE* _file;
//! \name Construction & Destruction
//! \{
//! Creates a new `FileLogger` that logs to `FILE*`.
ASMJIT_API FileLogger(FILE* file = nullptr) noexcept;
//! Destroys the `FileLogger`.
ASMJIT_API ~FileLogger() noexcept override;
//! \}
//! \name Accessors
//! \{
//! Returns the logging output stream or null if the logger has no output stream.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG FILE* file() const noexcept { return _file; }
//! Sets the logging output stream to `stream` or null.
//!
//! \note If `file` is null, logging is disabled. When a logger is attached to `CodeHolder` or any emitter,
//! the logging API is always called regardless of the output file. This means that if you really want to
//! disable logging at the emitter level you must not attach a logger to it.
ASMJIT_INLINE_NODEBUG void setFile(FILE* file) noexcept { _file = file; }
//! \}
ASMJIT_API Error _log(const char* data, size_t size = SIZE_MAX) noexcept override;
};
//! Logger that stores everything in an internal string buffer.
class ASMJIT_VIRTAPI StringLogger : public Logger {
public:
ASMJIT_NONCOPYABLE(StringLogger)
//! Logger data as string.
String _content;
//! \name Construction & Destruction
//! \{
//! Creates a new `StringLogger`.
ASMJIT_API StringLogger() noexcept;
//! Destroys the `StringLogger`.
ASMJIT_API ~StringLogger() noexcept override;
//! \}
//! \name Logger Data Accessors
//! \{
//! Returns the content of the logger as \ref String.
//!
//! It can be moved, if desired.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG String& content() noexcept { return _content; }
//! \overload
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const String& content() const noexcept { return _content; }
//! Returns aggregated logger data as `char*` pointer.
//!
//! The pointer is owned by `StringLogger`; it must not be modified or freed.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const char* data() const noexcept { return _content.data(); }
//! Returns size of the data returned by `data()`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t dataSize() const noexcept { return _content.size(); }
//! \}
//! \name Logger Data Manipulation
//! \{
//! Clears the accumulated logger data.
ASMJIT_INLINE_NODEBUG void clear() noexcept { _content.clear(); }
//! \}
ASMJIT_API Error _log(const char* data, size_t size = SIZE_MAX) noexcept override;
};
//! \}
ASMJIT_END_NAMESPACE
#endif // !ASMJIT_NO_LOGGING
#endif // ASMJIT_CORE_LOGGING_H_INCLUDED

View File

@@ -1,33 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_MISC_P_H_INCLUDED
#define ASMJIT_CORE_MISC_P_H_INCLUDED
#include "../core/api-config.h"
ASMJIT_BEGIN_NAMESPACE
//! \cond INTERNAL
//! \addtogroup asmjit_utilities
//! \{
#define ASMJIT_LOOKUP_TABLE_4(T, I) T((I)), T((I+1)), T((I+2)), T((I+3))
#define ASMJIT_LOOKUP_TABLE_8(T, I) ASMJIT_LOOKUP_TABLE_4(T, I), ASMJIT_LOOKUP_TABLE_4(T, I + 4)
#define ASMJIT_LOOKUP_TABLE_16(T, I) ASMJIT_LOOKUP_TABLE_8(T, I), ASMJIT_LOOKUP_TABLE_8(T, I + 8)
#define ASMJIT_LOOKUP_TABLE_32(T, I) ASMJIT_LOOKUP_TABLE_16(T, I), ASMJIT_LOOKUP_TABLE_16(T, I + 16)
#define ASMJIT_LOOKUP_TABLE_40(T, I) ASMJIT_LOOKUP_TABLE_16(T, I), ASMJIT_LOOKUP_TABLE_16(T, I + 16), ASMJIT_LOOKUP_TABLE_8(T, I + 32)
#define ASMJIT_LOOKUP_TABLE_64(T, I) ASMJIT_LOOKUP_TABLE_32(T, I), ASMJIT_LOOKUP_TABLE_32(T, I + 32)
#define ASMJIT_LOOKUP_TABLE_128(T, I) ASMJIT_LOOKUP_TABLE_64(T, I), ASMJIT_LOOKUP_TABLE_64(T, I + 64)
#define ASMJIT_LOOKUP_TABLE_256(T, I) ASMJIT_LOOKUP_TABLE_128(T, I), ASMJIT_LOOKUP_TABLE_128(T, I + 128)
#define ASMJIT_LOOKUP_TABLE_512(T, I) ASMJIT_LOOKUP_TABLE_256(T, I), ASMJIT_LOOKUP_TABLE_256(T, I + 256)
#define ASMJIT_LOOKUP_TABLE_1024(T, I) ASMJIT_LOOKUP_TABLE_512(T, I), ASMJIT_LOOKUP_TABLE_512(T, I + 512)
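// For illustration, a hypothetical generator macro `T(index)` can expand into a
// fixed-size table like this:
//
//   #define VALUE_OF(I) uint8_t((I) & 0x0Fu)
//   static const uint8_t table[256] = { ASMJIT_LOOKUP_TABLE_256(VALUE_OF, 0) };
//   #undef VALUE_OF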
//! \}
//! \endcond
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_MISC_P_H_INCLUDED

View File

@@ -1,132 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/operand.h"
ASMJIT_BEGIN_NAMESPACE
// Operand - Tests
// ===============
#if defined(ASMJIT_TEST)
enum class StrongEnumForImmTests : uint32_t {
kValue0,
kValue0xFFFFFFFF = 0xFFFFFFFFu
};
UNIT(operand) {
INFO("Checking operand sizes");
EXPECT_EQ(sizeof(Operand), 16u);
EXPECT_EQ(sizeof(Reg), 16u);
EXPECT_EQ(sizeof(BaseMem), 16u);
EXPECT_EQ(sizeof(Imm), 16u);
EXPECT_EQ(sizeof(Label), 16u);
INFO("Checking basic functionality of Operand");
Operand a, b;
Operand dummy;
EXPECT_TRUE(a.isNone());
EXPECT_FALSE(a.isReg());
EXPECT_FALSE(a.isMem());
EXPECT_FALSE(a.isImm());
EXPECT_FALSE(a.isLabel());
EXPECT_EQ(a, b);
EXPECT_EQ(a._data[0], 0u);
EXPECT_EQ(a._data[1], 0u);
INFO("Checking basic functionality of Label");
Label label;
EXPECT_FALSE(label.isValid());
EXPECT_EQ(label.id(), Globals::kInvalidId);
INFO("Checking basic functionality of Reg");
EXPECT_TRUE(Reg().isReg());
EXPECT_FALSE(Reg().isValid());
EXPECT_EQ(Reg()._data[0], 0u);
EXPECT_EQ(Reg()._data[1], 0u);
EXPECT_FALSE(dummy.as<Reg>().isValid());
// Create some register (not specific to any architecture).
OperandSignature rSig = OperandSignature::fromOpType(OperandType::kReg) |
OperandSignature::fromRegType(RegType::kVec128) |
OperandSignature::fromRegGroup(RegGroup::kVec) |
OperandSignature::fromSize(8);
Reg r1(rSig, 5);
EXPECT_TRUE(r1.isValid());
EXPECT_TRUE(r1.isReg());
EXPECT_TRUE(r1.isReg(RegType::kVec128));
EXPECT_TRUE(r1.isPhysReg());
EXPECT_FALSE(r1.isVirtReg());
EXPECT_EQ(r1.signature(), rSig);
EXPECT_EQ(r1.regType(), RegType::kVec128);
EXPECT_EQ(r1.regGroup(), RegGroup::kVec);
EXPECT_EQ(r1.size(), 8u);
EXPECT_EQ(r1.id(), 5u);
EXPECT_TRUE(r1.isReg(RegType::kVec128, 5)); // RegType and Id.
EXPECT_EQ(r1._data[0], 0u);
EXPECT_EQ(r1._data[1], 0u);
// The same type of register having different id.
Reg r2(r1, 6);
EXPECT_TRUE(r2.isValid());
EXPECT_TRUE(r2.isReg());
EXPECT_TRUE(r2.isReg(RegType::kVec128));
EXPECT_TRUE(r2.isPhysReg());
EXPECT_FALSE(r2.isVirtReg());
EXPECT_EQ(r2.signature(), rSig);
EXPECT_EQ(r2.regType(), r1.regType());
EXPECT_EQ(r2.regGroup(), r1.regGroup());
EXPECT_EQ(r2.size(), r1.size());
EXPECT_EQ(r2.id(), 6u);
EXPECT_TRUE(r2.isReg(RegType::kVec128, 6));
r1.reset();
EXPECT_FALSE(r1.isReg());
EXPECT_FALSE(r1.isValid());
INFO("Checking basic functionality of BaseMem");
BaseMem m;
EXPECT_TRUE(m.isMem());
EXPECT_EQ(m, BaseMem());
EXPECT_FALSE(m.hasBase());
EXPECT_FALSE(m.hasIndex());
EXPECT_FALSE(m.hasOffset());
EXPECT_TRUE(m.isOffset64Bit());
EXPECT_EQ(m.offset(), 0);
m.setOffset(-1);
EXPECT_EQ(m.offsetLo32(), -1);
EXPECT_EQ(m.offset(), -1);
int64_t x = int64_t(0xFF00FF0000000001u);
int32_t xHi = int32_t(0xFF00FF00u);
m.setOffset(x);
EXPECT_EQ(m.offset(), x);
EXPECT_EQ(m.offsetLo32(), 1);
EXPECT_EQ(m.offsetHi32(), xHi);
INFO("Checking basic functionality of Imm");
Imm immValue(-42);
EXPECT_EQ(immValue.type(), ImmType::kInt);
EXPECT_EQ(Imm(-1).value(), -1);
EXPECT_EQ(imm(-1).value(), -1);
EXPECT_EQ(immValue.value(), -42);
EXPECT_EQ(imm(0xFFFFFFFF).value(), int64_t(0xFFFFFFFF));
Imm immDouble(0.4);
EXPECT_EQ(immDouble.type(), ImmType::kDouble);
EXPECT_EQ(immDouble.valueAs<double>(), 0.4);
EXPECT_EQ(immDouble, imm(0.4));
EXPECT_EQ(Imm(StrongEnumForImmTests::kValue0).value(), 0);
EXPECT_EQ(Imm(StrongEnumForImmTests::kValue0xFFFFFFFF).value(), 0xFFFFFFFFu);
}
#endif
ASMJIT_END_NAMESPACE

File diff suppressed because it is too large

View File

@@ -1,41 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/osutils_p.h"
#include "../core/support.h"
#if !defined(_WIN32)
#include <fcntl.h>
#include <unistd.h>
#endif
ASMJIT_BEGIN_NAMESPACE
#if !defined(_WIN32)
Error OSUtils::readFile(const char* name, String& dst, size_t maxSize) noexcept {
char* buffer = dst.prepare(String::ModifyOp::kAssign, maxSize);
if (ASMJIT_UNLIKELY(!buffer)) {
return DebugUtils::errored(kErrorOutOfMemory);
}
int fd = ASMJIT_FILE64_API(::open)(name, O_RDONLY);
if (fd < 0) {
dst.clear();
return DebugUtils::errored(kErrorFailedToOpenFile);
}
intptr_t len = ::read(fd, buffer, maxSize);
if (len >= 0) {
buffer[len] = '\0';
dst._setSize(size_t(len));
}
::close(fd);
return kErrorOk;
}
#endif
ASMJIT_END_NAMESPACE

View File

@@ -1,54 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_OSUTILS_H_INCLUDED
#define ASMJIT_CORE_OSUTILS_H_INCLUDED
#include "../core/globals.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_utilities
//! \{
//! \cond INTERNAL
//! Lock.
//!
//! Lock is internal and cannot be used outside of AsmJit; however, its internal
//! layout is exposed as it's used by some other classes, which are public.
class Lock {
public:
ASMJIT_NONCOPYABLE(Lock)
#if defined(_WIN32)
#pragma pack(push, 8)
struct ASMJIT_MAY_ALIAS Handle {
void* DebugInfo;
long LockCount;
long RecursionCount;
void* OwningThread;
void* LockSemaphore;
unsigned long* SpinCount;
};
Handle _handle;
#pragma pack(pop)
#elif !defined(__EMSCRIPTEN__)
using Handle = pthread_mutex_t;
Handle _handle;
#endif
ASMJIT_INLINE_NODEBUG Lock() noexcept;
ASMJIT_INLINE_NODEBUG ~Lock() noexcept;
ASMJIT_INLINE_NODEBUG void lock() noexcept;
ASMJIT_INLINE_NODEBUG void unlock() noexcept;
};
//! \endcond
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_OSUTILS_H_INCLUDED

View File

@@ -1,78 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_OSUTILS_P_H_INCLUDED
#define ASMJIT_CORE_OSUTILS_P_H_INCLUDED
#include "../core/osutils.h"
#include "../core/string.h"
ASMJIT_BEGIN_NAMESPACE
//! \cond INTERNAL
//! \addtogroup asmjit_utilities
//! \{
#if defined(_WIN32)
// Windows implementation.
static_assert(sizeof(Lock::Handle) == sizeof(CRITICAL_SECTION), "asmjit::Lock::Handle layout must match CRITICAL_SECTION");
static_assert(alignof(Lock::Handle) == alignof(CRITICAL_SECTION), "asmjit::Lock::Handle alignment must match CRITICAL_SECTION");
ASMJIT_INLINE_NODEBUG Lock::Lock() noexcept { InitializeCriticalSection(reinterpret_cast<CRITICAL_SECTION*>(&_handle)); }
ASMJIT_INLINE_NODEBUG Lock::~Lock() noexcept { DeleteCriticalSection(reinterpret_cast<CRITICAL_SECTION*>(&_handle)); }
ASMJIT_INLINE_NODEBUG void Lock::lock() noexcept { EnterCriticalSection(reinterpret_cast<CRITICAL_SECTION*>(&_handle)); }
ASMJIT_INLINE_NODEBUG void Lock::unlock() noexcept { LeaveCriticalSection(reinterpret_cast<CRITICAL_SECTION*>(&_handle)); }
#elif !defined(__EMSCRIPTEN__)
// PThread implementation.
#ifdef PTHREAD_MUTEX_INITIALIZER
ASMJIT_INLINE_NODEBUG Lock::Lock() noexcept : _handle(PTHREAD_MUTEX_INITIALIZER) {}
#else
ASMJIT_INLINE_NODEBUG Lock::Lock() noexcept { pthread_mutex_init(&_handle, nullptr); }
#endif
ASMJIT_INLINE_NODEBUG Lock::~Lock() noexcept { pthread_mutex_destroy(&_handle); }
ASMJIT_INLINE_NODEBUG void Lock::lock() noexcept { pthread_mutex_lock(&_handle); }
ASMJIT_INLINE_NODEBUG void Lock::unlock() noexcept { pthread_mutex_unlock(&_handle); }
#else
// Dummy implementation - Emscripten or other unsupported platform.
ASMJIT_INLINE_NODEBUG Lock::Lock() noexcept {}
ASMJIT_INLINE_NODEBUG Lock::~Lock() noexcept {}
ASMJIT_INLINE_NODEBUG void Lock::lock() noexcept {}
ASMJIT_INLINE_NODEBUG void Lock::unlock() noexcept {}
#endif
//! Scoped lock.
class LockGuard {
public:
ASMJIT_NONCOPYABLE(LockGuard)
Lock& _target;
ASMJIT_INLINE_NODEBUG LockGuard(Lock& target) noexcept
: _target(target) { _target.lock(); }
ASMJIT_INLINE_NODEBUG ~LockGuard() noexcept { _target.unlock(); }
};
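// A usage sketch - the guard locks on construction and unlocks when the scope ends:
//
//   static Lock lock;
//   {
//     LockGuard guard(lock);
//     // ... critical section ...
//   }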
#if !defined(_WIN32)
namespace OSUtils {
//! Reads a file, only used on non-Windows platforms to access /sys or other files when necessary.
Error readFile(const char* name, String& dst, size_t maxSize) noexcept;
} // {OSUtils}
#endif
//! \}
//! \endcond
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_OSUTILS_P_H_INCLUDED

View File

@@ -1,444 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_RAASSIGNMENT_P_H_INCLUDED
#define ASMJIT_CORE_RAASSIGNMENT_P_H_INCLUDED
#include "../core/api-config.h"
#ifndef ASMJIT_NO_COMPILER
#include "../core/radefs_p.h"
ASMJIT_BEGIN_NAMESPACE
//! \cond INTERNAL
//! \addtogroup asmjit_ra
//! \{
//! Holds the current register assignment.
//!
//! Has two purposes:
//!
//! 1. Holds register assignment of a local register allocator (see \ref RALocalAllocator).
//! 2. Holds register assignment of the entry of basic blocks (see \ref RABlock).
class RAAssignment {
public:
ASMJIT_NONCOPYABLE(RAAssignment)
enum Ids : uint32_t {
kPhysNone = 0xFF,
kWorkNone = RAWorkReg::kIdNone
};
enum DirtyBit : uint32_t {
kClean = 0,
kDirty = 1
};
struct Layout {
//! Index of architecture registers per group.
RARegIndex physIndex;
//! Count of architecture registers per group.
RARegCount physCount;
//! Count of physical registers of all groups.
uint32_t physTotal;
//! Count of work registers.
uint32_t workCount;
//! WorkRegs data (vector).
const RAWorkRegs* workRegs;
inline void reset() noexcept {
physIndex.reset();
physCount.reset();
physTotal = 0;
workCount = 0;
workRegs = nullptr;
}
};
struct PhysToWorkMap {
//! Assigned registers (each bit represents one physical reg).
RARegMask assigned;
//! Dirty registers (spill slot out of sync or no spill slot).
RARegMask dirty;
//! PhysReg to WorkReg mapping.
uint32_t workIds[1 /* ... */];
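//! Returns the number of bytes needed to hold a `PhysToWorkMap` with `count` entries in the trailing
//! `workIds` array (the single declared element emulates a flexible array member, hence it's subtracted).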
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG size_t sizeOf(size_t count) noexcept {
return Support::alignUp(sizeof(PhysToWorkMap) - sizeof(uint32_t) + count * sizeof(uint32_t), Globals::kZoneAlignment);
}
ASMJIT_INLINE void reset(size_t count) noexcept {
assigned.reset();
dirty.reset();
for (size_t i = 0; i < count; i++) {
workIds[i] = kWorkNone;
}
}
ASMJIT_INLINE void copyFrom(const PhysToWorkMap* other, size_t count) noexcept {
size_t size = sizeOf(count);
memcpy(this, other, size);
}
ASMJIT_INLINE void unassign(RegGroup group, uint32_t physId, uint32_t indexInWorkIds) noexcept {
assigned.clear(group, Support::bitMask(physId));
dirty.clear(group, Support::bitMask(physId));
workIds[indexInWorkIds] = kWorkNone;
}
};
struct WorkToPhysMap {
//! WorkReg to PhysReg mapping.
uint8_t physIds[1 /* ... */];
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG size_t sizeOf(size_t count) noexcept {
return Support::alignUp(size_t(count) * sizeof(uint8_t), Globals::kZoneAlignment);
}
ASMJIT_INLINE void reset(size_t count) noexcept {
for (size_t i = 0; i < count; i++) {
physIds[i] = kPhysNone;
}
}
ASMJIT_INLINE void copyFrom(const WorkToPhysMap* other, size_t count) noexcept {
size_t size = sizeOf(count);
if (ASMJIT_LIKELY(size)) {
memcpy(this, other, size);
}
}
};
//! \name Members
//! \{
//! Physical registers layout.
Layout _layout;
//! WorkReg to PhysReg mapping.
WorkToPhysMap* _workToPhysMap;
//! PhysReg to WorkReg mapping and assigned/dirty bits.
PhysToWorkMap* _physToWorkMap;
//! Optimization to translate PhysRegs to WorkRegs faster.
Support::Array<uint32_t*, Globals::kNumVirtGroups> _physToWorkIds;
//! \}
//! \name Construction & Destruction
//! \{
inline RAAssignment() noexcept {
_layout.reset();
resetMaps();
}
ASMJIT_INLINE void initLayout(const RARegCount& physCount, const RAWorkRegs& workRegs) noexcept {
// Layout must be initialized before data.
ASMJIT_ASSERT(_physToWorkMap == nullptr);
ASMJIT_ASSERT(_workToPhysMap == nullptr);
_layout.physIndex.buildIndexes(physCount);
_layout.physCount = physCount;
_layout.physTotal = uint32_t(_layout.physIndex[RegGroup::kMaxVirt]) +
uint32_t(_layout.physCount[RegGroup::kMaxVirt]);
_layout.workCount = workRegs.size();
_layout.workRegs = &workRegs;
}
ASMJIT_INLINE void initMaps(PhysToWorkMap* physToWorkMap, WorkToPhysMap* workToPhysMap) noexcept {
_physToWorkMap = physToWorkMap;
_workToPhysMap = workToPhysMap;
for (RegGroup group : RegGroupVirtValues{}) {
_physToWorkIds[group] = physToWorkMap->workIds + _layout.physIndex.get(group);
}
}
ASMJIT_INLINE void resetMaps() noexcept {
_physToWorkMap = nullptr;
_workToPhysMap = nullptr;
_physToWorkIds.fill(nullptr);
}
//! \}
//! \name Accessors
//! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG PhysToWorkMap* physToWorkMap() const noexcept { return _physToWorkMap; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG WorkToPhysMap* workToPhysMap() const noexcept { return _workToPhysMap; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RARegMask& assigned() noexcept { return _physToWorkMap->assigned; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const RARegMask& assigned() const noexcept { return _physToWorkMap->assigned; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t assigned(RegGroup group) const noexcept { return _physToWorkMap->assigned[group]; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RARegMask& dirty() noexcept { return _physToWorkMap->dirty; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const RARegMask& dirty() const noexcept { return _physToWorkMap->dirty; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegMask dirty(RegGroup group) const noexcept { return _physToWorkMap->dirty[group]; }
[[nodiscard]]
inline uint32_t workToPhysId(RegGroup group, uint32_t workId) const noexcept {
DebugUtils::unused(group);
ASMJIT_ASSERT(workId != kWorkNone);
ASMJIT_ASSERT(workId < _layout.workCount);
return _workToPhysMap->physIds[workId];
}
[[nodiscard]]
inline uint32_t physToWorkId(RegGroup group, uint32_t physId) const noexcept {
ASMJIT_ASSERT(physId < Globals::kMaxPhysRegs);
return _physToWorkIds[group][physId];
}
[[nodiscard]]
inline bool isPhysAssigned(RegGroup group, uint32_t physId) const noexcept {
ASMJIT_ASSERT(physId < Globals::kMaxPhysRegs);
return Support::bitTest(_physToWorkMap->assigned[group], physId);
}
[[nodiscard]]
inline bool isPhysDirty(RegGroup group, uint32_t physId) const noexcept {
ASMJIT_ASSERT(physId < Globals::kMaxPhysRegs);
return Support::bitTest(_physToWorkMap->dirty[group], physId);
}
//! \}
//! \name Assignment
//!
//! These are low-level allocation helpers that are used to update the current mappings between physical and
//! virt/work registers and also to update masks that represent allocated and dirty registers. These functions
//! don't emit any code; they are only used to update and keep all mappings in sync.
//!
//! \{
//! Assign [VirtReg/WorkReg] to a physical register.
inline void assign(RegGroup group, uint32_t workId, uint32_t physId, bool dirty) noexcept {
ASMJIT_ASSERT(workToPhysId(group, workId) == kPhysNone);
ASMJIT_ASSERT(physToWorkId(group, physId) == kWorkNone);
ASMJIT_ASSERT(!isPhysAssigned(group, physId));
ASMJIT_ASSERT(!isPhysDirty(group, physId));
_workToPhysMap->physIds[workId] = uint8_t(physId);
_physToWorkIds[group][physId] = workId;
RegMask regMask = Support::bitMask(physId);
_physToWorkMap->assigned[group] |= regMask;
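// Support::bitMaskFromBool() expands `dirty` into an all-zeros or all-ones mask, so the
// register is always marked assigned, but marked dirty only when `dirty` is true.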
_physToWorkMap->dirty[group] |= regMask & Support::bitMaskFromBool<RegMask>(dirty);
verify();
}
//! Reassign [VirtReg/WorkReg] to `dstPhysId` from `srcPhysId`.
inline void reassign(RegGroup group, uint32_t workId, uint32_t dstPhysId, uint32_t srcPhysId) noexcept {
ASMJIT_ASSERT(dstPhysId != srcPhysId);
ASMJIT_ASSERT(workToPhysId(group, workId) == srcPhysId);
ASMJIT_ASSERT(physToWorkId(group, srcPhysId) == workId);
ASMJIT_ASSERT(isPhysAssigned(group, srcPhysId) == true);
ASMJIT_ASSERT(isPhysAssigned(group, dstPhysId) == false);
_workToPhysMap->physIds[workId] = uint8_t(dstPhysId);
_physToWorkIds[group][srcPhysId] = kWorkNone;
_physToWorkIds[group][dstPhysId] = workId;
RegMask srcMask = Support::bitMask(srcPhysId);
RegMask dstMask = Support::bitMask(dstPhysId);
bool dirty = (_physToWorkMap->dirty[group] & srcMask) != 0;
RegMask regMask = dstMask | srcMask;
_physToWorkMap->assigned[group] ^= regMask;
_physToWorkMap->dirty[group] ^= regMask & Support::bitMaskFromBool<RegMask>(dirty);
verify();
}
inline void swap(RegGroup group, uint32_t aWorkId, uint32_t aPhysId, uint32_t bWorkId, uint32_t bPhysId) noexcept {
ASMJIT_ASSERT(aPhysId != bPhysId);
ASMJIT_ASSERT(workToPhysId(group, aWorkId) == aPhysId);
ASMJIT_ASSERT(workToPhysId(group, bWorkId) == bPhysId);
ASMJIT_ASSERT(physToWorkId(group, aPhysId) == aWorkId);
ASMJIT_ASSERT(physToWorkId(group, bPhysId) == bWorkId);
ASMJIT_ASSERT(isPhysAssigned(group, aPhysId));
ASMJIT_ASSERT(isPhysAssigned(group, bPhysId));
_workToPhysMap->physIds[aWorkId] = uint8_t(bPhysId);
_workToPhysMap->physIds[bWorkId] = uint8_t(aPhysId);
_physToWorkIds[group][aPhysId] = bWorkId;
_physToWorkIds[group][bPhysId] = aWorkId;
RegMask aMask = Support::bitMask(aPhysId);
RegMask bMask = Support::bitMask(bPhysId);
RegMask flipMask = Support::bitMaskFromBool<RegMask>(((_physToWorkMap->dirty[group] & aMask) != 0) ^ ((_physToWorkMap->dirty[group] & bMask) != 0));
RegMask regMask = aMask | bMask;
_physToWorkMap->dirty[group] ^= regMask & flipMask;
verify();
}
//! Unassign [VirtReg/WorkReg] from a physical register.
inline void unassign(RegGroup group, uint32_t workId, uint32_t physId) noexcept {
ASMJIT_ASSERT(physId < Globals::kMaxPhysRegs);
ASMJIT_ASSERT(workToPhysId(group, workId) == physId);
ASMJIT_ASSERT(physToWorkId(group, physId) == workId);
ASMJIT_ASSERT(isPhysAssigned(group, physId));
_workToPhysMap->physIds[workId] = kPhysNone;
_physToWorkIds[group][physId] = kWorkNone;
RegMask regMask = Support::bitMask(physId);
_physToWorkMap->assigned[group] &= ~regMask;
_physToWorkMap->dirty[group] &= ~regMask;
verify();
}
inline void makeClean(RegGroup group, uint32_t workId, uint32_t physId) noexcept {
DebugUtils::unused(workId);
RegMask regMask = Support::bitMask(physId);
_physToWorkMap->dirty[group] &= ~regMask;
}
inline void makeDirty(RegGroup group, uint32_t workId, uint32_t physId) noexcept {
DebugUtils::unused(workId);
RegMask regMask = Support::bitMask(physId);
_physToWorkMap->dirty[group] |= regMask;
}
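//! A minimal usage sketch of the helpers above (hypothetical work/phys ids; assumes `ra` is
//! an initialized `RAAssignment`):
//!
//! ```
//! // Assign work register #7 to physical register #3 as clean, mark it dirty
//! // after a write, and finally unassign it again.
//! ra.assign(RegGroup::kGp, 7, 3, RAAssignment::kClean);
//! ra.makeDirty(RegGroup::kGp, 7, 3);
//! ra.unassign(RegGroup::kGp, 7, 3);
//! ```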
//! \}
//! \name Utilities
//! \{
ASMJIT_INLINE void swap(RAAssignment& other) noexcept {
std::swap(_workToPhysMap, other._workToPhysMap);
std::swap(_physToWorkMap, other._physToWorkMap);
_physToWorkIds.swap(other._physToWorkIds);
}
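//! Rebuilds the work-to-phys mapping from the current phys-to-work mapping; used by
//! `copyFrom()` when only a `PhysToWorkMap` is provided.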
inline void assignWorkIdsFromPhysIds() noexcept {
memset(_workToPhysMap, uint8_t(Reg::kIdBad), WorkToPhysMap::sizeOf(_layout.workCount));
for (RegGroup group : RegGroupVirtValues{}) {
uint32_t physBaseIndex = _layout.physIndex[group];
Support::BitWordIterator<RegMask> it(_physToWorkMap->assigned[group]);
while (it.hasNext()) {
uint32_t physId = it.next();
uint32_t workId = _physToWorkMap->workIds[physBaseIndex + physId];
ASMJIT_ASSERT(workId != kWorkNone);
_workToPhysMap->physIds[workId] = uint8_t(physId);
}
}
}
inline void copyFrom(const PhysToWorkMap* physToWorkMap) noexcept {
memcpy(_physToWorkMap, physToWorkMap, PhysToWorkMap::sizeOf(_layout.physTotal));
assignWorkIdsFromPhysIds();
}
inline void copyFrom(const PhysToWorkMap* physToWorkMap, const WorkToPhysMap* workToPhysMap) noexcept {
memcpy(_physToWorkMap, physToWorkMap, PhysToWorkMap::sizeOf(_layout.physTotal));
memcpy(_workToPhysMap, workToPhysMap, WorkToPhysMap::sizeOf(_layout.workCount));
}
inline void copyFrom(const RAAssignment& other) noexcept {
copyFrom(other.physToWorkMap(), other.workToPhysMap());
}
// Not really useful outside of debugging.
[[nodiscard]]
bool equals(const RAAssignment& other) const noexcept {
// Layout should always match.
if (_layout.physIndex != other._layout.physIndex ||
_layout.physCount != other._layout.physCount ||
_layout.physTotal != other._layout.physTotal ||
_layout.workCount != other._layout.workCount ||
_layout.workRegs != other._layout.workRegs)
return false;
uint32_t physTotal = _layout.physTotal;
uint32_t workCount = _layout.workCount;
for (uint32_t physId = 0; physId < physTotal; physId++) {
uint32_t thisWorkId = _physToWorkMap->workIds[physId];
uint32_t otherWorkId = other._physToWorkMap->workIds[physId];
if (thisWorkId != otherWorkId) {
return false;
}
}
for (uint32_t workId = 0; workId < workCount; workId++) {
uint32_t thisPhysId = _workToPhysMap->physIds[workId];
uint32_t otherPhysId = other._workToPhysMap->physIds[workId];
if (thisPhysId != otherPhysId) {
return false;
}
}
if (_physToWorkMap->assigned != other._physToWorkMap->assigned ||
_physToWorkMap->dirty != other._physToWorkMap->dirty)
return false;
return true;
}
#if defined(ASMJIT_BUILD_DEBUG)
ASMJIT_NOINLINE void verify() noexcept {
// Verify WorkToPhysMap.
{
for (uint32_t workId = 0; workId < _layout.workCount; workId++) {
uint32_t physId = _workToPhysMap->physIds[workId];
if (physId != kPhysNone) {
const RAWorkReg* workReg = _layout.workRegs->at(workId);
RegGroup group = workReg->group();
ASMJIT_ASSERT(_physToWorkIds[group][physId] == workId);
}
}
}
// Verify PhysToWorkMap.
{
for (RegGroup group : RegGroupVirtValues{}) {
uint32_t physCount = _layout.physCount[group];
for (uint32_t physId = 0; physId < physCount; physId++) {
uint32_t workId = _physToWorkIds[group][physId];
if (workId != kWorkNone) {
ASMJIT_ASSERT(_workToPhysMap->physIds[workId] == physId);
}
}
}
}
}
#else
inline void verify() noexcept {}
#endif
//! \}
};
//! \}
//! \endcond
ASMJIT_END_NAMESPACE
#endif // !ASMJIT_NO_COMPILER
#endif // ASMJIT_CORE_RAASSIGNMENT_P_H_INCLUDED

View File

@@ -1,653 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_RABUILDERS_P_H_INCLUDED
#define ASMJIT_CORE_RABUILDERS_P_H_INCLUDED
#include "../core/api-config.h"
#ifndef ASMJIT_NO_COMPILER
#include "../core/formatter.h"
#include "../core/rapass_p.h"
ASMJIT_BEGIN_NAMESPACE
//! \cond INTERNAL
//! \addtogroup asmjit_ra
//! \{
template<typename This>
class RACFGBuilderT {
public:
//! \name Constants
//! \{
static inline constexpr uint32_t kRootIndentation = 2;
static inline constexpr uint32_t kCodeIndentation = 4;
// NOTE: This is a bit hacky. There are some nodes which are processed twice (see `onBeforeInvoke()` and
// `onBeforeRet()`) as they can insert some nodes around them. Since we don't have any flags to mark these,
// we just use their position, which is [at that time] unassigned.
static inline constexpr uint32_t kNodePositionDidOnBefore = 0xFFFFFFFFu;
//! \}
//! \name Members
//! \{
BaseRAPass* _pass = nullptr;
BaseCompiler* _cc = nullptr;
RABlock* _curBlock = nullptr;
RABlock* _retBlock = nullptr;
FuncNode* _funcNode = nullptr;
RARegsStats _blockRegStats {};
uint32_t _exitLabelId = Globals::kInvalidId;
ZoneVector<uint32_t> _sharedAssignmentsMap {};
// Only used by logging; it's fine to keep it here to prevent more #ifdefs...
bool _hasCode = false;
RABlock* _lastLoggedBlock = nullptr;
#ifndef ASMJIT_NO_LOGGING
Logger* _logger = nullptr;
FormatOptions _formatOptions {};
StringTmp<512> _sb;
#endif
//! \}
inline RACFGBuilderT(BaseRAPass* pass) noexcept
: _pass(pass),
_cc(pass->cc()) {
#ifndef ASMJIT_NO_LOGGING
_logger = _pass->hasDiagnosticOption(DiagnosticOptions::kRADebugCFG) ? _pass->logger() : nullptr;
if (_logger) {
_formatOptions = _logger->options();
}
#endif
}
[[nodiscard]]
ASMJIT_INLINE_NODEBUG BaseCompiler* cc() const noexcept { return _cc; }
//! \name Run
//! \{
//! Called per function by an architecture-specific CFG builder.
[[nodiscard]]
Error run() noexcept {
log("[BuildCFG]\n");
ASMJIT_PROPAGATE(prepare());
logNode(_funcNode, kRootIndentation);
logBlock(_curBlock, kRootIndentation);
RABlock* entryBlock = _curBlock;
BaseNode* node = _funcNode->next();
if (ASMJIT_UNLIKELY(!node)) {
return DebugUtils::errored(kErrorInvalidState);
}
_curBlock->setFirst(_funcNode);
_curBlock->setLast(_funcNode);
RAInstBuilder ib;
ZoneVector<RABlock*> blocksWithUnknownJumps;
for (;;) {
BaseNode* next = node->next();
ASMJIT_ASSERT(node->position() == 0 || node->position() == kNodePositionDidOnBefore);
if (node->isInst()) {
// Instruction | Jump | Invoke | Return
// ------------------------------------
// Handle `InstNode`, `InvokeNode`, and `FuncRetNode`. All of them share the same interface that provides
// operands that have read/write semantics.
if (ASMJIT_UNLIKELY(!_curBlock)) {
// Unreachable code has to be removed - we cannot allocate registers in such code, because we cannot
// perform proper liveness analysis on it.
removeNode(node);
node = next;
continue;
}
_hasCode = true;
if (node->isInvoke() || node->isFuncRet()) {
if (node->position() != kNodePositionDidOnBefore) {
// Invoke and Ret are complicated as they may insert some surrounding code around them. The simplest
// approach is to remember the previous node, call the `onBefore()` handlers, and then check whether
// anything changed; if so, restart. By restart we mean that the current `node` goes back to the
// first node inserted by `onBeforeInvoke()` or `onBeforeRet()`.
BaseNode* prev = node->prev();
if (node->type() == NodeType::kInvoke) {
ASMJIT_PROPAGATE(static_cast<This*>(this)->onBeforeInvoke(node->as<InvokeNode>()));
}
else {
ASMJIT_PROPAGATE(static_cast<This*>(this)->onBeforeRet(node->as<FuncRetNode>()));
}
if (prev != node->prev()) {
// If this was the first node in the block and something was
// inserted before it, then we have to update the block's first node.
if (_curBlock->first() == node) {
_curBlock->setFirst(prev->next());
}
node->setPosition(kNodePositionDidOnBefore);
node = prev->next();
// `onBeforeInvoke()` and `onBeforeRet()` can only insert instructions.
ASMJIT_ASSERT(node->isInst());
}
// Necessary if something was inserted after `node`, but nothing before.
next = node->next();
}
else {
// Change the position back to its original value.
node->setPosition(0);
}
}
InstNode* inst = node->as<InstNode>();
logNode(inst, kCodeIndentation);
InstControlFlow cf = InstControlFlow::kRegular;
ib.reset(_curBlock->blockId());
ASMJIT_PROPAGATE(static_cast<This*>(this)->onInst(inst, cf, ib));
if (node->isInvoke()) {
ASMJIT_PROPAGATE(static_cast<This*>(this)->onInvoke(inst->as<InvokeNode>(), ib));
}
if (node->isFuncRet()) {
ASMJIT_PROPAGATE(static_cast<This*>(this)->onRet(inst->as<FuncRetNode>(), ib));
cf = InstControlFlow::kReturn;
}
if (cf == InstControlFlow::kJump) {
uint32_t fixedRegCount = 0;
for (RATiedReg& tiedReg : ib) {
RAWorkReg* workReg = _pass->workRegById(tiedReg.workId());
if (workReg->group() == RegGroup::kGp) {
uint32_t useId = tiedReg.useId();
if (useId == Reg::kIdBad) {
useId = _pass->_scratchRegIndexes[fixedRegCount++];
tiedReg.setUseId(useId);
}
_curBlock->addExitScratchGpRegs(Support::bitMask(useId));
}
}
}
ASMJIT_PROPAGATE(_pass->assignRAInst(inst, _curBlock, ib));
_blockRegStats.combineWith(ib._stats);
if (cf != InstControlFlow::kRegular) {
// Support for conditional and unconditional jumps.
if (cf == InstControlFlow::kJump || cf == InstControlFlow::kBranch) {
_curBlock->setLast(node);
_curBlock->addFlags(RABlockFlags::kHasTerminator);
_curBlock->makeConstructed(_blockRegStats);
if (!inst->hasOption(InstOptions::kUnfollow)) {
// Jmp/Jcc/Call/Loop/etc...
uint32_t opCount = inst->opCount();
const Operand* opArray = inst->operands();
// Cannot jump anywhere without operands.
if (ASMJIT_UNLIKELY(!opCount)) {
return DebugUtils::errored(kErrorInvalidState);
}
if (opArray[opCount - 1].isLabel()) {
// Labels are easy for constructing the control flow.
LabelNode* labelNode;
ASMJIT_PROPAGATE(cc()->labelNodeOf(&labelNode, opArray[opCount - 1].as<Label>()));
RABlock* targetBlock = _pass->newBlockOrExistingAt(labelNode);
if (ASMJIT_UNLIKELY(!targetBlock)) {
return DebugUtils::errored(kErrorOutOfMemory);
}
targetBlock->makeTargetable();
ASMJIT_PROPAGATE(_curBlock->appendSuccessor(targetBlock));
}
else {
// Not a label - could be a jump with a reg/mem operand, which means that it can go anywhere. Such
// jumps must be annotated so the CFG can be properly constructed; otherwise we have to assume the
// worst case - that it can jump to any basic block.
JumpAnnotation* jumpAnnotation = nullptr;
_curBlock->addFlags(RABlockFlags::kHasJumpTable);
if (inst->type() == NodeType::kJump) {
jumpAnnotation = inst->as<JumpNode>()->annotation();
}
if (jumpAnnotation) {
uint64_t timestamp = _pass->nextTimestamp();
for (uint32_t id : jumpAnnotation->labelIds()) {
LabelNode* labelNode;
ASMJIT_PROPAGATE(cc()->labelNodeOf(&labelNode, id));
RABlock* targetBlock = _pass->newBlockOrExistingAt(labelNode);
if (ASMJIT_UNLIKELY(!targetBlock)) {
return DebugUtils::errored(kErrorOutOfMemory);
}
// Prevents adding basic-block successors multiple times.
if (!targetBlock->hasTimestamp(timestamp)) {
targetBlock->setTimestamp(timestamp);
targetBlock->makeTargetable();
ASMJIT_PROPAGATE(_curBlock->appendSuccessor(targetBlock));
}
}
ASMJIT_PROPAGATE(shareAssignmentAcrossSuccessors(_curBlock));
}
else {
ASMJIT_PROPAGATE(blocksWithUnknownJumps.append(_pass->allocator(), _curBlock));
}
}
}
if (cf == InstControlFlow::kJump) {
// An unconditional jump makes the code after the jump unreachable, and such code is removed instantly
// during CFG construction, as we cannot allocate registers for instructions that are not part of any
// block. Of course we could leave these instructions as they are; however, that would only postpone
// the problem, as assemblers can't encode instructions that use virtual registers.
_curBlock = nullptr;
}
else {
node = next;
if (ASMJIT_UNLIKELY(!node))
return DebugUtils::errored(kErrorInvalidState);
RABlock* consecutiveBlock;
if (node->type() == NodeType::kLabel) {
if (node->hasPassData()) {
consecutiveBlock = node->passData<RABlock>();
}
else {
consecutiveBlock = _pass->newBlock(node);
if (ASMJIT_UNLIKELY(!consecutiveBlock)) {
return DebugUtils::errored(kErrorOutOfMemory);
}
node->setPassData<RABlock>(consecutiveBlock);
}
}
else {
consecutiveBlock = _pass->newBlock(node);
if (ASMJIT_UNLIKELY(!consecutiveBlock)) {
return DebugUtils::errored(kErrorOutOfMemory);
}
}
_curBlock->addFlags(RABlockFlags::kHasConsecutive);
ASMJIT_PROPAGATE(_curBlock->prependSuccessor(consecutiveBlock));
_curBlock = consecutiveBlock;
_hasCode = false;
_blockRegStats.reset();
if (_curBlock->isConstructed()) {
break;
}
ASMJIT_PROPAGATE(_pass->addBlock(consecutiveBlock));
logBlock(_curBlock, kRootIndentation);
continue;
}
}
if (cf == InstControlFlow::kReturn) {
_curBlock->setLast(node);
_curBlock->makeConstructed(_blockRegStats);
ASMJIT_PROPAGATE(_curBlock->appendSuccessor(_retBlock));
_curBlock = nullptr;
}
}
}
else if (node->type() == NodeType::kLabel) {
// Label - Basic-Block Management
// ------------------------------
if (!_curBlock) {
// If the current code is unreachable, the label makes it reachable again. We may still remove the whole
// block later if it's never referenced though.
_curBlock = node->passData<RABlock>();
if (_curBlock) {
// If the label has a block assigned we can either continue with it or skip it if the block has been
// constructed already.
if (_curBlock->isConstructed()) {
break;
}
}
else {
// No block assigned - create a new one and assign it.
_curBlock = _pass->newBlock(node);
if (ASMJIT_UNLIKELY(!_curBlock)) {
return DebugUtils::errored(kErrorOutOfMemory);
}
node->setPassData<RABlock>(_curBlock);
}
_curBlock->makeTargetable();
_hasCode = false;
_blockRegStats.reset();
ASMJIT_PROPAGATE(_pass->addBlock(_curBlock));
}
else {
if (node->hasPassData()) {
RABlock* consecutive = node->passData<RABlock>();
consecutive->makeTargetable();
if (_curBlock == consecutive) {
// The label currently processed is part of the current block. This is only possible for multiple labels
// that are right next to each other or labels that are separated by non-code nodes like directives and
// comments.
if (ASMJIT_UNLIKELY(_hasCode)) {
return DebugUtils::errored(kErrorInvalidState);
}
}
else {
// The label makes the current block constructed. There is a chance that the label is not used, but we
// don't know that at this point. In the worst case there would be two blocks next to each other, which
// is just fine.
ASMJIT_ASSERT(_curBlock->last() != node);
_curBlock->setLast(node->prev());
_curBlock->addFlags(RABlockFlags::kHasConsecutive);
_curBlock->makeConstructed(_blockRegStats);
ASMJIT_PROPAGATE(_curBlock->appendSuccessor(consecutive));
ASMJIT_PROPAGATE(_pass->addBlock(consecutive));
_curBlock = consecutive;
_hasCode = false;
_blockRegStats.reset();
}
}
else {
// First time we see this label.
if (_hasCode || _curBlock == entryBlock) {
// We cannot continue the current block if it already contains some code or if it's the entry block.
// We need to create a new block and make it a successor.
ASMJIT_ASSERT(_curBlock->last() != node);
_curBlock->setLast(node->prev());
_curBlock->addFlags(RABlockFlags::kHasConsecutive);
_curBlock->makeConstructed(_blockRegStats);
RABlock* consecutive = _pass->newBlock(node);
if (ASMJIT_UNLIKELY(!consecutive)) {
return DebugUtils::errored(kErrorOutOfMemory);
}
consecutive->makeTargetable();
ASMJIT_PROPAGATE(_curBlock->appendSuccessor(consecutive));
ASMJIT_PROPAGATE(_pass->addBlock(consecutive));
_curBlock = consecutive;
_hasCode = false;
_blockRegStats.reset();
}
node->setPassData<RABlock>(_curBlock);
}
}
if (_curBlock && _curBlock != _lastLoggedBlock) {
logBlock(_curBlock, kRootIndentation);
}
logNode(node, kRootIndentation);
// Unlikely: Assume that the exit label is reached only once per function.
if (ASMJIT_UNLIKELY(node->as<LabelNode>()->labelId() == _exitLabelId)) {
_curBlock->setLast(node);
_curBlock->makeConstructed(_blockRegStats);
ASMJIT_PROPAGATE(_pass->addExitBlock(_curBlock));
_curBlock = nullptr;
}
}
else {
// Other Nodes | Function Exit
// ---------------------------
logNode(node, kCodeIndentation);
if (node->type() == NodeType::kSentinel) {
if (node == _funcNode->endNode()) {
// This is the end-of-function sentinel - make sure the code didn't just flow here, because every code path must be terminated before reaching it.
if (ASMJIT_UNLIKELY(_curBlock && _hasCode)) {
return DebugUtils::errored(kErrorInvalidState);
}
break;
}
}
else if (node->type() == NodeType::kFunc) {
// RAPass can only compile a single function at a time. If we
// encounter a function node it must be the current one; bail if not.
if (ASMJIT_UNLIKELY(node != _funcNode)) {
return DebugUtils::errored(kErrorInvalidState);
}
// PASS if this is the first node.
}
else {
// PASS if this is a non-interesting or unknown node.
}
}
// Advance to the next node.
node = next;
// NOTE: We cannot encounter a NULL node, because every function must be terminated by a sentinel (`stop`)
// node. If we encountered a NULL node it means that something went wrong and this node list is corrupted;
// bail in such case.
if (ASMJIT_UNLIKELY(!node)) {
return DebugUtils::errored(kErrorInvalidState);
}
}
if (_pass->hasDanglingBlocks()) {
return DebugUtils::errored(kErrorInvalidState);
}
for (RABlock* block : blocksWithUnknownJumps) {
ASMJIT_PROPAGATE(handleBlockWithUnknownJump(block));
}
return _pass->initSharedAssignments(_sharedAssignmentsMap);
}
//! \}
//! \name Prepare
//! \{
//! Prepares the CFG builder of the current function.
[[nodiscard]]
Error prepare() noexcept {
FuncNode* func = _pass->func();
BaseNode* node = nullptr;
// Create entry and exit blocks.
_funcNode = func;
_retBlock = _pass->newBlockOrExistingAt(func->exitNode(), &node);
if (ASMJIT_UNLIKELY(!_retBlock))
return DebugUtils::errored(kErrorOutOfMemory);
_retBlock->makeTargetable();
ASMJIT_PROPAGATE(_pass->addExitBlock(_retBlock));
if (node != func) {
_curBlock = _pass->newBlock();
if (ASMJIT_UNLIKELY(!_curBlock))
return DebugUtils::errored(kErrorOutOfMemory);
}
else {
// Function that has no code at all.
_curBlock = _retBlock;
}
// Reset everything we may need.
_blockRegStats.reset();
_exitLabelId = func->exitNode()->labelId();
// Initially we assume there is no code in the function body.
_hasCode = false;
return _pass->addBlock(_curBlock);
}
//! \}
//! \name Utilities
//! \{
//! Called when a `node` is removed, e.g. because of dead code elimination.
void removeNode(BaseNode* node) noexcept {
logNode(node, kRootIndentation, "<Removed>");
cc()->removeNode(node);
}
//! Handles a block that ends with an unknown jump, which could be a jump through a jump table.
//!
//! If we encounter such a block we basically add all existing blocks as successors, except the function
//! entry block and the natural successor, if such a block exists.
[[nodiscard]]
Error handleBlockWithUnknownJump(RABlock* block) noexcept {
RABlocks& blocks = _pass->blocks();
size_t blockCount = blocks.size();
// NOTE: Iterate from `1` as the first block is the entry block; we don't
// allow the entry to be a successor of any block.
RABlock* consecutive = block->consecutive();
for (size_t i = 1; i < blockCount; i++) {
RABlock* candidate = blocks[i];
if (candidate == consecutive || !candidate->isTargetable()) {
continue;
}
ASMJIT_PROPAGATE(block->appendSuccessor(candidate));
}
return shareAssignmentAcrossSuccessors(block);
}
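//! Shares a single register assignment across all non-consecutive successors of `block`.
//!
//! A hedged illustration: if a jump-table block has successors S1, S2, and S3 (none of them
//! the consecutive block), and S1 already carries shared assignment id #0 while S2 and S3
//! carry none, then S2 and S3 receive id #0 as well. If S3 instead carried id #1, the entry
//! `_sharedAssignmentsMap[1]` is redirected to #0, merging both groups into one.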
[[nodiscard]]
Error shareAssignmentAcrossSuccessors(RABlock* block) noexcept {
if (block->successors().size() <= 1) {
return kErrorOk;
}
RABlock* consecutive = block->consecutive();
uint32_t sharedAssignmentId = Globals::kInvalidId;
for (RABlock* successor : block->successors()) {
if (successor == consecutive) {
continue;
}
if (successor->hasSharedAssignmentId()) {
if (sharedAssignmentId == Globals::kInvalidId) {
sharedAssignmentId = successor->sharedAssignmentId();
}
else {
_sharedAssignmentsMap[successor->sharedAssignmentId()] = sharedAssignmentId;
}
}
else {
if (sharedAssignmentId == Globals::kInvalidId) {
ASMJIT_PROPAGATE(newSharedAssignmentId(&sharedAssignmentId));
}
successor->setSharedAssignmentId(sharedAssignmentId);
}
}
return kErrorOk;
}
[[nodiscard]]
Error newSharedAssignmentId(uint32_t* out) noexcept {
uint32_t id = _sharedAssignmentsMap.size();
ASMJIT_PROPAGATE(_sharedAssignmentsMap.append(_pass->allocator(), id));
*out = id;
return kErrorOk;
}
//! \}
//! \name Logging
//! \{
#ifndef ASMJIT_NO_LOGGING
template<typename... Args>
inline void log(const char* fmt, Args&&... args) noexcept {
if (_logger) {
_logger->logf(fmt, std::forward<Args>(args)...);
}
}
inline void logBlock(RABlock* block, uint32_t indentation = 0) noexcept {
if (_logger) {
_logBlock(block, indentation);
}
}
inline void logNode(BaseNode* node, uint32_t indentation = 0, const char* action = nullptr) noexcept {
if (_logger) {
_logNode(node, indentation, action);
}
}
void _logBlock(RABlock* block, uint32_t indentation) noexcept {
_sb.clear();
_sb.appendChars(' ', indentation);
_sb.appendFormat("{#%u}\n", block->blockId());
_logger->log(_sb);
_lastLoggedBlock = block;
}
void _logNode(BaseNode* node, uint32_t indentation, const char* action) noexcept {
_sb.clear();
_sb.appendChars(' ', indentation);
if (action) {
_sb.append(action);
_sb.append(' ');
}
Formatter::formatNode(_sb, _formatOptions, cc(), node);
_sb.append('\n');
_logger->log(_sb);
}
#else
template<typename... Args>
inline void log(const char* fmt, Args&&... args) noexcept {
DebugUtils::unused(fmt);
DebugUtils::unused(std::forward<Args>(args)...);
}
inline void logBlock(RABlock* block, uint32_t indentation = 0) noexcept {
DebugUtils::unused(block, indentation);
}
inline void logNode(BaseNode* node, uint32_t indentation = 0, const char* action = nullptr) noexcept {
DebugUtils::unused(node, indentation, action);
}
#endif
//! \}
};
//! \}
//! \endcond
ASMJIT_END_NAMESPACE
#endif // !ASMJIT_NO_COMPILER
#endif // ASMJIT_CORE_RABUILDERS_P_H_INCLUDED

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -1,307 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_RALOCAL_P_H_INCLUDED
#define ASMJIT_CORE_RALOCAL_P_H_INCLUDED
#include "../core/api-config.h"
#ifndef ASMJIT_NO_COMPILER
#include "../core/raassignment_p.h"
#include "../core/radefs_p.h"
#include "../core/rapass_p.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
//! \cond INTERNAL
//! \addtogroup asmjit_ra
//! \{
//! Local register allocator.
class RALocalAllocator {
public:
ASMJIT_NONCOPYABLE(RALocalAllocator)
using PhysToWorkMap = RAAssignment::PhysToWorkMap;
using WorkToPhysMap = RAAssignment::WorkToPhysMap;
//! Link to `BaseRAPass`.
BaseRAPass* _pass {};
//! Link to `BaseCompiler`.
BaseCompiler* _cc {};
//! Architecture traits.
const ArchTraits* _archTraits {};
//! Registers available to the allocator.
RARegMask _availableRegs {};
//! Registers clobbered by the allocator.
RARegMask _clobberedRegs {};
//! Registers that must be preserved by the function (clobbering means saving & restoring in function prolog & epilog).
RARegMask _funcPreservedRegs {};
//! Register assignment (current).
RAAssignment _curAssignment {};
//! Register assignment used temporarily during assignment switches.
RAAssignment _tmpAssignment {};
//! Link to the current `RABlock`.
RABlock* _block {};
//! InstNode.
InstNode* _node {};
//! RA instruction.
RAInst* _raInst {};
//! Count of all TiedReg's.
uint32_t _tiedTotal {};
//! Count of TiedReg's per register group.
RARegCount _tiedCount {};
//! Temporary workToPhysMap that can be used freely by the allocator.
WorkToPhysMap* _tmpWorkToPhysMap {};
//! \name Construction & Destruction
//! \{
inline explicit RALocalAllocator(BaseRAPass* pass) noexcept
: _pass(pass),
_cc(pass->cc()),
_archTraits(pass->_archTraits),
_availableRegs(pass->_availableRegs) {
_funcPreservedRegs.init(pass->func()->frame().preservedRegs());
}
Error init() noexcept;
//! \}
//! \name Accessors
//! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RAWorkReg* workRegById(uint32_t workId) const noexcept { return _pass->workRegById(workId); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG PhysToWorkMap* physToWorkMap() const noexcept { return _curAssignment.physToWorkMap(); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG WorkToPhysMap* workToPhysMap() const noexcept { return _curAssignment.workToPhysMap(); }
//! Returns the currently processed block.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RABlock* block() const noexcept { return _block; }
//! Sets the currently processed block.
ASMJIT_INLINE_NODEBUG void setBlock(RABlock* block) noexcept { _block = block; }
//! Returns the currently processed `InstNode`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG InstNode* node() const noexcept { return _node; }
//! Returns the currently processed `RAInst`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RAInst* raInst() const noexcept { return _raInst; }
//! Returns all tied regs as `RATiedReg` array.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RATiedReg* tiedRegs() const noexcept { return _raInst->tiedRegs(); }
//! Returns tied registers of the given register `group`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RATiedReg* tiedRegs(RegGroup group) const noexcept { return _raInst->tiedRegs(group); }
//! Returns count of all TiedRegs used by the instruction.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t tiedCount() const noexcept { return _tiedTotal; }
//! Returns count of TiedRegs used by the given register `group`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t tiedCount(RegGroup group) const noexcept { return _tiedCount.get(group); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isGroupUsed(RegGroup group) const noexcept { return _tiedCount[group] != 0; }
//! \}
//! \name Assignment
//! \{
[[nodiscard]]
Error makeInitialAssignment() noexcept;
[[nodiscard]]
Error replaceAssignment(const PhysToWorkMap* physToWorkMap) noexcept;
//! Switches to the given assignment by reassigning all registers and emitting code that reassigns them.
//! This is always used to switch to a previously stored assignment.
//!
//! If `tryMode` is true then the final assignment doesn't have to be exactly the same as specified by
//! `dstPhysToWorkMap` and `dstWorkToPhysMap`. This mode is only used before conditional jumps that
//! already have an assignment, to generate a code sequence that is always executed regardless of the flow.
[[nodiscard]]
Error switchToAssignment(PhysToWorkMap* dstPhysToWorkMap, const ZoneBitVector& liveIn, bool dstReadOnly, bool tryMode) noexcept;
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Error spillRegsBeforeEntry(RABlock* block) noexcept {
return spillScratchGpRegsBeforeEntry(block->entryScratchGpRegs());
}
[[nodiscard]]
Error spillScratchGpRegsBeforeEntry(uint32_t scratchRegs) noexcept;
//! \}
//! \name Allocation
//! \{
[[nodiscard]]
Error allocInst(InstNode* node) noexcept;
[[nodiscard]]
Error spillAfterAllocation(InstNode* node) noexcept;
[[nodiscard]]
Error allocBranch(InstNode* node, RABlock* target, RABlock* cont) noexcept;
[[nodiscard]]
Error allocJumpTable(InstNode* node, const RABlocks& targets, RABlock* cont) noexcept;
//! \}
//! \name Decision Making
//! \{
enum CostModel : uint32_t {
kCostOfFrequency = 1048576,
kCostOfDirtyFlag = kCostOfFrequency / 4
};
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t costByFrequency(float freq) const noexcept {
return uint32_t(int32_t(freq * float(kCostOfFrequency)));
}
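// A hedged worked example for `calculateSpillCost()` below: with `kCostOfFrequency == 1048576`,
// a work register with a live frequency of 0.25 costs 262144; if its assigned physical register
// is also dirty, `kCostOfDirtyFlag` (262144) is added, doubling the spill cost to 524288.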
[[nodiscard]]
ASMJIT_INLINE uint32_t calculateSpillCost(RegGroup group, uint32_t workId, uint32_t assignedId) const noexcept {
RAWorkReg* workReg = workRegById(workId);
uint32_t cost = costByFrequency(workReg->liveStats().freq());
if (_curAssignment.isPhysDirty(group, assignedId))
cost += kCostOfDirtyFlag;
return cost;
}
[[nodiscard]]
ASMJIT_INLINE uint32_t pickBestSuitableRegister(RegGroup group, RegMask allocableRegs) const noexcept {
// These are registers that must be preserved by the function itself.
RegMask preservedRegs = _funcPreservedRegs[group];
// Reduce the set by removing preserved registers when possible.
if (allocableRegs & ~preservedRegs) {
allocableRegs &= ~preservedRegs;
}
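// A hedged illustration: allocableRegs == 0b1011 with preservedRegs == 0b0011 reduces to
// 0b1000, picking physical id 3; if every allocable register were preserved, the reduction
// is skipped and the lowest allocable id would be picked instead.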
return Support::ctz(allocableRegs);
}
//! Decides on register assignment.
[[nodiscard]]
uint32_t decideOnAssignment(RegGroup group, uint32_t workId, uint32_t assignedId, RegMask allocableRegs) const noexcept;
//! Decides whether to MOVE or SPILL the given WorkReg, because it's allocated in a physical register that has
//! to be used by another WorkReg.
//!
//! The function must return either `RAAssignment::kPhysNone`, which means that the WorkReg of `workId` should be
//! spilled, or a valid physical register ID, which means that the register should be moved to that physical register
//! instead.
[[nodiscard]]
uint32_t decideOnReassignment(RegGroup group, uint32_t workId, uint32_t assignedId, RegMask allocableRegs, RAInst* raInst) const noexcept;
//! Decides on the best spill given a register mask `spillableRegs`.
[[nodiscard]]
uint32_t decideOnSpillFor(RegGroup group, uint32_t workId, RegMask spillableRegs, uint32_t* spillWorkId) const noexcept;
//! \}
//! \name Emit
//! \{
//! Emits a move between a destination and source register, and fixes the
//! register assignment.
[[nodiscard]]
inline Error onMoveReg(RegGroup group, uint32_t workId, uint32_t dstPhysId, uint32_t srcPhysId) noexcept {
if (dstPhysId == srcPhysId) {
return kErrorOk;
}
_curAssignment.reassign(group, workId, dstPhysId, srcPhysId);
return _pass->emitMove(workId, dstPhysId, srcPhysId);
}
//! Emits a swap between two physical registers and fixes their assignment.
//!
//! \note The target must support this operation, otherwise this would ASSERT.
[[nodiscard]]
inline Error onSwapReg(RegGroup group, uint32_t aWorkId, uint32_t aPhysId, uint32_t bWorkId, uint32_t bPhysId) noexcept {
_curAssignment.swap(group, aWorkId, aPhysId, bWorkId, bPhysId);
return _pass->emitSwap(aWorkId, aPhysId, bWorkId, bPhysId);
}
//! Emits a load from [VirtReg/WorkReg]'s spill slot to a physical register
//! and makes it assigned and clean.
[[nodiscard]]
inline Error onLoadReg(RegGroup group, uint32_t workId, uint32_t physId) noexcept {
_curAssignment.assign(group, workId, physId, RAAssignment::kClean);
return _pass->emitLoad(workId, physId);
}
//! Emits a save of a physical register to a [VirtReg/WorkReg]'s spill slot,
//! keeps it assigned, and makes it clean.
[[nodiscard]]
inline Error onSaveReg(RegGroup group, uint32_t workId, uint32_t physId) noexcept {
ASMJIT_ASSERT(_curAssignment.workToPhysId(group, workId) == physId);
ASMJIT_ASSERT(_curAssignment.physToWorkId(group, physId) == workId);
_curAssignment.makeClean(group, workId, physId);
return _pass->emitSave(workId, physId);
}
//! Assigns a register; its content is undefined at this point.
[[nodiscard]]
inline Error onAssignReg(RegGroup group, uint32_t workId, uint32_t physId, bool dirty) noexcept {
_curAssignment.assign(group, workId, physId, dirty);
return kErrorOk;
}
//! Spills a variable/register and saves its content to the memory home if it was modified.
[[nodiscard]]
inline Error onSpillReg(RegGroup group, uint32_t workId, uint32_t physId) noexcept {
if (_curAssignment.isPhysDirty(group, physId))
ASMJIT_PROPAGATE(onSaveReg(group, workId, physId));
onKillReg(group, workId, physId);
return kErrorOk;
}
[[nodiscard]]
inline Error onDirtyReg(RegGroup group, uint32_t workId, uint32_t physId) noexcept {
_curAssignment.makeDirty(group, workId, physId);
return kErrorOk;
}
inline void onKillReg(RegGroup group, uint32_t workId, uint32_t physId) noexcept {
_curAssignment.unassign(group, workId, physId);
}
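//! A minimal sketch of how the emit helpers above compose (hypothetical ids; assumes
//! `allocator` is an initialized `RALocalAllocator`):
//!
//! ```
//! // Load work register #4 into physical register #1, mark it dirty after a write,
//! // then spill it - the spill emits a save only because the register is dirty.
//! ASMJIT_PROPAGATE(allocator.onLoadReg(RegGroup::kGp, 4, 1));
//! ASMJIT_PROPAGATE(allocator.onDirtyReg(RegGroup::kGp, 4, 1));
//! ASMJIT_PROPAGATE(allocator.onSpillReg(RegGroup::kGp, 4, 1));
//! ```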
//! \}
};
//! \}
//! \endcond
ASMJIT_END_NAMESPACE
#endif // !ASMJIT_NO_COMPILER
#endif // ASMJIT_CORE_RALOCAL_P_H_INCLUDED

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -1,193 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#ifndef ASMJIT_NO_COMPILER
#include "../core/rastack_p.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
// RAStackAllocator - Slots
// ========================
RAStackSlot* RAStackAllocator::newSlot(uint32_t baseRegId, uint32_t size, uint32_t alignment, uint32_t flags) noexcept {
if (ASMJIT_UNLIKELY(_slots.willGrow(allocator(), 1) != kErrorOk)) {
return nullptr;
}
RAStackSlot* slot = zone()->alloc<RAStackSlot>();
if (ASMJIT_UNLIKELY(!slot)) {
return nullptr;
}
slot->_baseRegId = uint8_t(baseRegId);
slot->_alignment = uint8_t(Support::max<uint32_t>(alignment, 1));
slot->_flags = uint16_t(flags);
slot->_size = size;
slot->_useCount = 0;
slot->_weight = 0;
slot->_offset = 0;
_alignment = Support::max<uint32_t>(_alignment, alignment);
_slots.appendUnsafe(slot);
return slot;
}
// RAStackAllocator - Utilities
// ============================
struct RAStackGap {
inline RAStackGap() noexcept
: offset(0),
size(0) {}
inline RAStackGap(uint32_t offset, uint32_t size) noexcept
: offset(offset),
size(size) {}
inline RAStackGap(const RAStackGap& other) noexcept
: offset(other.offset),
size(other.size) {}
uint32_t offset;
uint32_t size;
};
Error RAStackAllocator::calculateStackFrame() noexcept {
// Base weight added to all registers regardless of their size and alignment.
uint32_t kBaseRegWeight = 16;
// STEP 1:
//
// Update usage based on the size of the slot. We boost smaller slots in a way that a 32-bit register has a
// higher priority than a 128-bit register; however, if a 128-bit register is used 4 times more than some
// 32-bit register, it will outweigh it.
for (RAStackSlot* slot : _slots) {
uint32_t alignment = slot->alignment();
ASMJIT_ASSERT(alignment > 0);
uint32_t power = Support::min<uint32_t>(Support::ctz(alignment), 6);
uint64_t weight;
if (slot->isRegHome()) {
weight = kBaseRegWeight + (uint64_t(slot->useCount()) * (7 - power));
}
else {
weight = power;
}
// If the weight overflows, which is less likely than winning the lottery, just use the maximum possible
// weight. In such a case it probably doesn't matter at all.
if (weight > 0xFFFFFFFFu) {
weight = 0xFFFFFFFFu;
}
slot->setWeight(uint32_t(weight));
}
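// A hedged example of the weights above: a register-home slot with 16-byte alignment
// (power == 4) used 10 times weighs 16 + 10 * (7 - 4) == 46, while a plain 4-byte-aligned
// spill slot (power == 2) weighs just 2, so register homes are laid out first.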
// STEP 2:
//
// Sort stack slots based on their newly calculated weight (in descending order).
_slots.sort([](const RAStackSlot* a, const RAStackSlot* b) noexcept {
return a->weight() > b->weight() ? 1 :
a->weight() == b->weight() ? 0 : -1;
});
// STEP 3:
//
// Calculate the offset of each slot. We start from the slot that has the highest weight and advance to
// slots with lower weight. It could look like offsets start from the first slot in our list and then simply
// increase, but that's not always the case, as we also try to fill gaps introduced by the fact that slots
// are sorted by weight and not by size & alignment; so when we need to align some slot, we distribute the
// gap caused by the alignment to `gaps`.
uint32_t offset = 0;
ZoneVector<RAStackGap> gaps[kSizeCount - 1];
for (RAStackSlot* slot : _slots) {
if (slot->isStackArg()) {
continue;
}
uint32_t slotAlignment = slot->alignment();
uint32_t alignedOffset = Support::alignUp(offset, slotAlignment);
// Try to place the slot within existing gaps first, before advancing the `offset`.
bool foundGap = false;
uint32_t gapSize = 0;
uint32_t gapOffset = 0;
{
uint32_t slotSize = slot->size();
if (slotSize < (1u << uint32_t(ASMJIT_ARRAY_SIZE(gaps)))) {
// Iterate from the lowest to the highest possible.
uint32_t index = Support::ctz(slotSize);
do {
if (!gaps[index].empty()) {
RAStackGap gap = gaps[index].pop();
ASMJIT_ASSERT(Support::isAligned(gap.offset, slotAlignment));
slot->setOffset(int32_t(gap.offset));
gapSize = gap.size - slotSize;
gapOffset = gap.offset + slotSize; // The remaining gap starts right after the just-placed slot.
foundGap = true;
break;
}
} while (++index < uint32_t(ASMJIT_ARRAY_SIZE(gaps)));
}
}
// No gap found; we may create new one(s) if the current offset is not aligned.
if (!foundGap && offset != alignedOffset) {
gapSize = alignedOffset - offset;
gapOffset = alignedOffset;
offset = alignedOffset;
}
// Non-zero if we have found a gap and not filled all of it, or if we aligned the current offset.
if (gapSize) {
uint32_t gapEnd = gapSize + gapOffset;
while (gapOffset < gapEnd) {
uint32_t index = Support::ctz(gapOffset);
uint32_t slotSize = 1u << index;
// Weird case, better to bail...
if (gapEnd - gapOffset < slotSize) {
break;
}
ASMJIT_PROPAGATE(gaps[index].append(allocator(), RAStackGap(gapOffset, slotSize)));
gapOffset += slotSize;
}
}
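// A hedged example of the distribution above: a 12-byte gap at offset 20 is split into a
// 4-byte sub-gap at 20 (ctz(20) == 2) and an 8-byte sub-gap at 24 (ctz(24) == 3), at which
// point gapOffset reaches gapEnd == 32 and the loop stops.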
if (!foundGap) {
ASMJIT_ASSERT(Support::isAligned(offset, slotAlignment));
slot->setOffset(int32_t(offset));
offset += slot->size();
}
}
_stackSize = Support::alignUp(offset, _alignment);
return kErrorOk;
}
Error RAStackAllocator::adjustSlotOffsets(int32_t offset) noexcept {
for (RAStackSlot* slot : _slots) {
if (!slot->isStackArg()) {
slot->_offset += offset;
}
}
return kErrorOk;
}
ASMJIT_END_NAMESPACE
#endif // !ASMJIT_NO_COMPILER

Some files were not shown because too many files have changed in this diff